From ee86201dfa335664abdfb9416388a51eb565e37e Mon Sep 17 00:00:00 2001
From: Brian Vo
Date: Sun, 31 Aug 2025 21:07:11 +0700
Subject: [PATCH 01/10] remove unused things

---
 .github/ISSUE_TEMPLATE/bug-report.yaml | 147 ---
 .github/ISSUE_TEMPLATE/config.yml | 6 -
 .github/ISSUE_TEMPLATE/enhancement.yaml | 20 -
 .github/ISSUE_TEMPLATE/failing-test.yaml | 41 -
 .github/PULL_REQUEST_TEMPLATE.md | 44 -
 .github/dependabot.yml | 21 -
 .github/workflows/auto-label-os.yml | 32 -
 .../upgrade-patch-versions-schedule.yml | 55 -
 .github/workflows/upgrade-patch-versions.yml | 44 -
 .gitignore | 1 -
 .gitlab-ci.yml | 66 -
 .gitlab-ci/build.yml | 30 -
 .gitlab-ci/kubevirt.yml | 153 ---
 .gitlab-ci/lint.yml | 26 -
 .gitlab-ci/molecule.yml | 55 -
 .gitlab-ci/terraform.yml | 120 --
 .gitlab-ci/vagrant.yml | 49 -
 .mdlrc | 1 -
 .nojekyll | 0
 CHANGELOG.md | 1 -
 CNAME | 1 -
 CONTRIBUTING.md | 47 -
 LICENSE | 201 ---
 OWNERS | 8 -
 OWNERS_ALIASES | 27 -
 README.md | 58 +-
 RELEASE.md | 85 --
 SECURITY_CONTACTS | 15 -
 Vagrantfile | 349 ------
 code-of-conduct.md | 3 -
 contrib/aws_iam/kubernetes-master-policy.json | 27 -
 contrib/aws_iam/kubernetes-master-role.json | 10 -
 contrib/aws_iam/kubernetes-minion-policy.json | 45 -
 contrib/aws_iam/kubernetes-minion-role.json | 10 -
 .../aws_inventory/kubespray-aws-inventory.py | 81 --
 contrib/aws_inventory/requirements.txt | 1 -
 contrib/azurerm/.gitignore | 2 -
 contrib/azurerm/README.md | 67 -
 contrib/azurerm/apply-rg.sh | 19 -
 contrib/azurerm/clear-rg.sh | 14 -
 contrib/azurerm/generate-inventory.sh | 18 -
 contrib/azurerm/generate-inventory.yml | 6 -
 contrib/azurerm/generate-inventory_2.yml | 6 -
 contrib/azurerm/generate-templates.yml | 6 -
 contrib/azurerm/group_vars/all | 51 -
 .../roles/generate-inventory/tasks/main.yml | 15 -
 .../generate-inventory/templates/inventory.j2 | 33 -
 .../roles/generate-inventory_2/tasks/main.yml | 31 -
 .../templates/inventory.j2 | 33 -
 .../templates/loadbalancer_vars.j2 | 8 -
 .../generate-templates/defaults/main.yml | 37 -
 .../roles/generate-templates/tasks/main.yml | 25 -
 .../templates/availability-sets.json | 30 -
 .../generate-templates/templates/bastion.json | 106 --
 .../templates/clear-rg.json | 8 -
 .../generate-templates/templates/masters.json | 198 ---
 .../generate-templates/templates/minions.json | 115 --
 .../generate-templates/templates/network.json | 109 --
 .../generate-templates/templates/storage.json | 19 -
 contrib/terraform/aws/.gitignore | 3 -
 contrib/terraform/aws/README.md | 124 --
 .../terraform/aws/create-infrastructure.tf | 185 ---
 .../terraform/aws/credentials.tfvars.example | 8 -
 contrib/terraform/aws/docs/aws_kubespray.png | Bin 116455 -> 0 bytes
 contrib/terraform/aws/modules/iam/main.tf | 141 ---
 contrib/terraform/aws/modules/iam/outputs.tf | 7 -
 .../terraform/aws/modules/iam/variables.tf | 3 -
 contrib/terraform/aws/modules/nlb/main.tf | 41 -
 contrib/terraform/aws/modules/nlb/outputs.tf | 11 -
 .../terraform/aws/modules/nlb/variables.tf | 30 -
 contrib/terraform/aws/modules/vpc/main.tf | 137 ---
 contrib/terraform/aws/modules/vpc/outputs.tf | 19 -
 .../terraform/aws/modules/vpc/variables.tf | 27 -
 contrib/terraform/aws/output.tf | 27 -
 .../aws/sample-inventory/cluster.tfvars | 59 -
 .../terraform/aws/sample-inventory/group_vars | 1 -
 contrib/terraform/aws/templates/inventory.tpl | 27 -
 contrib/terraform/aws/terraform.tfvars | 43 -
 .../terraform/aws/terraform.tfvars.example | 33 -
 contrib/terraform/aws/variables.tf | 143 ---
 contrib/terraform/exoscale/README.md | 152 ---
 contrib/terraform/exoscale/default.tfvars | 65 -
 contrib/terraform/exoscale/main.tf | 49 -
 .../modules/kubernetes-cluster/main.tf | 191 ---
 .../modules/kubernetes-cluster/output.tf | 31 -
 .../templates/cloud-init.tmpl | 52 -
 .../modules/kubernetes-cluster/variables.tf | 42 -
 .../modules/kubernetes-cluster/versions.tf | 9 -
 contrib/terraform/exoscale/output.tf | 15 -
 .../exoscale/sample-inventory/cluster.tfvars | 65 -
 .../exoscale/sample-inventory/group_vars | 1 -
 .../exoscale/templates/inventory.tpl | 19 -
 contrib/terraform/exoscale/variables.tf | 46 -
 contrib/terraform/exoscale/versions.tf | 15 -
 contrib/terraform/gcp/README.md | 104 --
 contrib/terraform/gcp/generate-inventory.sh | 76 --
 contrib/terraform/gcp/main.tf | 39 -
 .../gcp/modules/kubernetes-cluster/main.tf | 421 ------
 .../gcp/modules/kubernetes-cluster/output.tf | 27 -
 .../modules/kubernetes-cluster/variables.tf | 86 --
 contrib/terraform/gcp/output.tf | 15 -
 contrib/terraform/gcp/tfvars.json | 63 -
 contrib/terraform/gcp/variables.tf | 108 --
 contrib/terraform/group_vars | 1 -
 contrib/terraform/hetzner/README.md | 122 --
 contrib/terraform/hetzner/default.tfvars | 46 -
 contrib/terraform/hetzner/main.tf | 57 -
 .../kubernetes-cluster-flatcar/main.tf | 144 ---
 .../kubernetes-cluster-flatcar/outputs.tf | 29 -
 .../templates/machine.yaml.tmpl | 19 -
 .../kubernetes-cluster-flatcar/variables.tf | 60 -
 .../kubernetes-cluster-flatcar/versions.tf | 14 -
 .../modules/kubernetes-cluster/main.tf | 122 --
 .../modules/kubernetes-cluster/output.tf | 27 -
 .../templates/cloud-init.tmpl | 16 -
 .../modules/kubernetes-cluster/variables.tf | 44 -
 .../modules/kubernetes-cluster/versions.tf | 9 -
 contrib/terraform/hetzner/output.tf | 7 -
 .../hetzner/sample-inventory/cluster.tfvars | 46 -
 .../hetzner/sample-inventory/group_vars | 1 -
 .../terraform/hetzner/templates/inventory.tpl | 19 -
 contrib/terraform/hetzner/variables.tf | 56 -
 contrib/terraform/hetzner/versions.tf | 12 -
 contrib/terraform/nifcloud/.gitignore | 5 -
 contrib/terraform/nifcloud/README.md | 138 ---
 .../terraform/nifcloud/generate-inventory.sh | 64 -
 contrib/terraform/nifcloud/main.tf | 36 -
 .../modules/kubernetes-cluster/main.tf | 301 -----
 .../modules/kubernetes-cluster/outputs.tf | 48 -
 .../templates/userdata.tftpl | 45 -
 .../modules/kubernetes-cluster/terraform.tf | 9 -
 .../modules/kubernetes-cluster/variables.tf | 81 --
 contrib/terraform/nifcloud/output.tf | 3 -
 .../nifcloud/sample-inventory/cluster.tfvars | 22 -
 .../nifcloud/sample-inventory/group_vars | 1 -
 contrib/terraform/nifcloud/terraform.tf | 9 -
 contrib/terraform/nifcloud/variables.tf | 77 --
 contrib/terraform/openstack/.gitignore | 5 -
 contrib/terraform/openstack/README.md | 801 ------------
 contrib/terraform/openstack/hosts | 1 -
 contrib/terraform/openstack/kubespray.tf | 155 ---
 .../compute/ansible_bastion_template.txt | 1 -
 .../openstack/modules/compute/main.tf | 1092 -----------------
 .../openstack/modules/compute/outputs.tf | 3 -
 .../compute/templates/cloudinit.yaml.tmpl | 54 -
 .../openstack/modules/compute/variables.tf | 269 ----
 .../openstack/modules/compute/versions.tf | 8 -
 .../terraform/openstack/modules/ips/main.tf | 46 -
 .../openstack/modules/ips/outputs.tf | 48 -
 .../openstack/modules/ips/variables.tf | 27 -
 .../openstack/modules/ips/versions.tf | 11 -
 .../openstack/modules/loadbalancer/main.tf | 54 -
 .../modules/loadbalancer/variables.tf | 15 -
 .../modules/loadbalancer/versions.tf | 8 -
 .../openstack/modules/network/main.tf | 34 -
 .../openstack/modules/network/outputs.tf | 15 -
 .../openstack/modules/network/variables.tf | 21 -
 .../openstack/modules/network/versions.tf | 8 -
 .../openstack/sample-inventory/cluster.tfvars | 89 --
 .../openstack/sample-inventory/group_vars | 1 -
 contrib/terraform/openstack/variables.tf | 411 -------
 contrib/terraform/openstack/versions.tf | 9 -
 contrib/terraform/terraform.py | 475 -------
 contrib/terraform/upcloud/README.md | 173 ---
 .../terraform/upcloud/cluster-settings.tfvars | 198 ---
 contrib/terraform/upcloud/main.tf | 65 -
 .../modules/kubernetes-cluster/main.tf | 904 --------------
 .../modules/kubernetes-cluster/output.tf | 15 -
 .../modules/kubernetes-cluster/variables.tf | 202 ---
 .../modules/kubernetes-cluster/versions.tf | 10 -
 contrib/terraform/upcloud/output.tf | 16 -
 .../upcloud/sample-inventory/cluster.tfvars | 149 ---
 .../upcloud/sample-inventory/group_vars | 1 -
 .../terraform/upcloud/templates/inventory.tpl | 33 -
 contrib/terraform/upcloud/variables.tf | 259 ----
 contrib/terraform/upcloud/versions.tf | 10 -
 contrib/terraform/vsphere/README.md | 128 --
 contrib/terraform/vsphere/default.tfvars | 38 -
 contrib/terraform/vsphere/main.tf | 100 --
 .../modules/kubernetes-cluster/main.tf | 149 ---
 .../modules/kubernetes-cluster/output.tf | 15 -
 .../templates/cloud-init.tpl | 6 -
 .../kubernetes-cluster/templates/metadata.tpl | 14 -
 .../templates/vapp-cloud-init.tpl | 24 -
 .../modules/kubernetes-cluster/variables.tf | 43 -
 .../modules/kubernetes-cluster/versions.tf | 9 -
 contrib/terraform/vsphere/output.tf | 31 -
 .../vsphere/sample-inventory/cluster.tfvars | 33 -
 .../vsphere/sample-inventory/group_vars | 1 -
 .../terraform/vsphere/templates/inventory.tpl | 17 -
 contrib/terraform/vsphere/variables.tf | 101 --
 contrib/terraform/vsphere/versions.tf | 9 -
 index.html | 48 -
 inventory/2SpeedLab/group_vars/all/all.yml | 139 +++
 .../2SpeedLab/group_vars/all/containerd.yml | 61 +
 inventory/2SpeedLab/group_vars/all/coreos.yml | 2 +
 inventory/2SpeedLab/group_vars/all/cri-o.yml | 9 +
 .../group_vars/all/database_nodes.yml | 8 +
 inventory/2SpeedLab/group_vars/all/etcd.yml | 16 +
 .../2SpeedLab/group_vars/all/offline.yml | 114 ++
 .../group_vars/k8s_cluster/addons.yml | 248 ++++
 .../group_vars/k8s_cluster/k8s-cluster.yml | 372 ++++++
 .../group_vars/k8s_cluster/k8s-net-calico.yml | 126 ++
 .../group_vars/k8s_cluster/k8s-net-cilium.yml | 390 ++++++
 .../k8s_cluster/k8s-net-custom-cni.yml | 51 +
 .../k8s_cluster/k8s-net-flannel.yml | 18 +
 .../k8s_cluster/k8s-net-kube-router.yml | 67 +
 .../k8s_cluster/k8s-net-macvlan.yml | 6 +
 .../k8s_cluster/kube_control_plane.yml | 11 +
 inventory/2SpeedLab/inventory.ini | 28 +
 logo/LICENSE | 1 -
 logo/logo-clear.png | Bin 4679 -> 0 bytes
 logo/logo-clear.svg | 80 --
 logo/logo-dark.png | Bin 6360 -> 0 bytes
 logo/logo-dark.svg | 83 --
 logo/logo-text-clear.png | Bin 13074 -> 0 bytes
 logo/logo-text-clear.svg | 107 --
 logo/logo-text-dark.png | Bin 13384 -> 0 bytes
 logo/logo-text-dark.svg | 110 --
 logo/logo-text-mixed.png | Bin 16076 -> 0 bytes
 logo/logo-text-mixed.svg | 110 --
 logo/logos.pdf | Bin 288304 -> 0 bytes
 logo/usage_guidelines.md | 16 -
 meta/runtime.yml | 2 +-
 pipeline.Dockerfile | 60 -
 playbooks/ansible_version.yml | 2 +-
 scripts/gitlab-runner.sh | 22 -
 test-infra/vagrant-docker/Dockerfile | 16 -
 test-infra/vagrant-docker/README.md | 24 -
 test-infra/vagrant-docker/build.sh | 13 -
 230 files changed, 1669 insertions(+), 14391 deletions(-)
 delete mode 100644 .github/ISSUE_TEMPLATE/bug-report.yaml
 delete mode 100644 .github/ISSUE_TEMPLATE/config.yml
 delete mode 100644 .github/ISSUE_TEMPLATE/enhancement.yaml
 delete mode 100644 .github/ISSUE_TEMPLATE/failing-test.yaml
 delete mode 100644 .github/PULL_REQUEST_TEMPLATE.md
 delete mode 100644 .github/dependabot.yml
 delete mode 100644 .github/workflows/auto-label-os.yml
 delete mode 100644 .github/workflows/upgrade-patch-versions-schedule.yml
 delete mode 100644 .github/workflows/upgrade-patch-versions.yml
 delete mode 100644 .gitlab-ci.yml
 delete mode 100644 .gitlab-ci/build.yml
 delete mode 100644 .gitlab-ci/kubevirt.yml
 delete mode 100644 .gitlab-ci/lint.yml
 delete mode 100644 .gitlab-ci/molecule.yml
 delete mode 100644 .gitlab-ci/terraform.yml
 delete mode 100644 .gitlab-ci/vagrant.yml
 delete mode 100644 .mdlrc
 delete mode 100644 .nojekyll
 delete mode 100644 CHANGELOG.md
 delete mode 100644 CNAME
 delete mode 100644 CONTRIBUTING.md
 delete mode 100644 LICENSE
 delete mode 100644 OWNERS
 delete mode 100644 OWNERS_ALIASES
 delete mode 100644 RELEASE.md
 delete mode 100644 SECURITY_CONTACTS
 delete mode 100644 Vagrantfile
 delete mode 100644 code-of-conduct.md
 delete mode 100644 contrib/aws_iam/kubernetes-master-policy.json
 delete mode 100644 contrib/aws_iam/kubernetes-master-role.json
 delete mode 100644 contrib/aws_iam/kubernetes-minion-policy.json
 delete mode 100644 contrib/aws_iam/kubernetes-minion-role.json
 delete mode 100755 contrib/aws_inventory/kubespray-aws-inventory.py
 delete mode 100644 contrib/aws_inventory/requirements.txt
 delete mode 100644 contrib/azurerm/.gitignore
 delete mode 100644 contrib/azurerm/README.md
 delete mode 100755 contrib/azurerm/apply-rg.sh
 delete mode 100755 contrib/azurerm/clear-rg.sh
 delete mode 100755 contrib/azurerm/generate-inventory.sh
 delete mode 100644 contrib/azurerm/generate-inventory.yml
 delete mode 100644 contrib/azurerm/generate-inventory_2.yml
 delete mode 100644 contrib/azurerm/generate-templates.yml
 delete mode 100644 contrib/azurerm/group_vars/all
 delete mode 100644 contrib/azurerm/roles/generate-inventory/tasks/main.yml
 delete mode 100644 contrib/azurerm/roles/generate-inventory/templates/inventory.j2
 delete mode 100644 contrib/azurerm/roles/generate-inventory_2/tasks/main.yml
 delete mode 100644 contrib/azurerm/roles/generate-inventory_2/templates/inventory.j2
 delete mode 100644 contrib/azurerm/roles/generate-inventory_2/templates/loadbalancer_vars.j2
 delete mode 100644 contrib/azurerm/roles/generate-templates/defaults/main.yml
 delete mode 100644 contrib/azurerm/roles/generate-templates/tasks/main.yml
 delete mode 100644 contrib/azurerm/roles/generate-templates/templates/availability-sets.json
 delete mode 100644 contrib/azurerm/roles/generate-templates/templates/bastion.json
 delete mode 100644 contrib/azurerm/roles/generate-templates/templates/clear-rg.json
 delete mode 100644 contrib/azurerm/roles/generate-templates/templates/masters.json
 delete mode 100644 contrib/azurerm/roles/generate-templates/templates/minions.json
 delete mode 100644 contrib/azurerm/roles/generate-templates/templates/network.json
 delete mode 100644 contrib/azurerm/roles/generate-templates/templates/storage.json
 delete mode 100644 contrib/terraform/aws/.gitignore
 delete mode 100644 contrib/terraform/aws/README.md
 delete mode 100644 contrib/terraform/aws/create-infrastructure.tf
 delete mode 100644 contrib/terraform/aws/credentials.tfvars.example
 delete mode 100644 contrib/terraform/aws/docs/aws_kubespray.png
 delete mode 100644 contrib/terraform/aws/modules/iam/main.tf
 delete mode 100644 contrib/terraform/aws/modules/iam/outputs.tf
 delete mode 100644 contrib/terraform/aws/modules/iam/variables.tf
 delete mode 100644 contrib/terraform/aws/modules/nlb/main.tf
 delete mode 100644 contrib/terraform/aws/modules/nlb/outputs.tf
 delete mode 100644 contrib/terraform/aws/modules/nlb/variables.tf
 delete mode 100644 contrib/terraform/aws/modules/vpc/main.tf
 delete mode 100644 contrib/terraform/aws/modules/vpc/outputs.tf
 delete mode 100644 contrib/terraform/aws/modules/vpc/variables.tf
 delete mode 100644 contrib/terraform/aws/output.tf
 delete mode 100644 contrib/terraform/aws/sample-inventory/cluster.tfvars
 delete mode 120000 contrib/terraform/aws/sample-inventory/group_vars
 delete mode 100644 contrib/terraform/aws/templates/inventory.tpl
 delete mode 100644 contrib/terraform/aws/terraform.tfvars
 delete mode 100644 contrib/terraform/aws/terraform.tfvars.example
 delete mode 100644 contrib/terraform/aws/variables.tf
 delete mode 100644 contrib/terraform/exoscale/README.md
 delete mode 100644 contrib/terraform/exoscale/default.tfvars
 delete mode 100644 contrib/terraform/exoscale/main.tf
 delete mode 100644 contrib/terraform/exoscale/modules/kubernetes-cluster/main.tf
 delete mode 100644 contrib/terraform/exoscale/modules/kubernetes-cluster/output.tf
 delete mode 100644 contrib/terraform/exoscale/modules/kubernetes-cluster/templates/cloud-init.tmpl
 delete mode 100644 contrib/terraform/exoscale/modules/kubernetes-cluster/variables.tf
 delete mode 100644 contrib/terraform/exoscale/modules/kubernetes-cluster/versions.tf
 delete mode 100644 contrib/terraform/exoscale/output.tf
 delete mode 100644 contrib/terraform/exoscale/sample-inventory/cluster.tfvars
 delete mode 120000 contrib/terraform/exoscale/sample-inventory/group_vars
 delete mode 100644 contrib/terraform/exoscale/templates/inventory.tpl
 delete mode 100644 contrib/terraform/exoscale/variables.tf
 delete mode 100644 contrib/terraform/exoscale/versions.tf
 delete mode 100644 contrib/terraform/gcp/README.md
 delete mode 100755 contrib/terraform/gcp/generate-inventory.sh
 delete mode 100644 contrib/terraform/gcp/main.tf
 delete mode 100644 contrib/terraform/gcp/modules/kubernetes-cluster/main.tf
 delete mode 100644 contrib/terraform/gcp/modules/kubernetes-cluster/output.tf
 delete mode 100644 contrib/terraform/gcp/modules/kubernetes-cluster/variables.tf
 delete mode 100644 contrib/terraform/gcp/output.tf
 delete mode 100644 contrib/terraform/gcp/tfvars.json
 delete mode 100644 contrib/terraform/gcp/variables.tf
 delete mode 120000 contrib/terraform/group_vars
 delete mode 100644 contrib/terraform/hetzner/README.md
 delete mode 100644 contrib/terraform/hetzner/default.tfvars
 delete mode 100644 contrib/terraform/hetzner/main.tf
 delete mode 100644 contrib/terraform/hetzner/modules/kubernetes-cluster-flatcar/main.tf
 delete mode 100644 contrib/terraform/hetzner/modules/kubernetes-cluster-flatcar/outputs.tf
 delete mode 100644 contrib/terraform/hetzner/modules/kubernetes-cluster-flatcar/templates/machine.yaml.tmpl
 delete mode 100644 contrib/terraform/hetzner/modules/kubernetes-cluster-flatcar/variables.tf
 delete mode 100644 contrib/terraform/hetzner/modules/kubernetes-cluster-flatcar/versions.tf
 delete mode 100644 contrib/terraform/hetzner/modules/kubernetes-cluster/main.tf
 delete mode 100644 contrib/terraform/hetzner/modules/kubernetes-cluster/output.tf
 delete mode 100644 contrib/terraform/hetzner/modules/kubernetes-cluster/templates/cloud-init.tmpl
 delete mode 100644 contrib/terraform/hetzner/modules/kubernetes-cluster/variables.tf
 delete mode 100644 contrib/terraform/hetzner/modules/kubernetes-cluster/versions.tf
 delete mode 100644 contrib/terraform/hetzner/output.tf
 delete mode 100644 contrib/terraform/hetzner/sample-inventory/cluster.tfvars
 delete mode 120000 contrib/terraform/hetzner/sample-inventory/group_vars
 delete mode 100644 contrib/terraform/hetzner/templates/inventory.tpl
 delete mode 100644 contrib/terraform/hetzner/variables.tf
 delete mode 100644 contrib/terraform/hetzner/versions.tf
 delete mode 100644 contrib/terraform/nifcloud/.gitignore
 delete mode 100644 contrib/terraform/nifcloud/README.md
 delete mode 100755 contrib/terraform/nifcloud/generate-inventory.sh
 delete mode 100644 contrib/terraform/nifcloud/main.tf
 delete mode 100644 contrib/terraform/nifcloud/modules/kubernetes-cluster/main.tf
 delete mode 100644 contrib/terraform/nifcloud/modules/kubernetes-cluster/outputs.tf
 delete mode 100644 contrib/terraform/nifcloud/modules/kubernetes-cluster/templates/userdata.tftpl
 delete mode 100644 contrib/terraform/nifcloud/modules/kubernetes-cluster/terraform.tf
 delete mode 100644 contrib/terraform/nifcloud/modules/kubernetes-cluster/variables.tf
 delete mode 100644 contrib/terraform/nifcloud/output.tf
 delete mode 100644 contrib/terraform/nifcloud/sample-inventory/cluster.tfvars
 delete mode 120000 contrib/terraform/nifcloud/sample-inventory/group_vars
 delete mode 100644 contrib/terraform/nifcloud/terraform.tf
 delete mode 100644 contrib/terraform/nifcloud/variables.tf
 delete mode 100644 contrib/terraform/openstack/.gitignore
 delete mode 100644 contrib/terraform/openstack/README.md
 delete mode 120000 contrib/terraform/openstack/hosts
 delete mode 100644 contrib/terraform/openstack/kubespray.tf
 delete mode 100644 contrib/terraform/openstack/modules/compute/ansible_bastion_template.txt
 delete mode 100644 contrib/terraform/openstack/modules/compute/main.tf
 delete mode 100644 contrib/terraform/openstack/modules/compute/outputs.tf
 delete mode 100644 contrib/terraform/openstack/modules/compute/templates/cloudinit.yaml.tmpl
 delete mode 100644 contrib/terraform/openstack/modules/compute/variables.tf
 delete mode 100644 contrib/terraform/openstack/modules/compute/versions.tf
 delete mode 100644 contrib/terraform/openstack/modules/ips/main.tf
 delete mode 100644 contrib/terraform/openstack/modules/ips/outputs.tf
 delete mode 100644 contrib/terraform/openstack/modules/ips/variables.tf
 delete mode 100644 contrib/terraform/openstack/modules/ips/versions.tf
 delete mode 100644 contrib/terraform/openstack/modules/loadbalancer/main.tf
 delete mode 100644 contrib/terraform/openstack/modules/loadbalancer/variables.tf
 delete mode 100644 contrib/terraform/openstack/modules/loadbalancer/versions.tf
 delete mode 100644 contrib/terraform/openstack/modules/network/main.tf
 delete mode 100644 contrib/terraform/openstack/modules/network/outputs.tf
 delete mode 100644 contrib/terraform/openstack/modules/network/variables.tf
 delete mode 100644 contrib/terraform/openstack/modules/network/versions.tf
 delete mode 100644 contrib/terraform/openstack/sample-inventory/cluster.tfvars
 delete mode 120000 contrib/terraform/openstack/sample-inventory/group_vars
 delete mode 100644 contrib/terraform/openstack/variables.tf
 delete mode 100644 contrib/terraform/openstack/versions.tf
 delete mode 100755 contrib/terraform/terraform.py
 delete mode 100644 contrib/terraform/upcloud/README.md
 delete mode 100644 contrib/terraform/upcloud/cluster-settings.tfvars
 delete mode 100644 contrib/terraform/upcloud/main.tf
 delete mode 100644 contrib/terraform/upcloud/modules/kubernetes-cluster/main.tf
 delete mode 100644 contrib/terraform/upcloud/modules/kubernetes-cluster/output.tf
 delete mode 100644 contrib/terraform/upcloud/modules/kubernetes-cluster/variables.tf
 delete mode 100644 contrib/terraform/upcloud/modules/kubernetes-cluster/versions.tf
 delete mode 100644 contrib/terraform/upcloud/output.tf
 delete mode 100644 contrib/terraform/upcloud/sample-inventory/cluster.tfvars
 delete mode 120000 contrib/terraform/upcloud/sample-inventory/group_vars
 delete mode 100644 contrib/terraform/upcloud/templates/inventory.tpl
 delete mode 100644 contrib/terraform/upcloud/variables.tf
 delete mode 100644 contrib/terraform/upcloud/versions.tf
 delete mode 100644 contrib/terraform/vsphere/README.md
 delete mode 100644 contrib/terraform/vsphere/default.tfvars
 delete mode 100644 contrib/terraform/vsphere/main.tf
 delete mode 100644 contrib/terraform/vsphere/modules/kubernetes-cluster/main.tf
 delete mode 100644 contrib/terraform/vsphere/modules/kubernetes-cluster/output.tf
 delete mode 100644 contrib/terraform/vsphere/modules/kubernetes-cluster/templates/cloud-init.tpl
 delete mode 100644 contrib/terraform/vsphere/modules/kubernetes-cluster/templates/metadata.tpl
 delete mode 100644 contrib/terraform/vsphere/modules/kubernetes-cluster/templates/vapp-cloud-init.tpl
 delete mode 100644 contrib/terraform/vsphere/modules/kubernetes-cluster/variables.tf
 delete mode 100644 contrib/terraform/vsphere/modules/kubernetes-cluster/versions.tf
 delete mode 100644 contrib/terraform/vsphere/output.tf
 delete mode 100644 contrib/terraform/vsphere/sample-inventory/cluster.tfvars
 delete mode 120000 contrib/terraform/vsphere/sample-inventory/group_vars
 delete mode 100644 contrib/terraform/vsphere/templates/inventory.tpl
 delete mode 100644 contrib/terraform/vsphere/variables.tf
 delete mode 100644 contrib/terraform/vsphere/versions.tf
 delete mode 100644 index.html
 create mode 100644 inventory/2SpeedLab/group_vars/all/all.yml
 create mode 100644 inventory/2SpeedLab/group_vars/all/containerd.yml
 create mode 100644 inventory/2SpeedLab/group_vars/all/coreos.yml
 create mode 100644 inventory/2SpeedLab/group_vars/all/cri-o.yml
 create mode 100644 inventory/2SpeedLab/group_vars/all/database_nodes.yml
 create mode 100644 inventory/2SpeedLab/group_vars/all/etcd.yml
 create mode 100644 inventory/2SpeedLab/group_vars/all/offline.yml
 create mode 100644 inventory/2SpeedLab/group_vars/k8s_cluster/addons.yml
 create mode 100644 inventory/2SpeedLab/group_vars/k8s_cluster/k8s-cluster.yml
 create mode 100644 inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-calico.yml
 create mode 100644 inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-cilium.yml
 create mode 100644 inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-custom-cni.yml
 create mode 100644 inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-flannel.yml
 create mode 100644 inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-kube-router.yml
 create mode 100644 inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-macvlan.yml
 create mode 100644 inventory/2SpeedLab/group_vars/k8s_cluster/kube_control_plane.yml
 create mode 100644 inventory/2SpeedLab/inventory.ini
 delete mode 100644 logo/LICENSE
 delete mode 100644 logo/logo-clear.png
 delete mode 100644 logo/logo-clear.svg
 delete mode 100644 logo/logo-dark.png
 delete mode 100644 logo/logo-dark.svg
 delete mode 100644 logo/logo-text-clear.png
 delete mode 100644 logo/logo-text-clear.svg
 delete mode 100644 logo/logo-text-dark.png
 delete mode 100644 logo/logo-text-dark.svg
 delete mode 100644 logo/logo-text-mixed.png
 delete mode 100644 logo/logo-text-mixed.svg
 delete mode 100644 logo/logos.pdf
 delete mode 100644 logo/usage_guidelines.md
 delete mode 100644 pipeline.Dockerfile
 delete mode 100644 scripts/gitlab-runner.sh
 delete mode 100644 test-infra/vagrant-docker/Dockerfile
 delete mode 100644 test-infra/vagrant-docker/README.md
 delete mode 100755 test-infra/vagrant-docker/build.sh

diff --git a/.github/ISSUE_TEMPLATE/bug-report.yaml b/.github/ISSUE_TEMPLATE/bug-report.yaml
deleted file mode 100644
index 39411ce99b2..00000000000
--- a/.github/ISSUE_TEMPLATE/bug-report.yaml
+++ /dev/null
@@ -1,147 +0,0 @@
----
-name: Bug Report
-description: Report a bug encountered while using Kubespray
-labels: kind/bug
-body:
-  - type: markdown
-    attributes:
-      value: |
-        Please, be ready for followup questions, and please respond in a timely
-        manner. If we can't reproduce a bug or think a feature already exists, we
-        might close your issue. If we're wrong, PLEASE feel free to reopen it and
-        explain why.
-  - type: textarea
-    id: problem
-    attributes:
-      label: What happened?
-      description: |
-        Please provide as much info as possible. Not doing so may result in your bug not being addressed in a timely manner.
-    validations:
-      required: true
-  - type: textarea
-    id: expected
-    attributes:
-      label: What did you expect to happen?
-    validations:
-      required: true
-
-  - type: textarea
-    id: repro
-    attributes:
-      label: How can we reproduce it (as minimally and precisely as possible)?
-    validations:
-      required: true
-
-  - type: markdown
-    attributes:
-      value: '### Environment'
-
-  - type: dropdown
-    id: os
-    attributes:
-      label: OS
-      options:
-        - 'RHEL 9'
-        - 'RHEL 8'
-        - 'Fedora 40'
-        - 'Ubuntu 24'
-        - 'Ubuntu 22'
-        - 'Ubuntu 20'
-        - 'Debian 12'
-        - 'Debian 11'
-        - 'Flatcar Container Linux'
-        - 'openSUSE Leap'
-        - 'openSUSE Tumbleweed'
-        - 'Oracle Linux 9'
-        - 'Oracle Linux 8'
-        - 'AlmaLinux 9'
-        - 'AlmaLinux 8'
-        - 'Rocky Linux 9'
-        - 'Rocky Linux 8'
-        - 'Amazon Linux 2'
-        - 'Kylin Linux Advanced Server V10'
-        - 'UOS Linux 20'
-        - 'openEuler 24'
-        - 'openEuler 22'
-        - 'openEuler 20'
-        - 'Other|Unsupported'
-    validations:
-      required: true
-
-  - type: textarea
-    id: ansible_version
-    attributes:
-      label: Version of Ansible
-      placeholder: 'ansible --version'
-    validations:
-      required: true
-
-  - type: input
-    id: python_version
-    attributes:
-      label: Version of Python
-      placeholder: 'python --version'
-    validations:
-      required: true
-
-  - type: input
-    id: kubespray_version
-    attributes:
-      label: Version of Kubespray (commit)
-      placeholder: 'git rev-parse --short HEAD'
-    validations:
-      required: true
-
-  - type: dropdown
-    id: network_plugin
-    attributes:
-      label: Network plugin used
-      options:
-        - calico
-        - cilium
-        - cni
-        - custom_cni
-        - flannel
-        - kube-ovn
-        - kube-router
-        - macvlan
-        - meta
-        - multus
-        - ovn4nfv
-    validations:
-      required: true
-
-  - type: textarea
-    id: inventory
-    attributes:
-      label: Full inventory with variables
-      placeholder: 'ansible -i inventory/sample/inventory.ini all -m debug -a "var=hostvars[inventory_hostname]"'
-      description: We recommend using snippets services like https://gist.github.com/ etc.
-    validations:
-      required: true
-
-  - type: input
-    id: ansible_command
-    attributes:
-      label: Command used to invoke ansible
-    validations:
-      required: true
-
-  - type: textarea
-    id: ansible_output
-    attributes:
-      label: Output of ansible run
-      description: We recommend using snippets services like https://gist.github.com/ etc.
-    validations:
-      required: true
-
-  - type: textarea
-    id: anything_else
-    attributes:
-      label: Anything else we need to know
-      description: |
-        By running scripts/collect-info.yaml you can get a lot of useful informations.
-        Script can be started by:
-        ansible-playbook -i <inventory_file_path> -u <ssh_user> -e ansible_ssh_user=<ssh_user> -b --become-user=root -e dir=`pwd` scripts/collect-info.yaml
-        (If you using CoreOS remember to add '-e ansible_python_interpreter=/opt/bin/python').
-        After running this command you can find logs in `pwd`/logs.tar.gz. You can even upload somewhere entire file and paste link here
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
deleted file mode 100644
index 2ef2e3760da..00000000000
--- a/.github/ISSUE_TEMPLATE/config.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-blank_issues_enabled: false
-contact_links:
-  - name: Support Request
-    url: https://kubernetes.slack.com/channels/kubespray
-    about: Support request or question relating to Kubernetes
diff --git a/.github/ISSUE_TEMPLATE/enhancement.yaml b/.github/ISSUE_TEMPLATE/enhancement.yaml
deleted file mode 100644
index c0232069e9b..00000000000
--- a/.github/ISSUE_TEMPLATE/enhancement.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-name: Enhancement Request
-description: Suggest an enhancement to the Kubespray project
-labels: kind/feature
-body:
-  - type: markdown
-    attributes:
-      value: Please only use this template for submitting enhancement requests
-  - type: textarea
-    id: what
-    attributes:
-      label: What would you like to be added
-    validations:
-      required: true
-  - type: textarea
-    id: why
-    attributes:
-      label: Why is this needed
-    validations:
-      required: true
diff --git a/.github/ISSUE_TEMPLATE/failing-test.yaml b/.github/ISSUE_TEMPLATE/failing-test.yaml
deleted file mode 100644
index 94eb1bb784e..00000000000
--- a/.github/ISSUE_TEMPLATE/failing-test.yaml
+++ /dev/null
@@ -1,41 +0,0 @@
----
-name: Failing Test
-description: Report test failures in Kubespray CI jobs
-labels: kind/failing-test
-body:
-  - type: markdown
-    attributes:
-      value: Please only use this template for submitting reports about failing tests in Kubespray CI jobs
-  - type: textarea
-    id: failing_jobs
-    attributes:
-      label: Which jobs are failing ?
-    validations:
-      required: true
-
-  - type: textarea
-    id: failing_tests
-    attributes:
-      label: Which tests are failing ?
-    validations:
-      required: true
-
-  - type: input
-    id: since_when
-    attributes:
-      label: Since when has it been failing ?
-    validations:
-      required: true
-
-  - type: textarea
-    id: failure_reason
-    attributes:
-      label: Reason for failure
-      description: If you don't know and have no guess, just put "Unknown"
-    validations:
-      required: true
-
-  - type: textarea
-    id: anything_else
-    attributes:
-      label: Anything else we need to know
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
deleted file mode 100644
index 2a4d3c865af..00000000000
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ /dev/null
@@ -1,44 +0,0 @@
-
-**What type of PR is this?**
-> Uncomment only one ` /kind <>` line, hit enter to put that in a new line, and remove leading whitespaces from that line:
->
-> /kind api-change
-> /kind bug
-> /kind cleanup
-> /kind design
-> /kind documentation
-> /kind failing-test
-> /kind feature
-> /kind flake
-
-**What this PR does / why we need it**:
-
-**Which issue(s) this PR fixes**:
-
-Fixes #
-
-**Special notes for your reviewer**:
-
-**Does this PR introduce a user-facing change?**:
-
-```release-note
-
-```
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
deleted file mode 100644
index 74b909c05b9..00000000000
--- a/.github/dependabot.yml
+++ /dev/null
@@ -1,21 +0,0 @@
-version: 2
-updates:
-  - package-ecosystem: "pip"
-    directory: "/"
-    schedule:
-      interval: "weekly"
-    labels:
-      - dependencies
-      - release-note-none
-    groups:
-      molecule:
-        patterns:
-          - molecule
-          - molecule-plugins*
-  - package-ecosystem: "github-actions"
-    directory: "/"
-    labels:
-      - release-note-none
-      - ci-short
-    schedule:
-      interval: "weekly"
diff --git a/.github/workflows/auto-label-os.yml b/.github/workflows/auto-label-os.yml
deleted file mode 100644
index f9ebb3ed5c2..00000000000
--- a/.github/workflows/auto-label-os.yml
+++ /dev/null
@@ -1,32 +0,0 @@
-name: Issue labeler
-on:
-  issues:
-    types: [opened]
-
-permissions:
-  contents: read
-
-jobs:
-  label-component:
-    runs-on: ubuntu-latest
-    permissions:
-      issues: write
-
-    steps:
-      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
-
-      - name: Parse issue form
-        uses: stefanbuck/github-issue-parser@2ea9b35a8c584529ed00891a8f7e41dc46d0441e
-        id: issue-parser
-        with:
-          template-path: .github/ISSUE_TEMPLATE/bug-report.yaml
-
-      - name: Set labels based on OS field
-        uses: redhat-plumbers-in-action/advanced-issue-labeler@0db433d412193574252480b4fc22f2e4319a4ea3
-        with:
-          issue-form: ${{ steps.issue-parser.outputs.jsonString }}
-          section: os
-          block-list: |
-            None
-            Other
-          token: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/upgrade-patch-versions-schedule.yml b/.github/workflows/upgrade-patch-versions-schedule.yml
deleted file mode 100644
index 470deb58151..00000000000
--- a/.github/workflows/upgrade-patch-versions-schedule.yml
+++ /dev/null
@@ -1,55 +0,0 @@
-name: Upgrade Kubespray components with new patches versions - all branches
-
-on:
-  schedule:
-    - cron: '22 2 * * *' # every day, 02:22 UTC
-  workflow_dispatch:
-
-permissions: {}
-jobs:
-  get-releases-branches:
-    if: github.repository == 'kubernetes-sigs/kubespray'
-    runs-on: ubuntu-latest
-    outputs:
-      branches: ${{ steps.get-branches.outputs.data }}
-    steps:
-      - uses: octokit/graphql-action@8ad880e4d437783ea2ab17010324de1075228110
-        id: get-branches
-        with:
-          query: |
-            query get_release_branches($owner:String!, $name:String!) {
-              repository(owner:$owner, name:$name) {
-                refs(refPrefix: "refs/heads/",
-                  first: 1, # TODO increment once we have release branch with the new checksums format
-                  query: "release-",
-                  orderBy: {
-                    field: ALPHABETICAL,
-                    direction: DESC
-                  }) {
-                  nodes {
-                    name
-                  }
-                }
-              }
-            }
-          variables: |
-            owner: ${{ github.repository_owner }}
-            name: ${{ github.event.repository.name }}
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-
-  update-versions:
-    needs: get-releases-branches
-    strategy:
-      fail-fast: false
-      matrix:
-        branch:
-          - name: ${{ github.event.repository.default_branch }}
-          - ${{ fromJSON(needs.get-releases-branches.outputs.branches).repository.refs.nodes }}
-    uses: ./.github/workflows/upgrade-patch-versions.yml
-    permissions:
-      contents: write
-      pull-requests: write
-    name: Update patch updates on ${{ matrix.branch.name }}
-    with:
-      branch: ${{ matrix.branch.name }}
diff --git a/.github/workflows/upgrade-patch-versions.yml b/.github/workflows/upgrade-patch-versions.yml
deleted file mode 100644
index abb41cd4cb0..00000000000
--- a/.github/workflows/upgrade-patch-versions.yml
+++ /dev/null
@@ -1,44 +0,0 @@
-on:
-  workflow_call:
-    inputs:
-      branch:
-        description: Which branch to update with new patch versions
-        default: master
-        required: true
-        type: string
-
-jobs:
-  update-patch-versions:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
-        with:
-          ref: ${{ inputs.branch }}
-      - uses: actions/setup-python@v5
-        with:
-          python-version: '3.13'
-          cache: 'pip'
-      - run: pip install scripts/component_hash_update pre-commit
-      - run: update-hashes
-        env:
-          API_KEY: ${{ secrets.GITHUB_TOKEN }}
-      - uses: actions/cache@v4
-        with:
-          key: pre-commit-hook-propagate
-          path: |
-            ~/.cache/pre-commit
-      - run: pre-commit run --all-files propagate-ansible-variables
-        continue-on-error: true
-      - uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e
-        with:
-          commit-message: Patch versions updates
-          title: Patch versions updates - ${{ inputs.branch }}
-          labels: bot
-          branch: component_hash_update/${{ inputs.branch }}
-          sign-commits: true
-          body: |
-            /kind feature
-
-            ```release-note
-            NONE
-            ```
diff --git a/.gitignore b/.gitignore
index fa68d5606e9..62b9bf87dba 100644
--- a/.gitignore
+++ b/.gitignore
@@ -25,7 +25,6 @@ vagrant/
 plugins/mitogen
 
 # Ansible inventory
-inventory/*
 !inventory/local
 !inventory/sample
 inventory/*/artifacts/
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
deleted file mode 100644
index 24e3876985d..00000000000
--- a/.gitlab-ci.yml
+++ /dev/null
@@ -1,66 +0,0 @@
----
-stages:
-  - build
-  - test
-  - deploy-part1
-  - deploy-extended
-
-variables:
-  FAILFASTCI_NAMESPACE: 'kargo-ci'
-  GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
-  GIT_CONFIG_COUNT: 2
-  GIT_CONFIG_KEY_0: user.email
-  GIT_CONFIG_VALUE_0: "ci@kubespray.io"
-  GIT_CONFIG_KEY_1: user.name
-  GIT_CONFIG_VALUE_1: "Kubespray CI"
-  ANSIBLE_FORCE_COLOR: "true"
-  MAGIC: "ci check this"
-  GS_ACCESS_KEY_ID: $GS_KEY
-  GS_SECRET_ACCESS_KEY: $GS_SECRET
-  CONTAINER_ENGINE: docker
-  GCE_PREEMPTIBLE: "false"
-  ANSIBLE_KEEP_REMOTE_FILES: "1"
-  ANSIBLE_CONFIG: ./tests/ansible.cfg
-  ANSIBLE_REMOTE_USER: kubespray
-  ANSIBLE_PRIVATE_KEY_FILE: /tmp/id_rsa
-  ANSIBLE_INVENTORY: /tmp/inventory
-  ANSIBLE_STDOUT_CALLBACK: "debug"
-  RESET_CHECK: "false"
-  REMOVE_NODE_CHECK: "false"
-  UPGRADE_TEST: "false"
-  MITOGEN_ENABLE: "false"
-  ANSIBLE_VERBOSITY: 2
-  RECOVER_CONTROL_PLANE_TEST: "false"
-  RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:]:kube_control_plane[1:]"
-  OPENTOFU_VERSION: v1.9.1
-  PIPELINE_IMAGE: "$CI_REGISTRY_IMAGE/pipeline:${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}"
-
-before_script:
-  - ./tests/scripts/rebase.sh
-  - mkdir -p cluster-dump $ANSIBLE_INVENTORY
-
-.job: &job
-  tags:
-    - ffci
-  image: $PIPELINE_IMAGE
-  artifacts:
-    when: always
-    paths:
-      - cluster-dump/
-  needs:
-    - pipeline-image
-
-.job-moderated:
-  extends: .job
-  needs:
-    - pipeline-image
-    - pre-commit # lint
-    - vagrant-validate # lint
-
-include:
-  - .gitlab-ci/build.yml
-  - .gitlab-ci/lint.yml
-  - .gitlab-ci/terraform.yml
-  - .gitlab-ci/kubevirt.yml
-  - .gitlab-ci/vagrant.yml
-  - .gitlab-ci/molecule.yml
diff --git a/.gitlab-ci/build.yml b/.gitlab-ci/build.yml
deleted file mode 100644
index 92304a2e388..00000000000
--- a/.gitlab-ci/build.yml
+++ /dev/null
@@ -1,30 +0,0 @@
----
-pipeline-image:
-  cache:
-    key: $CI_COMMIT_REF_SLUG
-    paths:
-      - image-cache
-  tags:
-    - ffci
-  stage: build
-  image: moby/buildkit:rootless
-  variables:
-    BUILDKITD_FLAGS: --oci-worker-no-process-sandbox
-    CACHE_IMAGE: $CI_REGISTRY_IMAGE/pipeline:cache
-  # TODO: remove the override
-  # currently rebase.sh depends on bash (not available in the kaniko image)
-  # once we have a simpler rebase (which should be easy if the target branch ref is available as variable
-  # we'll be able to rebase here as well hopefully
-  before_script:
-    - mkdir -p ~/.docker
-    - echo "{\"auths\":{\"$CI_REGISTRY\":{\"auth\":\"$(echo -n ${CI_REGISTRY_USER}:${CI_REGISTRY_PASSWORD} | base64)\"}}}" > ~/.docker/config.json
-  script:
-    - |
-      buildctl-daemonless.sh build \
-        --frontend dockerfile.v0 \
-        --local context=$CI_PROJECT_DIR \
-        --local dockerfile=$CI_PROJECT_DIR \
-        --opt filename=pipeline.Dockerfile \
-        --export-cache type=registry,ref=$CACHE_IMAGE \
-        --import-cache type=registry,ref=$CACHE_IMAGE \
-        --output type=image,name=$PIPELINE_IMAGE,push=true
diff --git a/.gitlab-ci/kubevirt.yml b/.gitlab-ci/kubevirt.yml
deleted file mode 100644
index c5ac51acc4e..00000000000
--- a/.gitlab-ci/kubevirt.yml
+++ /dev/null
@@ -1,153 +0,0 @@
----
-.kubevirt:
-  extends: .job-moderated
-  interruptible: true
-  script:
-    - ansible-playbook tests/cloud_playbooks/create-kubevirt.yml
-      -c local -e @"tests/files/${TESTCASE}.yml"
-    - ./tests/scripts/testcases_run.sh
-  variables:
-    ANSIBLE_TIMEOUT: "120"
-  tags:
-    - ffci
-  needs:
-    - pipeline-image
-
-# TODO: generate testcases matrixes from the files in tests/files/
-# this is needed to avoid the need for PR rebasing when a job was added or removed in the target branch
-# (currently, a removed job in the target branch breaks the tests, because the
-# pipeline definition is parsed by gitlab before the rebase.sh script)
-# CI template for PRs
-pr:
-  stage: deploy-part1
-  rules:
-    - if: $PR_LABELS =~ /.*ci-short.*/
-      when: manual
-      allow_failure: true
-    - if: $CI_COMMIT_BRANCH =~ /^pr-.*$/
-      when: on_success
-    - if: $CI_PIPELINE_SOURCE == "schedule" && $CI_PIPELINE_SCHEDULE_DESCRIPTION == "daily-ci"
-      when: on_success
-    - when: manual
-      allow_failure: true
-  extends: .kubevirt
-  parallel:
-    matrix:
-      - TESTCASE:
-          - almalinux9-crio
-          - almalinux9-kube-ovn
-          - debian11-calico-collection
-          - debian11-macvlan
-          - debian12-cilium
-          - debian13-cilium
-          - fedora39-kube-router
-          - openeuler24-calico
-          - rockylinux9-cilium
-          - ubuntu22-calico-all-in-one
-          - ubuntu22-calico-all-in-one-upgrade
-          - ubuntu24-calico-etcd-datastore
-          - ubuntu24-calico-all-in-one-hardening
-          - ubuntu24-cilium-sep
-          - ubuntu24-flannel-collection
-          - ubuntu24-kube-router-sep
-          - ubuntu24-kube-router-svc-proxy
-          - ubuntu24-ha-separate-etcd
-          - flatcar4081-calico
-          - fedora40-flannel-crio-collection-scale
-
-# The ubuntu24-calico-all-in-one jobs are meant as early stages to prevent running the full CI if something is horribly broken
-ubuntu24-calico-all-in-one:
-  stage: deploy-part1
-  extends: .kubevirt
-  variables:
-    TESTCASE: ubuntu24-calico-all-in-one
-  rules:
-    - if: $CI_COMMIT_BRANCH =~ /^pr-.*$/
-      when: on_success
-    - if: $CI_PIPELINE_SOURCE == "schedule" && $CI_PIPELINE_SCHEDULE_DESCRIPTION == "daily-ci"
-      when: on_success
-    - when: manual
-      allow_failure: true
-
-pr_full:
-  extends: .kubevirt
-  stage: deploy-extended
-  rules:
-    - if: $PR_LABELS =~ /.*ci-full.*/
-      when: on_success
-    - if: $CI_PIPELINE_SOURCE == "schedule" && $CI_PIPELINE_SCHEDULE_DESCRIPTION == "daily-ci"
-      when: on_success
-    # Else run as manual
-    - when: manual
-      allow_failure: true
-  parallel:
-    matrix:
-      - TESTCASE:
-          - almalinux9-calico-ha-ebpf
-          - almalinux9-calico-nodelocaldns-secondary
-          - debian11-custom-cni
-          - debian11-kubelet-csr-approver
-          - debian12-custom-cni-helm
-          - fedora39-calico-swap-selinux
-          - fedora39-crio
-          - ubuntu24-calico-ha-wireguard
-          - ubuntu24-flannel-ha
-          - ubuntu24-flannel-ha-once
-
-# Need an update of the container image to use schema v2
-# update: quay.io/kubespray/vm-amazon-linux-2:latest
-manual:
-  extends: pr_full
-  parallel:
-    matrix:
-      - TESTCASE:
-          - amazon-linux-2-all-in-one
-  rules:
-    - when: manual
-      allow_failure: true
-
-pr_extended:
-  extends: .kubevirt
-  stage: deploy-extended
-  rules:
-    - if: $PR_LABELS =~ /.*(ci-extended|ci-full).*/
-      when: on_success
-    - if: $CI_PIPELINE_SOURCE == "schedule" && $CI_PIPELINE_SCHEDULE_DESCRIPTION == "daily-ci"
-      when: on_success
-    - when: manual
-      allow_failure: true
-  parallel:
-    matrix:
-      - TESTCASE:
-          - almalinux9-calico
-          - almalinux9-calico-remove-node
-          - almalinux9-docker
-          - debian11-docker
-          - debian12-calico
-          - debian12-docker
-          - debian13-calico
-          - rockylinux9-calico
-          - ubuntu22-all-in-one-docker
-          - ubuntu24-all-in-one-docker
-          - ubuntu24-calico-all-in-one
-          - ubuntu24-calico-etcd-kubeadm
-          - ubuntu24-flannel
-
-# TODO: migrate to pr-full, fix the broken ones
-periodic:
-  allow_failure: true
-  extends: .kubevirt
-  rules:
-    - if: $CI_PIPELINE_SOURCE == "schedule" && $CI_PIPELINE_SCHEDULE_DESCRIPTION == "daily-ci"
-      when: on_success
-  parallel:
-    matrix:
-      - TESTCASE:
-          - debian11-calico-upgrade
-          - debian11-calico-upgrade-once
-          - debian12-cilium-svc-proxy
-          - fedora39-calico-selinux
-          - fedora40-docker-calico
-          - ubuntu24-calico-etcd-kubeadm-upgrade-ha
-          - ubuntu24-calico-ha-recover
-          - ubuntu24-calico-ha-recover-noquorum
diff --git a/.gitlab-ci/lint.yml b/.gitlab-ci/lint.yml
deleted file mode 100644
index 809ad09ab9e..00000000000
--- a/.gitlab-ci/lint.yml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-pre-commit:
-  stage: test
-  tags:
-    - ffci
-  image: 'ghcr.io/pre-commit-ci/runner-image@sha256:fe01a6ec51b298412990b88627c3973b1146c7304f930f469bafa29ba60bcde9'
-  variables:
-    PRE_COMMIT_HOME: ${CI_PROJECT_DIR}/.cache/pre-commit
-    ANSIBLE_STDOUT_CALLBACK: default
-  script:
-    - pre-commit run --all-files --show-diff-on-failure
-  cache:
-    key: pre-commit-2
-    paths:
-      - ${PRE_COMMIT_HOME}
-    when: 'always'
-  needs: []
-
-vagrant-validate:
-  extends: .job
-  stage: test
-  tags: [ffci]
-  variables:
-    VAGRANT_VERSION: 2.3.7
-  script:
-    - ./tests/scripts/vagrant-validate.sh
diff --git a/.gitlab-ci/molecule.yml b/.gitlab-ci/molecule.yml
deleted file mode 100644
index 23431566bf9..00000000000
--- a/.gitlab-ci/molecule.yml
+++ /dev/null
@@ -1,55 +0,0 @@
----
-.molecule:
-  tags: [ffci]
-  rules: # run on ci-short as well
-    - if: $CI_COMMIT_BRANCH =~ /^pr-.*$/
-      when: on_success
-    - if: $CI_PIPELINE_SOURCE == "schedule" && $CI_PIPELINE_SCHEDULE_DESCRIPTION == "daily-ci"
-      when: on_success
-    - when: manual
-      allow_failure: true
-  stage: deploy-part1
-  image: $PIPELINE_IMAGE
-  needs:
-    - pipeline-image
-  script:
-    - ./tests/scripts/molecule_run.sh
-  after_script:
-    - rm -fr molecule_logs
-    - mkdir -p molecule_logs
-    - find ~/.cache/molecule/ \( -name '*.out' -o -name '*.err' \) -type f | xargs tar -uf molecule_logs/molecule.tar
-    - gzip molecule_logs/molecule.tar
-  artifacts:
-    when: always
-    paths:
-      - molecule_logs/
-
-molecule:
-  extends: .molecule
-  script:
-    - ./tests/scripts/molecule_run.sh -i $ROLE
-  parallel:
-    matrix:
-      - ROLE:
-          - container-engine/cri-dockerd
-          - container-engine/containerd
-          - container-engine/cri-o
-          - container-engine/gvisor
-          - container-engine/youki
-          - adduser
-          - bastion-ssh-config
-          - bootstrap_os
-
-molecule_full:
-  allow_failure: true
-  rules:
-    - if: $CI_PIPELINE_SOURCE == "schedule" && $CI_PIPELINE_SCHEDULE_DESCRIPTION == "daily-ci"
-      when: on_success
-    - when: manual
-      allow_failure: true
-  extends: molecule
-  parallel:
-    matrix:
-      - ROLE:
-          # FIXME : tests below are perma-failing
-          - container-engine/kata-containers
diff --git a/.gitlab-ci/terraform.yml b/.gitlab-ci/terraform.yml
deleted file mode 100644
index b5d19946425..00000000000
--- a/.gitlab-ci/terraform.yml
+++ /dev/null
@@ -1,120 +0,0 @@
----
-# Tests for contrib/terraform/
-.terraform_install:
-  extends: .job
-  needs:
-    - pipeline-image
-  variables:
-    TF_VAR_public_key_path: "${ANSIBLE_PRIVATE_KEY_FILE}.pub"
-    TF_VAR_ssh_private_key_path: $ANSIBLE_PRIVATE_KEY_FILE
-    CLUSTER: $CI_COMMIT_REF_NAME
-    TERRAFORM_STATE_ROOT: $CI_PROJECT_DIR
-  stage: deploy-part1
-  before_script:
-    - ./tests/scripts/rebase.sh
-    - mkdir -p cluster-dump $ANSIBLE_INVENTORY
-    - ./tests/scripts/opentofu_install.sh
-    - cp contrib/terraform/$PROVIDER/sample-inventory/cluster.tfvars .
-    - ln -rs -t $ANSIBLE_INVENTORY contrib/terraform/$PROVIDER/hosts
-    - tofu -chdir="contrib/terraform/$PROVIDER" init
-
-terraform_validate:
-  extends: .terraform_install
-  tags: [ffci]
-  only: ['master', /^pr-.*$/]
-  script:
-    - tofu -chdir="contrib/terraform/$PROVIDER" validate
-    - tofu -chdir="contrib/terraform/$PROVIDER" fmt -check -diff
-  stage: test
-  needs:
-    - pipeline-image
-  parallel:
-    matrix:
-      - PROVIDER:
-          - openstack
-          - aws
-          - exoscale
-          - hetzner
-          - vsphere
-          - upcloud
-          - nifcloud
-
-.terraform_apply:
-  extends: .terraform_install
-  tags: [ffci]
-  stage: deploy-extended
-  when: manual
-  only: [/^pr-.*$/]
-  variables:
-    ANSIBLE_INVENTORY_UNPARSED_FAILED: "true"
-    ANSIBLE_REMOTE_USER: ubuntu # the openstack terraform module does not handle custom user correctly
-    ANSIBLE_SSH_RETRIES: 15
-    TF_VAR_ssh_user: $ANSIBLE_REMOTE_USER
-    TF_VAR_cluster_name: $CI_JOB_ID
-  script:
-    # Set Ansible config
-    - cp ansible.cfg ~/.ansible.cfg
-    - ssh-keygen -N '' -f $ANSIBLE_PRIVATE_KEY_FILE -t rsa
-    - mkdir -p contrib/terraform/$PROVIDER/group_vars
-    # Random subnet to avoid routing conflicts
-    - export TF_VAR_subnet_cidr="10.$(( $RANDOM % 256 )).$(( $RANDOM % 256 )).0/24"
-    - tofu -chdir="contrib/terraform/$PROVIDER" apply -auto-approve -parallelism=1
-    - tests/scripts/testcases_run.sh
-  after_script:
-    # Cleanup regardless of exit code
-    - tofu -chdir="contrib/terraform/$PROVIDER" destroy -auto-approve
-
-# Elastx is generously donating resources for Kubespray on Openstack CI
-# Contacts: @gix @bl0m1
-.elastx_variables: &elastx_variables
-  OS_AUTH_URL: https://ops.elastx.cloud:5000
-  OS_PROJECT_ID: 564c6b461c6b44b1bb19cdb9c2d928e4
-  OS_PROJECT_NAME: kubespray_ci
-  OS_USER_DOMAIN_NAME: Default
-  OS_PROJECT_DOMAIN_ID: default
-  OS_USERNAME: kubespray@root314.com
-  OS_REGION_NAME: se-sto
-  OS_INTERFACE: public
-  OS_IDENTITY_API_VERSION: "3"
-  TF_VAR_router_id: "ab95917c-41fb-4881-b507-3a6dfe9403df"
-
-tf-elastx_cleanup:
-  tags: [ffci]
-  image: python
-  variables:
-    <<: *elastx_variables
-  before_script:
-    - pip install -r scripts/openstack-cleanup/requirements.txt
-  script:
-    - ./scripts/openstack-cleanup/main.py
-  allow_failure: true
-
-tf-elastx_ubuntu20-calico:
-  extends: .terraform_apply
-  stage: deploy-part1
-  when: on_success
-  allow_failure: true
-  variables:
-    <<: *elastx_variables
-    PROVIDER: openstack
-    ANSIBLE_TIMEOUT: "60"
-    TF_VAR_number_of_k8s_masters: "1"
-    TF_VAR_number_of_k8s_masters_no_floating_ip: "0"
-    TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
-    TF_VAR_number_of_etcd: "0"
-    TF_VAR_number_of_k8s_nodes: "1"
-    TF_VAR_number_of_k8s_nodes_no_floating_ip: "0"
-    TF_VAR_number_of_gfs_nodes_no_floating_ip: "0"
-    TF_VAR_number_of_bastions: "0"
-    TF_VAR_number_of_k8s_masters_no_etcd: "0"
-    TF_VAR_floatingip_pool: "elx-public1"
-    TF_VAR_dns_nameservers: '["1.1.1.1", "8.8.8.8", "8.8.4.4"]'
-    TF_VAR_use_access_ip: "0"
-    TF_VAR_external_net: "600b8501-78cb-4155-9c9f-23dfcba88828"
-    TF_VAR_network_name: "ci-$CI_JOB_ID"
-    TF_VAR_az_list: '["sto1"]'
-    TF_VAR_az_list_node: '["sto1"]'
-    TF_VAR_flavor_k8s_master: 3f73fc93-ec61-4808-88df-2580d94c1a9b # v1-standard-2
-    TF_VAR_flavor_k8s_node: 3f73fc93-ec61-4808-88df-2580d94c1a9b # v1-standard-2
-    TF_VAR_image: ubuntu-20.04-server-latest
-    TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
diff --git a/.gitlab-ci/vagrant.yml b/.gitlab-ci/vagrant.yml
deleted file mode 100644
index cc29a98e658..00000000000
--- a/.gitlab-ci/vagrant.yml
+++ /dev/null
@@ -1,49 +0,0 @@
----
-vagrant:
-  extends: .job-moderated
-  variables:
-    CI_PLATFORM: "vagrant"
-    SSH_USER: "vagrant"
-    VAGRANT_DEFAULT_PROVIDER: "libvirt"
-    KUBESPRAY_VAGRANT_CONFIG: tests/files/${TESTCASE}.rb
-    DOCKER_NAME: vagrant
-    VAGRANT_ANSIBLE_TAGS: facts
-    VAGRANT_HOME: "$CI_PROJECT_DIR/.vagrant.d"
-    PIP_CACHE_DIR: "$CI_PROJECT_DIR/.cache/pip"
-  tags: [ffci-vm-large]
-  image: quay.io/kubespray/vm-kubespray-ci:v13
-  services: []
-  before_script:
-    - echo $USER
-    - python3 -m venv citest
-    - source citest/bin/activate
-    - vagrant plugin expunge --reinstall --force --no-tty
-    - vagrant plugin install vagrant-libvirt
-    - pip install --no-compile --no-cache-dir pip -U
-    - pip install --no-compile --no-cache-dir -r $CI_PROJECT_DIR/requirements.txt
-    - pip install --no-compile --no-cache-dir -r $CI_PROJECT_DIR/tests/requirements.txt
-    - ./tests/scripts/vagrant_clean.sh
-  script:
-    - vagrant up
-    - ./tests/scripts/testcases_run.sh
-  after_script:
-    - vagrant destroy -f
-  cache:
-    key: $CI_JOB_NAME_SLUG
-    paths:
-      - .vagrant.d/boxes
-      - .cache/pip
-    policy: pull-push # TODO: change to "pull" when not on main
-  stage: deploy-extended
-  rules:
-    - if: $PR_LABELS =~ /.*(ci-extended|ci-full).*/
-      when: on_success
-    - if: $CI_PIPELINE_SOURCE == "schedule" && $CI_PIPELINE_SCHEDULE_DESCRIPTION == "daily-ci"
-      when: on_success
-    - when: manual
-      allow_failure: true
-  parallel:
-    matrix:
-      - TESTCASE:
-          - ubuntu24-calico-dual-stack
-          - ubuntu24-calico-ipv6only-stack
diff --git a/.mdlrc b/.mdlrc
deleted file mode 100644
index 8ca55a8cee5..00000000000
--- a/.mdlrc
+++ /dev/null
@@ -1 +0,0 @@
-style "#{File.dirname(__FILE__)}/.md_style.rb"
diff --git a/.nojekyll b/.nojekyll
deleted file mode 100644
index e69de29bb2d..00000000000
diff --git a/CHANGELOG.md b/CHANGELOG.md
deleted file mode 100644
index 13a4f6171db..00000000000
--- a/CHANGELOG.md
+++ /dev/null
@@ -1 +0,0 @@
-# See our release notes on [GitHub](https://github.com/kubernetes-sigs/kubespray/releases)
diff --git a/CNAME b/CNAME
deleted file mode 100644
index e5bd1ffa1bb..00000000000
--- a/CNAME
+++ /dev/null
@@ -1 +0,0 @@
-kubespray.io
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
deleted file mode 100644
index 08f2f947589..00000000000
--- a/CONTRIBUTING.md
+++ /dev/null
@@ -1,47 +0,0 @@
-# Contributing guidelines
-
-## How to become a contributor and submit your own code
-
-### Environment setup
-
-It is recommended to use filter to manage the GitHub email notification, see [examples for setting filters to Kubernetes Github notifications](https://github.com/kubernetes/community/blob/master/communication/best-practices.md#examples-for-setting-filters-to-kubernetes-github-notifications)
-
-To install development dependencies you can set up a python virtual env with the necessary dependencies:
-
-```ShellSession
-virtualenv venv
-source venv/bin/activate
-pip install -r tests/requirements.txt
-ansible-galaxy install -r tests/requirements.yml
-```
-
-#### Linting
-
-Kubespray uses [pre-commit](https://pre-commit.com) hook configuration to run several linters, please install this tool and use it to run validation tests before submitting a PR.
-
-```ShellSession
-pre-commit install
-pre-commit run -a # To run pre-commit hook on all files in the repository, even if they were not modified
-```
-
-#### Molecule
-
-[molecule](https://github.com/ansible-community/molecule) is designed to help the development and testing of Ansible roles. In Kubespray you can run it all for all roles with `./tests/scripts/molecule_run.sh` or for a specific role (that you are working with) with `molecule test` from the role directory (`cd roles/my-role`).
-
-When developing or debugging a role it can be useful to run `molecule create` and `molecule converge` separately. Then you can use `molecule login` to SSH into the test environment.
-
-#### Vagrant
-
-Vagrant with VirtualBox or libvirt driver helps you to quickly spin test clusters to test things end to end. See [README.md#vagrant](README.md)
-
-### Contributing A Patch
-
-1. Submit an issue describing your proposed change to the repo in question.
-2. The [repo owners](OWNERS) will respond to your issue promptly.
-3. Fork the desired repo, develop and test your code changes.
-4. Install [pre-commit](https://pre-commit.com) and install it in your development repo.
-5. Addess any pre-commit validation failures.
-6. Sign the CNCF CLA ()
-7. Submit a pull request.
-8. Work with the reviewers on their suggestions.
-9. Ensure to rebase to the HEAD of your target branch and squash un-necessary commits () before final merger of your contribution.
diff --git a/LICENSE b/LICENSE
deleted file mode 100644
index 0b540e19a34..00000000000
--- a/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2016 Kubespray - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/OWNERS b/OWNERS deleted file mode 100644 index e4ac17f4556..00000000000 --- a/OWNERS +++ /dev/null @@ -1,8 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -approvers: - - kubespray-approvers -reviewers: - - kubespray-reviewers -emeritus_approvers: - - kubespray-emeritus_approvers diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES deleted file mode 100644 index ff747044e1a..00000000000 --- a/OWNERS_ALIASES +++ /dev/null @@ -1,27 +0,0 @@ -aliases: - kubespray-approvers: - - ant31 - - mzaian - - tico88612 - - vannten - - yankay - kubespray-reviewers: - - cyclinder - - erikjiang - - mrfreezeex - - mzaian - - tico88612 - - vannten - - yankay - kubespray-emeritus_approvers: - - atoms - - chadswen - - cristicalin - - floryut - - liupeng0518 - - luckysb - - mattymo - - miouge1 - - oomichi - - riverzhang - - woopstar diff --git a/README.md b/README.md index cb8ed2a876a..1257ec79d5d 100644 --- a/README.md +++ b/README.md @@ -1,15 +1,9 @@ # Deploy a Production Ready Kubernetes Cluster -![Kubernetes Logo](https://raw.githubusercontent.com/kubernetes-sigs/kubespray/master/docs/img/kubernetes-logo.png) - -If you have questions, check the documentation at [kubespray.io](https://kubespray.io) and join us on the [kubernetes slack](https://kubernetes.slack.com), channel **\#kubespray**. -You can get your invite [here](http://slack.k8s.io/) - -- Can be deployed on **[AWS](docs/cloud_providers/aws.md), GCE, [Azure](docs/cloud_providers/azure.md), [OpenStack](docs/cloud_controllers/openstack.md), [vSphere](docs/cloud_controllers/vsphere.md), [Equinix Metal](docs/cloud_providers/equinix-metal.md) (bare metal), Oracle Cloud Infrastructure (Experimental), or Baremetal** +## This is the k8s version for 2SpeedLab - **Highly available** cluster - **Composable** (Choice of the network plugin for instance) - Supports most popular **Linux distributions** -- **Continuous integration tests** ## Quick Start @@ -26,35 +20,6 @@ docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inve # Inside the container you may now run the kubespray playbooks: ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml ``` - -### Ansible - -#### Usage - -See [Getting started](/docs/getting_started/getting-started.md) - -#### Collection - -See [here](docs/ansible/ansible_collection.md) if you wish to use this repository as an Ansible collection - -### Vagrant - -For Vagrant we need to install Python dependencies for provisioning tasks. -Check that ``Python`` and ``pip`` are installed: - -```ShellSession -python -V && pip -V -``` - -If this returns the version of the software, you're good to go. If not, download and install Python from here - -Install Ansible according to [Ansible installation guide](/docs/ansible/ansible.md#installing-ansible) -then run the following step: - -```ShellSession -vagrant up -``` - ## Documents - [Requirements](#requirements) @@ -205,24 +170,3 @@ See also [Network checker](docs/advanced/netcheck.md). - [nginx](https://kubernetes.github.io/ingress-nginx): the NGINX Ingress Controller. - [metallb](docs/ingress/metallb.md): the MetalLB bare-metal service LoadBalancer provider. 
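The Quick Start above runs the playbooks against `/inventory/inventory.ini`. As a rough, hypothetical sketch (host names and addresses are placeholders; the group layout is the one Kubespray uses throughout this repo), such an inventory can look like:

```ini
# hypothetical minimal inventory -- node names and IPs are placeholders
node1 ansible_host=10.0.0.1 ip=10.0.0.1
node2 ansible_host=10.0.0.2 ip=10.0.0.2

[kube_control_plane]
node1

[etcd]
node1

[kube_node]
node1
node2

[k8s_cluster:children]
kube_control_plane
kube_node
```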
- -## Community docs and resources - -- [kubernetes.io/docs/setup/production-environment/tools/kubespray/](https://kubernetes.io/docs/setup/production-environment/tools/kubespray/) -- [kubespray, monitoring and logging](https://github.com/gregbkr/kubernetes-kargo-logging-monitoring) by @gregbkr -- [Deploy Kubernetes w/ Ansible & Terraform](https://rsmitty.github.io/Terraform-Ansible-Kubernetes/) by @rsmitty -- [Deploy a Kubernetes Cluster with Kubespray (video)](https://www.youtube.com/watch?v=CJ5G4GpqDy0) - -## Tools and projects on top of Kubespray - -- [Digital Rebar Provision](https://github.com/digitalrebar/provision/blob/v4/doc/integrations/ansible.rst) -- [Terraform Contrib](https://github.com/kubernetes-sigs/kubespray/tree/master/contrib/terraform) -- [Kubean](https://github.com/kubean-io/kubean) - -## CI Tests - -[![Build graphs](https://gitlab.com/kargo-ci/kubernetes-sigs-kubespray/badges/master/pipeline.svg)](https://gitlab.com/kargo-ci/kubernetes-sigs-kubespray/-/pipelines) - -CI/end-to-end tests sponsored by: [CNCF](https://cncf.io), [Equinix Metal](https://metal.equinix.com/), [OVHcloud](https://www.ovhcloud.com/), [ELASTX](https://elastx.se/). - -See the [test matrix](docs/developers/test_cases.md) for details. diff --git a/RELEASE.md b/RELEASE.md deleted file mode 100644 index 13282ef4d02..00000000000 --- a/RELEASE.md +++ /dev/null @@ -1,85 +0,0 @@ -# Release Process - -The Kubespray Project is released on an as-needed basis. The process is as follows: - -1. An issue is opened proposing a new release, with a changelog since the last release. Please see [a good sample issue](https://github.com/kubernetes-sigs/kubespray/issues/8325) -1. At least one of the [approvers](OWNERS_ALIASES) must approve this release -1. (Only for major releases) The `kube_version_min_required` variable is set to `n-1` -1. (Only for major releases) Remove hashes for [EOL versions](https://github.com/kubernetes/website/blob/main/content/en/releases/patch-releases.md) of kubernetes from `*_checksums` variables. -1. Create the release note with [Kubernetes Release Notes Generator](https://github.com/kubernetes/release/blob/master/cmd/release-notes/README.md). See the following `Release note creation` section for the details. -1. An approver creates a [new release in GitHub](https://github.com/kubernetes-sigs/kubespray/releases/new) using a version and tag name like `vX.Y.Z` and attaching the release notes -1. (Only for major releases) An approver creates a release branch in the form `release-X.Y` -1. (For major releases) On the `master` branch: bump the version in `galaxy.yml` to the next expected major release (X.y.0 with y = Y + 1), and make a Pull Request. -1. (For minor releases) On the `release-X.Y` branch: bump the version in `galaxy.yml` to the next expected minor release (X.Y.z with z = Z + 1), and make a Pull Request. -1. The corresponding versions of the [quay.io/kubespray/kubespray:vX.Y.Z](https://quay.io/repository/kubespray/kubespray) and [quay.io/kubespray/vagrant:vX.Y.Z](https://quay.io/repository/kubespray/vagrant) container images are built and tagged. See the following `Container image creation` section for the details. -1. The release issue is closed -1. An announcement email is sent to `dev@kubernetes.io` with the subject `[ANNOUNCE] Kubespray $VERSION is released` -1. The topic of the #kubespray channel is updated with `vX.Y.Z is released! | ...` -1.
Create/update an issue for upgrading Kubernetes and [k8s-conformance](https://github.com/cncf/k8s-conformance) - -## Major/minor releases and milestones - -* For major releases (vX.Y) Kubespray maintains one branch (`release-X.Y`). Minor releases (vX.Y.Z) are available only as tags. - -* Security patches and bug fixes might be backported. - -* Fixes for major releases (vX.Y) and minor releases (vX.Y.Z) are delivered - via maintenance releases (vX.Y.Z) and assigned to the corresponding open - [GitHub milestone](https://github.com/kubernetes-sigs/kubespray/milestones). - That milestone remains open for the major/minor release's support lifetime, - which ends once the milestone is closed. Then only the next major or minor release - can be done. - -* Kubespray major and minor releases are bound to the given `kube_version` major/minor - version numbers and other components' arbitrary versions, like etcd or network plugins. - Older or newer component versions are not supported and not tested for the given - release (even if included in the checksum variables, like `kubeadm_checksums`). - -* There are no unstable releases and no APIs, so Kubespray doesn't follow - [semver](https://semver.org/). Every version describes only a stable release. - Breaking changes, if any are introduced by changed defaults or non-contrib ansible roles' - playbooks, shall be described in the release notes. Other breaking changes, if any in - the contributed addons or bound versions of Kubernetes and other components, are - considered out of Kubespray's scope and are up to the components' teams to deal with and - document. - -* Minor releases can change components' versions, but not the major `kube_version`. - A greater `kube_version` requires a new major or minor release. For example, if Kubespray v2.0.0 - is bound to `kube_version: 1.4.x`, `calico_version: 0.22.0`, `etcd_version: 3.0.6`, - then Kubespray v2.1.0 may be bound to only minor changes to `kube_version`, like v1.5.1, - and *any* changes to other components, like etcd v4, or calico 1.2.3. - And Kubespray v3.x.x shall be bound to `kube_version: 2.x.x` respectively. - -## Release note creation - -You can create a release note with: - -```shell -export GITHUB_TOKEN= -export ORG=kubernetes-sigs -export REPO=kubespray -release-notes --start-sha --end-sha --dependencies=false --output=/tmp/kubespray-release-note --required-author="" -``` - -If the release note file (/tmp/kubespray-release-note) contains "### Uncategorized" pull requests, those pull requests don't have a valid kind label (`kind/feature`, etc.). -It is necessary to put a valid label on each pull request and run the above release-notes command again to get a better release note. - -## Container image creation - -The container image `quay.io/kubespray/kubespray:vX.Y.Z` can be created from the Dockerfile in the kubespray root directory: - -```shell -cd kubespray/ -nerdctl build -t quay.io/kubespray/kubespray:vX.Y.Z . -nerdctl push quay.io/kubespray/kubespray:vX.Y.Z -``` - -The container image `quay.io/kubespray/vagrant:vX.Y.Z` can be created with build.sh in test-infra/vagrant-docker/: - -```shell -cd kubespray/test-infra/vagrant-docker/ -./build vX.Y.Z -``` - -Please note that the above operation requires permission to push container images to quay.io/kubespray/. -If you don't have that permission, please ask for it on the #kubespray-dev channel.
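Pushing also assumes you are authenticated to quay.io; as a minimal sketch (assuming you already have an account with push rights on quay.io/kubespray/):

```shell
# log in once so the `nerdctl push` calls above are authorized (prompts for credentials)
nerdctl login quay.io
```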
diff --git a/SECURITY_CONTACTS b/SECURITY_CONTACTS deleted file mode 100644 index 5b743285438..00000000000 --- a/SECURITY_CONTACTS +++ /dev/null @@ -1,15 +0,0 @@ -# Defined below are the security contacts for this repo. -# -# They are the contact point for the Product Security Committee to reach out -# to for triaging and handling of incoming issues. -# -# The below names agree to abide by the -# [Embargo Policy](https://git.k8s.io/security/private-distributors-list.md#embargo-policy) -# and will be removed and replaced if they violate that agreement. -# -# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE -# INSTRUCTIONS AT https://kubernetes.io/security/ -floryut -ant31 -VannTen -yankay diff --git a/Vagrantfile b/Vagrantfile deleted file mode 100644 index 833ca95ee17..00000000000 --- a/Vagrantfile +++ /dev/null @@ -1,349 +0,0 @@ -# -*- mode: ruby -*- -# # vi: set ft=ruby : - -# For help on using kubespray with vagrant, check out docs/developers/vagrant.md - -require 'fileutils' -require 'ipaddr' -require 'socket' - -Vagrant.require_version ">= 2.0.0" - -CONFIG = File.join(File.dirname(__FILE__), ENV['KUBESPRAY_VAGRANT_CONFIG'] || 'vagrant/config.rb') - -FLATCAR_URL_TEMPLATE = "https://%s.release.flatcar-linux.net/amd64-usr/current/flatcar_production_vagrant.json" - -# Uniq disk UUID for libvirt -DISK_UUID = Time.now.utc.to_i - -SUPPORTED_OS = { - "flatcar-stable" => {box: "flatcar-stable", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["stable"]}, - "flatcar-beta" => {box: "flatcar-beta", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["beta"]}, - "flatcar-alpha" => {box: "flatcar-alpha", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["alpha"]}, - "flatcar-edge" => {box: "flatcar-edge", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["edge"]}, - "ubuntu2004" => {box: "generic/ubuntu2004", user: "vagrant"}, - "ubuntu2204" => {box: "generic/ubuntu2204", user: "vagrant"}, - "ubuntu2404" => {box: "bento/ubuntu-24.04", user: "vagrant"}, - "centos8" => {box: "centos/8", user: "vagrant"}, - "centos8-bento" => {box: "bento/centos-8", user: "vagrant"}, - "almalinux8" => {box: "almalinux/8", user: "vagrant"}, - "almalinux8-bento" => {box: "bento/almalinux-8", user: "vagrant"}, - "almalinux9" => {box: "almalinux/9", user: "vagrant"}, - "rockylinux8" => {box: "rockylinux/8", user: "vagrant"}, - "rockylinux9" => {box: "rockylinux/9", user: "vagrant"}, - "fedora39" => {box: "fedora/39-cloud-base", user: "vagrant"}, - "fedora40" => {box: "fedora/40-cloud-base", user: "vagrant"}, - "fedora39-arm64" => {box: "bento/fedora-39-arm64", user: "vagrant"}, - "fedora40-arm64" => {box: "bento/fedora-40", user: "vagrant"}, - "opensuse" => {box: "opensuse/Leap-15.6.x86_64", user: "vagrant"}, - "opensuse-tumbleweed" => {box: "opensuse/Tumbleweed.x86_64", user: "vagrant"}, - "oraclelinux" => {box: "generic/oracle7", user: "vagrant"}, - "oraclelinux8" => {box: "generic/oracle8", user: "vagrant"}, - "rhel8" => {box: "generic/rhel8", user: "vagrant"}, - "debian11" => {box: "debian/bullseye64", user: "vagrant"}, - "debian12" => {box: "debian/bookworm64", user: "vagrant"}, -} - -if File.exist?(CONFIG) - require CONFIG -end - -# Defaults for config options defined in CONFIG -$num_instances ||= 3 -$instance_name_prefix ||= "k8s" -$vm_gui ||= false -$vm_memory ||= 2048 -$vm_cpus ||= 2 -$shared_folders ||= {} -$forwarded_ports ||= {} -$subnet ||= "172.18.8" -$subnet_ipv6 ||= "fd3c:b398:0698:0756" -$os ||= "ubuntu2004" -$network_plugin ||= "flannel" -$inventories ||= [] -# Setting multi_networking to 
true will install Multus: https://github.com/k8snetworkplumbingwg/multus-cni -$multi_networking ||= "False" -$download_run_once ||= "True" -$download_force_cache ||= "False" -# Modify those to have separate groups (for instance, to test separate etcd:) -# first_control_plane = 1 -# first_etcd = 4 -# control_plane_instances = 3 -# etcd_instances = 3 -$first_node ||= 1 -$first_control_plane ||= 1 -$first_etcd ||= 1 - -# The first three nodes are etcd servers -$etcd_instances ||= [$num_instances, 3].min -# The first two nodes are kube masters -$control_plane_instances ||= [$num_instances, 2].min -# All nodes are kube nodes -$kube_node_instances ||= $num_instances - $first_node + 1 - -# The following only works when using the libvirt provider -$kube_node_instances_with_disks ||= false -$kube_node_instances_with_disks_size ||= "20G" -$kube_node_instances_with_disks_number ||= 2 -$override_disk_size ||= false -$disk_size ||= "20GB" -$local_path_provisioner_enabled ||= "False" -$local_path_provisioner_claim_root ||= "/opt/local-path-provisioner/" -$libvirt_nested ||= false -# boolean or string (e.g. "-vvv") -$ansible_verbosity ||= false -$ansible_tags ||= ENV['VAGRANT_ANSIBLE_TAGS'] || "" - -$vagrant_dir ||= File.join(File.dirname(__FILE__), ".vagrant") - -$playbook ||= "cluster.yml" -$extra_vars ||= {} - -host_vars = {} - -def collect_networks(subnet, subnet_ipv6) - Socket.getifaddrs.filter_map do |iface| - next unless iface&.netmask&.ip_address && iface.addr - - is_ipv6 = iface.addr.ipv6? - ip = IPAddr.new(iface.addr.ip_address.split('%').first) - ip_test = is_ipv6 ? IPAddr.new("#{subnet_ipv6}::0") : IPAddr.new("#{subnet}.0") - - prefix = IPAddr.new(iface.netmask.ip_address).to_i.to_s(2).count('1') - network = ip.mask(prefix) - - [IPAddr.new("#{network}/#{prefix}"), ip_test] - end -end - -def subnet_in_use?(network_ips) - network_ips.any? { |net, test_ip| net.include?(test_ip) && test_ip != net } -end - -network_ips = collect_networks($subnet, $subnet_ipv6) - -if subnet_in_use?(network_ips) - puts "Invalid subnet provided, subnet is already in use: #{$subnet}.0" - puts "Subnets in use: #{network_ips.inspect}" - exit 1 -end - -# throw error if os is not supported -if ! SUPPORTED_OS.key?($os) - puts "Unsupported OS: #{$os}" - puts "Supported OS are: #{SUPPORTED_OS.keys.join(', ')}" - exit 1 -end - -$box = SUPPORTED_OS[$os][:box] - -if Vagrant.has_plugin?("vagrant-proxyconf") - $no_proxy = ENV['NO_PROXY'] || ENV['no_proxy'] || "127.0.0.1,localhost" - (1..$num_instances).each do |i| - $no_proxy += ",#{$subnet}.#{i+100}" - end -end - -Vagrant.configure("2") do |config| - - config.vm.box = $box - if SUPPORTED_OS[$os].has_key? 
:box_url - config.vm.box_url = SUPPORTED_OS[$os][:box_url] - end - config.ssh.username = SUPPORTED_OS[$os][:user] - - # avoid a plugin conflict with vagrant-vbguest - if Vagrant.has_plugin?("vagrant-vbguest") then - config.vbguest.auto_update = false - end - - # always use Vagrant's insecure key - config.ssh.insert_key = false - - if ($override_disk_size) - unless Vagrant.has_plugin?("vagrant-disksize") - system "vagrant plugin install vagrant-disksize" - end - config.disksize.size = $disk_size - end - - (1..$num_instances).each do |i| - config.vm.define vm_name = "%s-%01d" % [$instance_name_prefix, i] do |node| - - node.vm.hostname = vm_name - - if Vagrant.has_plugin?("vagrant-proxyconf") - node.proxy.http = ENV['HTTP_PROXY'] || ENV['http_proxy'] || "" - node.proxy.https = ENV['HTTPS_PROXY'] || ENV['https_proxy'] || "" - node.proxy.no_proxy = $no_proxy - end - - ["vmware_fusion", "vmware_workstation"].each do |vmware| - node.vm.provider vmware do |v| - v.vmx['memsize'] = $vm_memory - v.vmx['numvcpus'] = $vm_cpus - end - end - - node.vm.provider :virtualbox do |vb| - vb.memory = $vm_memory - vb.cpus = $vm_cpus - vb.gui = $vm_gui - vb.linked_clone = true - vb.customize ["modifyvm", :id, "--vram", "8"] # ubuntu defaults to 256 MB which is a waste of precious RAM - vb.customize ["modifyvm", :id, "--audio", "none"] - end - - node.vm.provider :libvirt do |lv| - lv.nested = $libvirt_nested - lv.cpu_mode = "host-model" - lv.memory = $vm_memory - lv.cpus = $vm_cpus - lv.default_prefix = 'kubespray' - # Fix kernel panic on fedora 28 - if $os == "fedora" - lv.cpu_mode = "host-passthrough" - end - end - - if $kube_node_instances_with_disks - # Libvirt - driverletters = ('a'..'z').to_a - node.vm.provider :libvirt do |lv| - # always make /dev/sd{a/b/c} so that CI can ensure that - # virtualbox and libvirt will have the same devices to use for OSDs - (1..$kube_node_instances_with_disks_number).each do |d| - lv.storage :file, :device => "hd#{driverletters[d]}", :path => "disk-#{i}-#{d}-#{DISK_UUID}.disk", :size => $kube_node_instances_with_disks_size, :bus => "scsi" - end - end - node.vm.provider :virtualbox do |vb| - # always make /dev/sd{a/b/c} so that CI can ensure that - # virtualbox and libvirt will have the same devices to use for OSDs - (1..$kube_node_instances_with_disks_number).each do |d| - vb.customize ['createhd', '--filename', "disk-#{i}-#{driverletters[d]}-#{DISK_UUID}.disk", '--size', $kube_node_instances_with_disks_size] # disk size comes from $kube_node_instances_with_disks_size - vb.customize ['storageattach', :id, '--storagectl', 'SATA Controller', '--port', d, '--device', 0, '--type', 'hdd', '--medium', "disk-#{i}-#{driverletters[d]}-#{DISK_UUID}.disk", '--nonrotational', 'on', '--mtype', 'normal'] - end - end - end - - if $expose_docker_tcp - node.vm.network "forwarded_port", guest: 2375, host: ($expose_docker_tcp + i - 1), auto_correct: true - end - - $forwarded_ports.each do |guest, host| - node.vm.network "forwarded_port", guest: guest, host: host, auto_correct: true - end - - if ["rhel8"].include?
$os - # Vagrant synced_folder rsync options cannot be used for RHEL boxes as the Rsync package cannot - be installed until the host is registered with a valid Red Hat support subscription - node.vm.synced_folder ".", "/vagrant", disabled: false - $shared_folders.each do |src, dst| - node.vm.synced_folder src, dst - end - else - node.vm.synced_folder ".", "/vagrant", disabled: false, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z'], rsync__exclude: ['.git','venv'] - $shared_folders.each do |src, dst| - node.vm.synced_folder src, dst, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z'] - end - end - - ip = "#{$subnet}.#{i+100}" - ip6 = "#{$subnet_ipv6}::#{i+100}" - node.vm.network :private_network, - :ip => ip, - :libvirt__guest_ipv6 => 'yes', - :libvirt__ipv6_address => ip6, - :libvirt__ipv6_prefix => "64", - :libvirt__forward_mode => "none", - :libvirt__dhcp_enabled => false - - # libvirt__ipv6_address does not work as intended: the address is obtained with the desired prefix, but auto-generated (like fd3c:b398:698:756:5054:ff:fe48:c61e/64) - # add a default route so ansible_default_ipv6 can be detected - # TODO: fix libvirt__ipv6 or use $subnet in shell - config.vm.provision "shell", inline: "ip -6 r a fd3c:b398:698:756::/64 dev eth1;ip -6 r add default via fd3c:b398:0698:0756::1 dev eth1 || true" - - # Disable swap for each vm - node.vm.provision "shell", inline: "swapoff -a" - - # ubuntu2004 and ubuntu2204 have IPv6 explicitly disabled. This undoes that. - if ["ubuntu2004", "ubuntu2204"].include? $os - node.vm.provision "shell", inline: "rm -f /etc/modprobe.d/local.conf" - node.vm.provision "shell", inline: "sed -i '/net.ipv6.conf.all.disable_ipv6/d' /etc/sysctl.d/99-sysctl.conf /etc/sysctl.conf" - end - # Hack for fedora39/40 to get the IP address of the second interface - if ["fedora39", "fedora40", "fedora39-arm64", "fedora40-arm64"].include? $os - config.vm.provision "shell", inline: <<-SHELL - nmcli conn modify 'Wired connection 2' ipv4.addresses $(cat /etc/sysconfig/network-scripts/ifcfg-eth1 | grep IPADDR | cut -d "=" -f2)/24 - nmcli conn modify 'Wired connection 2' ipv4.method manual - service NetworkManager restart - SHELL - end - - - # Rockylinux boxes need UEFI - if ["rockylinux8", "rockylinux9"].include? $os - config.vm.provider "libvirt" do |domain| - domain.loader = "/usr/share/OVMF/x64/OVMF_CODE.fd" - end - end - - # Disable firewalld on oraclelinux/redhat vms - if ["oraclelinux","oraclelinux8", "rhel8","rockylinux8"].include?
$os - node.vm.provision "shell", inline: "systemctl stop firewalld; systemctl disable firewalld" - end - - host_vars[vm_name] = { - "ip": ip, - "flannel_interface": "eth1", - "kube_network_plugin": $network_plugin, - "kube_network_plugin_multus": $multi_networking, - "download_run_once": $download_run_once, - "download_localhost": "False", - "download_cache_dir": ENV['HOME'] + "/kubespray_cache", - # Make kubespray cache even when download_run_once is false - "download_force_cache": $download_force_cache, - # Keeping the cache on the nodes can improve provisioning speed while debugging kubespray - "download_keep_remote_cache": "False", - "docker_rpm_keepcache": "1", - # These two settings will put kubectl and admin.config in $inventory/artifacts - "kubeconfig_localhost": "True", - "kubectl_localhost": "True", - "local_path_provisioner_enabled": "#{$local_path_provisioner_enabled}", - "local_path_provisioner_claim_root": "#{$local_path_provisioner_claim_root}", - "ansible_ssh_user": SUPPORTED_OS[$os][:user], - "ansible_ssh_private_key_file": File.join(Dir.home, ".vagrant.d", "insecure_private_key"), - "unsafe_show_logs": "True" - } - - # Only execute the Ansible provisioner once, when all the machines are up and ready. - # And limit the action to gathering facts, the full playbook is going to be run by testcases_run.sh - if i == $num_instances - node.vm.provision "ansible" do |ansible| - ansible.playbook = $playbook - ansible.compatibility_mode = "2.0" - ansible.verbose = $ansible_verbosity - ansible.become = true - ansible.limit = "all,localhost" - ansible.host_key_checking = false - ansible.raw_arguments = ["--forks=#{$num_instances}", - "--flush-cache", - "-e ansible_become_pass=vagrant"] + - $inventories.map {|inv| ["-i", inv]}.flatten - ansible.host_vars = host_vars - ansible.extra_vars = $extra_vars - if $ansible_tags != "" - ansible.tags = [$ansible_tags] - end - ansible.groups = { - "etcd" => ["#{$instance_name_prefix}-[#{$first_etcd}:#{$etcd_instances + $first_etcd - 1}]"], - "kube_control_plane" => ["#{$instance_name_prefix}-[#{$first_control_plane}:#{$control_plane_instances + $first_control_plane - 1}]"], - "kube_node" => ["#{$instance_name_prefix}-[#{$first_node}:#{$kube_node_instances + $first_node - 1}]"], - "k8s_cluster:children" => ["kube_control_plane", "kube_node"], - } - end - end - - end - end -end diff --git a/code-of-conduct.md b/code-of-conduct.md deleted file mode 100644 index 0d15c00cf32..00000000000 --- a/code-of-conduct.md +++ /dev/null @@ -1,3 +0,0 @@ -# Kubernetes Community Code of Conduct - -Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) diff --git a/contrib/aws_iam/kubernetes-master-policy.json b/contrib/aws_iam/kubernetes-master-policy.json deleted file mode 100644 index e5cbaea8039..00000000000 --- a/contrib/aws_iam/kubernetes-master-policy.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": ["ec2:*"], - "Resource": ["*"] - }, - { - "Effect": "Allow", - "Action": ["elasticloadbalancing:*"], - "Resource": ["*"] - }, - { - "Effect": "Allow", - "Action": ["route53:*"], - "Resource": ["*"] - }, - { - "Effect": "Allow", - "Action": "s3:*", - "Resource": [ - "arn:aws:s3:::kubernetes-*" - ] - } - ] -} diff --git a/contrib/aws_iam/kubernetes-master-role.json b/contrib/aws_iam/kubernetes-master-role.json deleted file mode 100644 index 66d5de1d5ae..00000000000 --- a/contrib/aws_iam/kubernetes-master-role.json +++ /dev/null @@ -1,10 +0,0 @@
-{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { "Service": "ec2.amazonaws.com"}, - "Action": "sts:AssumeRole" - } - ] -} diff --git a/contrib/aws_iam/kubernetes-minion-policy.json b/contrib/aws_iam/kubernetes-minion-policy.json deleted file mode 100644 index af81e98c824..00000000000 --- a/contrib/aws_iam/kubernetes-minion-policy.json +++ /dev/null @@ -1,45 +0,0 @@ -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": "s3:*", - "Resource": [ - "arn:aws:s3:::kubernetes-*" - ] - }, - { - "Effect": "Allow", - "Action": "ec2:Describe*", - "Resource": "*" - }, - { - "Effect": "Allow", - "Action": "ec2:AttachVolume", - "Resource": "*" - }, - { - "Effect": "Allow", - "Action": "ec2:DetachVolume", - "Resource": "*" - }, - { - "Effect": "Allow", - "Action": ["route53:*"], - "Resource": ["*"] - }, - { - "Effect": "Allow", - "Action": [ - "ecr:GetAuthorizationToken", - "ecr:BatchCheckLayerAvailability", - "ecr:GetDownloadUrlForLayer", - "ecr:GetRepositoryPolicy", - "ecr:DescribeRepositories", - "ecr:ListImages", - "ecr:BatchGetImage" - ], - "Resource": "*" - } - ] -} diff --git a/contrib/aws_iam/kubernetes-minion-role.json b/contrib/aws_iam/kubernetes-minion-role.json deleted file mode 100644 index 66d5de1d5ae..00000000000 --- a/contrib/aws_iam/kubernetes-minion-role.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { "Service": "ec2.amazonaws.com"}, - "Action": "sts:AssumeRole" - } - ] -} diff --git a/contrib/aws_inventory/kubespray-aws-inventory.py b/contrib/aws_inventory/kubespray-aws-inventory.py deleted file mode 100755 index 7527c683855..00000000000 --- a/contrib/aws_inventory/kubespray-aws-inventory.py +++ /dev/null @@ -1,81 +0,0 @@ -#!/usr/bin/env python - -from __future__ import print_function -import boto3 -import os -import argparse -import json - -class SearchEC2Tags(object): - - def __init__(self): - self.parse_args() - if self.args.list: - self.search_tags() - if self.args.host: - data = {} - print(json.dumps(data, indent=2)) - - def parse_args(self): - - ##Check if VPC_VISIBILITY is set, if not default to private - if "VPC_VISIBILITY" in os.environ: - self.vpc_visibility = os.environ['VPC_VISIBILITY'] - else: - self.vpc_visibility = "private" - - ##Support --list and --host flags. We largely ignore the host one. - parser = argparse.ArgumentParser() - parser.add_argument('--list', action='store_true', default=False, help='List instances') - parser.add_argument('--host', action='store_true', help='Get all the variables about a specific instance') - self.args = parser.parse_args() - - def search_tags(self): - hosts = {} - hosts['_meta'] = { 'hostvars': {} } - - ##Search ec2 three times to find nodes of each group type. Relies on kubespray-role key/value. 
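- ##Note: the tag filter below matches with wildcards ("*"+group+"*"), so a single kubespray-role value such as "kube_control_plane,etcd" can place one instance into several groups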
- for group in ["kube_control_plane", "kube_node", "etcd"]: - hosts[group] = [] - tag_key = "kubespray-role" - tag_value = ["*"+group+"*"] - region = os.environ['AWS_REGION'] - - ec2 = boto3.resource('ec2', region) - filters = [{'Name': 'tag:'+tag_key, 'Values': tag_value}, {'Name': 'instance-state-name', 'Values': ['running']}] - cluster_name = os.getenv('CLUSTER_NAME') - if cluster_name: - filters.append({'Name': 'tag-key', 'Values': ['kubernetes.io/cluster/'+cluster_name]}) - instances = ec2.instances.filter(Filters=filters) - for instance in instances: - - ##Assume the default vpc_visibility, which is private - dns_name = instance.private_dns_name - ansible_host = { - 'ansible_ssh_host': instance.private_ip_address - } - - ##Override when vpc_visibility is public - if self.vpc_visibility == "public": - dns_name = instance.public_dns_name - ansible_host = { - 'ansible_ssh_host': instance.public_ip_address - } - - ##Set only when the instance has node_labels - node_labels_tag = list(filter(lambda t: t['Key'] == 'kubespray-node-labels', instance.tags)) - if node_labels_tag: - ansible_host['node_labels'] = dict([ label.strip().split('=') for label in node_labels_tag[0]['Value'].split(',') ]) - - ##Set only when the instance has node_taints - node_taints_tag = list(filter(lambda t: t['Key'] == 'kubespray-node-taints', instance.tags)) - if node_taints_tag: - ansible_host['node_taints'] = list([ taint.strip() for taint in node_taints_tag[0]['Value'].split(',') ]) - - hosts[group].append(dns_name) - hosts['_meta']['hostvars'][dns_name] = ansible_host - - hosts['k8s_cluster'] = {'children':['kube_control_plane', 'kube_node']} - print(json.dumps(hosts, sort_keys=True, indent=2)) - -SearchEC2Tags() diff --git a/contrib/aws_inventory/requirements.txt b/contrib/aws_inventory/requirements.txt deleted file mode 100644 index 179d5de54c6..00000000000 --- a/contrib/aws_inventory/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -boto3 # Apache-2.0 diff --git a/contrib/azurerm/.gitignore b/contrib/azurerm/.gitignore deleted file mode 100644 index 3ef07f87460..00000000000 --- a/contrib/azurerm/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -.generated -/inventory diff --git a/contrib/azurerm/README.md b/contrib/azurerm/README.md deleted file mode 100644 index 8869ec09114..00000000000 --- a/contrib/azurerm/README.md +++ /dev/null @@ -1,67 +0,0 @@ -# Kubernetes on Azure with Azure Resource Group Templates - -Provision the base infrastructure for a Kubernetes cluster by using [Azure Resource Group Templates](https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-authoring-templates) - -## Status - -This will provision the base infrastructure (vnet, vms, nics, ips, ...) needed for Kubernetes in Azure into the specified -Resource Group. It will not install Kubernetes itself; that has to be done in a later step by yourself (using kubespray, of course). - -## Requirements - -- [Install azure-cli](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest) - [Login with azure-cli](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli?view=azure-cli-latest) -- Dedicated Resource Group created in the Azure Portal or through azure-cli - -## Configuration through group_vars/all - -You have to modify at least two variables in group_vars/all. One is the **cluster_name** variable, which must be globally -unique due to some restrictions in Azure. The other is the **ssh_public_keys** variable, which must be your ssh public -key to access your azure virtual machines.
Most other variables should be self-explanatory if you have some basic Kubernetes -experience. - -## Bastion host - -You can enable the use of a Bastion Host by changing **use_bastion** in group_vars/all to **true**. The generated -templates will then include an additional bastion VM which can be used to connect to the masters and nodes. The option -also removes all public IPs from all other VMs. - -## Generating and applying - -To generate and apply the templates, call: - -```shell -./apply-rg.sh -``` - -If you change something in the configuration (e.g. number of nodes) later, you can call this again and Azure will -take care of creating/modifying whatever is needed. - -## Clearing a resource group - -If you need to delete all resources from a resource group, simply call: - -```shell -./clear-rg.sh -``` - -**WARNING**: this really deletes everything in your resource group, including anything you created in it later! - -## Installing Ansible and the dependencies - -Install Ansible according to [Ansible installation guide](/docs/ansible/ansible.md#installing-ansible) - -## Generating an inventory for kubespray - -After you have applied the templates, you can generate an inventory with this call: - -```shell -./generate-inventory.sh -``` - -It will create the file ./inventory which can then be used with kubespray, e.g.: - -```shell -cd kubespray-root-dir -ansible-playbook -i contrib/azurerm/inventory -u devops --become -e "@inventory/sample/group_vars/all/all.yml" cluster.yml -``` diff --git a/contrib/azurerm/apply-rg.sh b/contrib/azurerm/apply-rg.sh deleted file mode 100755 index 2348169d4ef..00000000000 --- a/contrib/azurerm/apply-rg.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env bash - -set -e - -AZURE_RESOURCE_GROUP="$1" - -if [ "$AZURE_RESOURCE_GROUP" == "" ]; then - echo "AZURE_RESOURCE_GROUP is missing" - exit 1 -fi - -ansible-playbook generate-templates.yml - -az deployment group create --template-file ./.generated/network.json -g $AZURE_RESOURCE_GROUP -az deployment group create --template-file ./.generated/storage.json -g $AZURE_RESOURCE_GROUP -az deployment group create --template-file ./.generated/availability-sets.json -g $AZURE_RESOURCE_GROUP -az deployment group create --template-file ./.generated/bastion.json -g $AZURE_RESOURCE_GROUP -az deployment group create --template-file ./.generated/masters.json -g $AZURE_RESOURCE_GROUP -az deployment group create --template-file ./.generated/minions.json -g $AZURE_RESOURCE_GROUP diff --git a/contrib/azurerm/clear-rg.sh b/contrib/azurerm/clear-rg.sh deleted file mode 100755 index a2004553799..00000000000 --- a/contrib/azurerm/clear-rg.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env bash - -set -e - -AZURE_RESOURCE_GROUP="$1" - -if [ "$AZURE_RESOURCE_GROUP" == "" ]; then - echo "AZURE_RESOURCE_GROUP is missing" - exit 1 -fi - -ansible-playbook generate-templates.yml - -az group deployment create -g "$AZURE_RESOURCE_GROUP" --template-file ./.generated/clear-rg.json --mode Complete diff --git a/contrib/azurerm/generate-inventory.sh b/contrib/azurerm/generate-inventory.sh deleted file mode 100755 index b3eb9c0fe64..00000000000 --- a/contrib/azurerm/generate-inventory.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env bash - -set -e - -AZURE_RESOURCE_GROUP="$1" - -if [ "$AZURE_RESOURCE_GROUP" == "" ]; then - echo "AZURE_RESOURCE_GROUP is missing" - exit 1 -fi -# check if azure cli 2.0 exists, else fall back to azure cli 1.0 -if az &>/dev/null; then - ansible-playbook generate-inventory_2.yml -e
azure_resource_group="$AZURE_RESOURCE_GROUP" -elif azure &>/dev/null; then - ansible-playbook generate-inventory.yml -e azure_resource_group="$AZURE_RESOURCE_GROUP" -else - echo "Azure cli not found" -fi diff --git a/contrib/azurerm/generate-inventory.yml b/contrib/azurerm/generate-inventory.yml deleted file mode 100644 index 59e1e90b6a4..00000000000 --- a/contrib/azurerm/generate-inventory.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -- name: Generate Azure inventory - hosts: localhost - gather_facts: false - roles: - - generate-inventory diff --git a/contrib/azurerm/generate-inventory_2.yml b/contrib/azurerm/generate-inventory_2.yml deleted file mode 100644 index 8c2cbff86b5..00000000000 --- a/contrib/azurerm/generate-inventory_2.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -- name: Generate Azure inventory - hosts: localhost - gather_facts: false - roles: - - generate-inventory_2 diff --git a/contrib/azurerm/generate-templates.yml b/contrib/azurerm/generate-templates.yml deleted file mode 100644 index f2cf231bc4d..00000000000 --- a/contrib/azurerm/generate-templates.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -- name: Generate Azure templates - hosts: localhost - gather_facts: false - roles: - - generate-templates diff --git a/contrib/azurerm/group_vars/all b/contrib/azurerm/group_vars/all deleted file mode 100644 index 44dc1e384ee..00000000000 --- a/contrib/azurerm/group_vars/all +++ /dev/null @@ -1,51 +0,0 @@ - -# Due to some Azure limitations (e.g. a Storage Account's name must be unique), -# this name must be globally unique - it will be used as a prefix for azure components -cluster_name: example - -# Set this to true if you do not want to have public IPs for your masters and minions. This will provision a bastion -node that can be used to access the masters and minions -use_bastion: false - -# Set this to a preferred name that will be used as the first part of the dns name for your bastion host. For example: k8s-bastion..cloudapp.azure.com. -# This is convenient when exceptions have to be configured on a firewall to allow ssh to the given bastion host. -# bastion_domain_prefix: k8s-bastion - -number_of_k8s_masters: 3 -number_of_k8s_nodes: 3 - -masters_vm_size: Standard_A2 -masters_os_disk_size: 1000 - -minions_vm_size: Standard_A2 -minions_os_disk_size: 1000 - -admin_username: devops -admin_password: changeme - -# MAKE SURE TO CHANGE THIS TO YOUR PUBLIC KEY to access your azure machines -ssh_public_keys: - - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDLRzcxbsFDdEibiyXCSdIFh7bKbXso1NqlKjEyPTptf3aBXHEhVil0lJRjGpTlpfTy7PHvXFbXIOCdv9tOmeH1uxWDDeZawgPFV6VSZ1QneCL+8bxzhjiCn8133wBSPZkN8rbFKd9eEUUBfx8ipCblYblF9FcidylwtMt5TeEmXk8yRVkPiCuEYuDplhc2H0f4PsK3pFb5aDVdaDT3VeIypnOQZZoUxHWqm6ThyHrzLJd3SrZf+RROFWW1uInIDf/SZlXojczUYoffxgT1lERfOJCHJXsqbZWugbxQBwqsVsX59+KPxFFo6nV88h3UQr63wbFx52/MXkX4WrCkAHzN ablock-vwfs@dell-lappy" - -# Disable ssh login with a password.
Change this to false to allow connecting over ssh with a password -disablePasswordAuthentication: true - -# Azure CIDRs -azure_vnet_cidr: 10.0.0.0/8 -azure_admin_cidr: 10.241.2.0/24 -azure_masters_cidr: 10.0.4.0/24 -azure_minions_cidr: 10.240.0.0/16 - -# Azure loadbalancer port to use to access your cluster -kube_apiserver_port: 6443 - -# Azure networking and storage naming to use with inventory/all.yml -#azure_virtual_network_name: KubeVNET -#azure_subnet_admin_name: ad-subnet -#azure_subnet_masters_name: master-subnet -#azure_subnet_minions_name: minion-subnet -#azure_route_table_name: routetable -#azure_security_group_name: secgroup - -# Storage types available are: "Standard_LRS","Premium_LRS" -#azure_storage_account_type: Standard_LRS diff --git a/contrib/azurerm/roles/generate-inventory/tasks/main.yml b/contrib/azurerm/roles/generate-inventory/tasks/main.yml deleted file mode 100644 index f93f1b6b281..00000000000 --- a/contrib/azurerm/roles/generate-inventory/tasks/main.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- - -- name: Query Azure VMs - command: azure vm list-ip-address --json {{ azure_resource_group }} - register: vm_list_cmd - -- name: Set vm_list - set_fact: - vm_list: "{{ vm_list_cmd.stdout }}" - -- name: Generate inventory - template: - src: inventory.j2 - dest: "{{ playbook_dir }}/inventory" - mode: "0644" diff --git a/contrib/azurerm/roles/generate-inventory/templates/inventory.j2 b/contrib/azurerm/roles/generate-inventory/templates/inventory.j2 deleted file mode 100644 index 6c5feb2cd4c..00000000000 --- a/contrib/azurerm/roles/generate-inventory/templates/inventory.j2 +++ /dev/null @@ -1,33 +0,0 @@ - -{% for vm in vm_list %} -{% if not use_bastion or vm.name == 'bastion' %} -{{ vm.name }} ansible_ssh_host={{ vm.networkProfile.networkInterfaces[0].expanded.ipConfigurations[0].publicIPAddress.expanded.ipAddress }} ip={{ vm.networkProfile.networkInterfaces[0].expanded.ipConfigurations[0].privateIPAddress }} -{% else %} -{{ vm.name }} ansible_ssh_host={{ vm.networkProfile.networkInterfaces[0].expanded.ipConfigurations[0].privateIPAddress }} -{% endif %} -{% endfor %} - -[kube_control_plane] -{% for vm in vm_list %} -{% if 'kube_control_plane' in vm.tags.roles %} -{{ vm.name }} -{% endif %} -{% endfor %} - -[etcd] -{% for vm in vm_list %} -{% if 'etcd' in vm.tags.roles %} -{{ vm.name }} -{% endif %} -{% endfor %} - -[kube_node] -{% for vm in vm_list %} -{% if 'kube_node' in vm.tags.roles %} -{{ vm.name }} -{% endif %} -{% endfor %} - -[k8s_cluster:children] -kube_node -kube_control_plane diff --git a/contrib/azurerm/roles/generate-inventory_2/tasks/main.yml b/contrib/azurerm/roles/generate-inventory_2/tasks/main.yml deleted file mode 100644 index 267755b1285..00000000000 --- a/contrib/azurerm/roles/generate-inventory_2/tasks/main.yml +++ /dev/null @@ -1,31 +0,0 @@ ---- - -- name: Query Azure VMs IPs - command: az vm list-ip-addresses -o json --resource-group {{ azure_resource_group }} - register: vm_ip_list_cmd - -- name: Query Azure VMs Roles - command: az vm list -o json --resource-group {{ azure_resource_group }} - register: vm_list_cmd - -- name: Query Azure Load Balancer Public IP - command: az network public-ip show -o json -g {{ azure_resource_group }} -n kubernetes-api-pubip - register: lb_pubip_cmd - -- name: Set VM IP, roles lists and load balancer public IP - set_fact: - vm_ip_list: "{{ vm_ip_list_cmd.stdout }}" - vm_roles_list: "{{ vm_list_cmd.stdout }}" - lb_pubip: "{{ lb_pubip_cmd.stdout }}" - -- name: Generate inventory - template: - src: inventory.j2 - dest: "{{
playbook_dir }}/inventory" - mode: "0644" - -- name: Generate Load Balancer variables - template: - src: loadbalancer_vars.j2 - dest: "{{ playbook_dir }}/loadbalancer_vars.yml" - mode: "0644" diff --git a/contrib/azurerm/roles/generate-inventory_2/templates/inventory.j2 b/contrib/azurerm/roles/generate-inventory_2/templates/inventory.j2 deleted file mode 100644 index 2f6ac5c4315..00000000000 --- a/contrib/azurerm/roles/generate-inventory_2/templates/inventory.j2 +++ /dev/null @@ -1,33 +0,0 @@ - -{% for vm in vm_ip_list %} -{% if not use_bastion or vm.virtualMachine.name == 'bastion' %} -{{ vm.virtualMachine.name }} ansible_ssh_host={{ vm.virtualMachine.network.publicIpAddresses[0].ipAddress }} ip={{ vm.virtualMachine.network.privateIpAddresses[0] }} -{% else %} -{{ vm.virtualMachine.name }} ansible_ssh_host={{ vm.virtualMachine.network.privateIpAddresses[0] }} -{% endif %} -{% endfor %} - -[kube_control_plane] -{% for vm in vm_roles_list %} -{% if 'kube_control_plane' in vm.tags.roles %} -{{ vm.name }} -{% endif %} -{% endfor %} - -[etcd] -{% for vm in vm_roles_list %} -{% if 'etcd' in vm.tags.roles %} -{{ vm.name }} -{% endif %} -{% endfor %} - -[kube_node] -{% for vm in vm_roles_list %} -{% if 'kube_node' in vm.tags.roles %} -{{ vm.name }} -{% endif %} -{% endfor %} - -[k8s_cluster:children] -kube_node -kube_control_plane diff --git a/contrib/azurerm/roles/generate-inventory_2/templates/loadbalancer_vars.j2 b/contrib/azurerm/roles/generate-inventory_2/templates/loadbalancer_vars.j2 deleted file mode 100644 index 95a62f3274c..00000000000 --- a/contrib/azurerm/roles/generate-inventory_2/templates/loadbalancer_vars.j2 +++ /dev/null @@ -1,8 +0,0 @@ -## External LB example config -apiserver_loadbalancer_domain_name: {{ lb_pubip.dnsSettings.fqdn }} -loadbalancer_apiserver: - address: {{ lb_pubip.ipAddress }} - port: 6443 - -## Internal loadbalancers for apiservers -loadbalancer_apiserver_localhost: false diff --git a/contrib/azurerm/roles/generate-templates/defaults/main.yml b/contrib/azurerm/roles/generate-templates/defaults/main.yml deleted file mode 100644 index ff6b313266f..00000000000 --- a/contrib/azurerm/roles/generate-templates/defaults/main.yml +++ /dev/null @@ -1,37 +0,0 @@ ---- -apiVersion: "2015-06-15" - -virtualNetworkName: "{{ azure_virtual_network_name | default('KubeVNET') }}" - -subnetAdminName: "{{ azure_subnet_admin_name | default('ad-subnet') }}" -subnetMastersName: "{{ azure_subnet_masters_name | default('master-subnet') }}" -subnetMinionsName: "{{ azure_subnet_minions_name | default('minion-subnet') }}" - -routeTableName: "{{ azure_route_table_name | default('routetable') }}" -securityGroupName: "{{ azure_security_group_name | default('secgroup') }}" - -nameSuffix: "{{ cluster_name }}" - -availabilitySetMasters: "master-avs" -availabilitySetMinions: "minion-avs" - -faultDomainCount: 3 -updateDomainCount: 10 - -bastionVmSize: Standard_A0 -bastionVMName: bastion -bastionIPAddressName: bastion-pubip - -disablePasswordAuthentication: true - -sshKeyPath: "/home/{{ admin_username }}/.ssh/authorized_keys" - -imageReference: - publisher: "OpenLogic" - offer: "CentOS" - sku: "7.5" - version: "latest" -imageReferenceJson: "{{ imageReference | to_json }}" - -storageAccountName: "sa{{ nameSuffix | replace('-', '') }}" -storageAccountType: "{{ azure_storage_account_type | default('Standard_LRS') }}" diff --git a/contrib/azurerm/roles/generate-templates/tasks/main.yml b/contrib/azurerm/roles/generate-templates/tasks/main.yml deleted file mode 100644 index 057d4d00547..00000000000 --- 
a/contrib/azurerm/roles/generate-templates/tasks/main.yml +++ /dev/null @@ -1,25 +0,0 @@ ---- -- name: Set base_dir - set_fact: - base_dir: "{{ playbook_dir }}/.generated/" - -- name: Create base_dir - file: - path: "{{ base_dir }}" - state: directory - recurse: true - mode: "0755" - -- name: Store json files in base_dir - template: - src: "{{ item }}" - dest: "{{ base_dir }}/{{ item }}" - mode: "0644" - with_items: - - network.json - - storage.json - - availability-sets.json - - bastion.json - - masters.json - - minions.json - - clear-rg.json diff --git a/contrib/azurerm/roles/generate-templates/templates/availability-sets.json b/contrib/azurerm/roles/generate-templates/templates/availability-sets.json deleted file mode 100644 index 78c1547a9c3..00000000000 --- a/contrib/azurerm/roles/generate-templates/templates/availability-sets.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", - "contentVersion": "1.0.0.0", - "parameters": { - }, - "variables": { - }, - "resources": [ - { - "type": "Microsoft.Compute/availabilitySets", - "name": "{{availabilitySetMasters}}", - "apiVersion": "{{apiVersion}}", - "location": "[resourceGroup().location]", - "properties": { - "PlatformFaultDomainCount": "{{faultDomainCount}}", - "PlatformUpdateDomainCount": "{{updateDomainCount}}" - } - }, - { - "type": "Microsoft.Compute/availabilitySets", - "name": "{{availabilitySetMinions}}", - "apiVersion": "{{apiVersion}}", - "location": "[resourceGroup().location]", - "properties": { - "PlatformFaultDomainCount": "{{faultDomainCount}}", - "PlatformUpdateDomainCount": "{{updateDomainCount}}" - } - } - ] -} diff --git a/contrib/azurerm/roles/generate-templates/templates/bastion.json b/contrib/azurerm/roles/generate-templates/templates/bastion.json deleted file mode 100644 index 4cf8fc7a64b..00000000000 --- a/contrib/azurerm/roles/generate-templates/templates/bastion.json +++ /dev/null @@ -1,106 +0,0 @@ -{ - "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", - "contentVersion": "1.0.0.0", - "parameters": { - }, - "variables": { - "vnetID": "[resourceId('Microsoft.Network/virtualNetworks', '{{virtualNetworkName}}')]", - "subnetAdminRef": "[concat(variables('vnetID'),'/subnets/', '{{subnetAdminName}}')]" - }, - "resources": [ - {% if use_bastion %} - { - "apiVersion": "{{apiVersion}}", - "type": "Microsoft.Network/publicIPAddresses", - "name": "{{bastionIPAddressName}}", - "location": "[resourceGroup().location]", - "properties": { - "publicIPAllocationMethod": "Static", - "dnsSettings": { - {% if bastion_domain_prefix %} - "domainNameLabel": "{{ bastion_domain_prefix }}" - {% endif %} - } - } - }, - { - "apiVersion": "{{apiVersion}}", - "type": "Microsoft.Network/networkInterfaces", - "name": "{{bastionVMName}}-nic", - "location": "[resourceGroup().location]", - "dependsOn": [ - "[concat('Microsoft.Network/publicIPAddresses/', '{{bastionIPAddressName}}')]" - ], - "properties": { - "ipConfigurations": [ - { - "name": "BastionIpConfig", - "properties": { - "privateIPAllocationMethod": "Dynamic", - "publicIPAddress": { - "id": "[resourceId('Microsoft.Network/publicIPAddresses', '{{bastionIPAddressName}}')]" - }, - "subnet": { - "id": "[variables('subnetAdminRef')]" - } - } - } - ] - } - }, - { - "apiVersion": "{{apiVersion}}", - "type": "Microsoft.Compute/virtualMachines", - "name": "{{bastionVMName}}", - "location": "[resourceGroup().location]", - "dependsOn": [ - 
"[concat('Microsoft.Network/networkInterfaces/', '{{bastionVMName}}-nic')]" - ], - "tags": { - "roles": "bastion" - }, - "properties": { - "hardwareProfile": { - "vmSize": "{{bastionVmSize}}" - }, - "osProfile": { - "computerName": "{{bastionVMName}}", - "adminUsername": "{{admin_username}}", - "adminPassword": "{{admin_password}}", - "linuxConfiguration": { - "disablePasswordAuthentication": "true", - "ssh": { - "publicKeys": [ - {% for key in ssh_public_keys %} - { - "path": "{{sshKeyPath}}", - "keyData": "{{key}}" - }{% if loop.index < ssh_public_keys | length %},{% endif %} - {% endfor %} - ] - } - } - }, - "storageProfile": { - "imageReference": {{imageReferenceJson}}, - "osDisk": { - "name": "osdisk", - "vhd": { - "uri": "[concat('http://', '{{storageAccountName}}', '.blob.core.windows.net/vhds/', '{{bastionVMName}}', '-osdisk.vhd')]" - }, - "caching": "ReadWrite", - "createOption": "FromImage" - } - }, - "networkProfile": { - "networkInterfaces": [ - { - "id": "[resourceId('Microsoft.Network/networkInterfaces', '{{bastionVMName}}-nic')]" - } - ] - } - } - } - {% endif %} - ] -} diff --git a/contrib/azurerm/roles/generate-templates/templates/clear-rg.json b/contrib/azurerm/roles/generate-templates/templates/clear-rg.json deleted file mode 100644 index faf31e8cca9..00000000000 --- a/contrib/azurerm/roles/generate-templates/templates/clear-rg.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", - "contentVersion": "1.0.0.0", - "parameters": {}, - "variables": {}, - "resources": [], - "outputs": {} -} diff --git a/contrib/azurerm/roles/generate-templates/templates/masters.json b/contrib/azurerm/roles/generate-templates/templates/masters.json deleted file mode 100644 index b299383a66e..00000000000 --- a/contrib/azurerm/roles/generate-templates/templates/masters.json +++ /dev/null @@ -1,198 +0,0 @@ -{ - "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", - "contentVersion": "1.0.0.0", - "parameters": { - }, - "variables": { - "lbDomainName": "{{nameSuffix}}-api", - "lbPublicIPAddressName": "kubernetes-api-pubip", - "lbPublicIPAddressType": "Static", - "lbPublicIPAddressID": "[resourceId('Microsoft.Network/publicIPAddresses',variables('lbPublicIPAddressName'))]", - "lbName": "kubernetes-api", - "lbID": "[resourceId('Microsoft.Network/loadBalancers',variables('lbName'))]", - - "vnetID": "[resourceId('Microsoft.Network/virtualNetworks', '{{virtualNetworkName}}')]", - "kubeMastersSubnetRef": "[concat(variables('vnetID'),'/subnets/', '{{subnetMastersName}}')]" - }, - "resources": [ - { - "apiVersion": "{{apiVersion}}", - "type": "Microsoft.Network/publicIPAddresses", - "name": "[variables('lbPublicIPAddressName')]", - "location": "[resourceGroup().location]", - "properties": { - "publicIPAllocationMethod": "[variables('lbPublicIPAddressType')]", - "dnsSettings": { - "domainNameLabel": "[variables('lbDomainName')]" - } - } - }, - { - "apiVersion": "{{apiVersion}}", - "name": "[variables('lbName')]", - "type": "Microsoft.Network/loadBalancers", - "location": "[resourceGroup().location]", - "dependsOn": [ - "[concat('Microsoft.Network/publicIPAddresses/', variables('lbPublicIPAddressName'))]" - ], - "properties": { - "frontendIPConfigurations": [ - { - "name": "kube-api-frontend", - "properties": { - "publicIPAddress": { - "id": "[variables('lbPublicIPAddressID')]" - } - } - } - ], - "backendAddressPools": [ - { - "name": "kube-api-backend" - } - ], - "loadBalancingRules": [ 
- { - "name": "kube-api", - "properties": { - "frontendIPConfiguration": { - "id": "[concat(variables('lbID'), '/frontendIPConfigurations/kube-api-frontend')]" - }, - "backendAddressPool": { - "id": "[concat(variables('lbID'), '/backendAddressPools/kube-api-backend')]" - }, - "protocol": "tcp", - "frontendPort": "{{kube_apiserver_port}}", - "backendPort": "{{kube_apiserver_port}}", - "enableFloatingIP": false, - "idleTimeoutInMinutes": 5, - "probe": { - "id": "[concat(variables('lbID'), '/probes/kube-api')]" - } - } - } - ], - "probes": [ - { - "name": "kube-api", - "properties": { - "protocol": "tcp", - "port": "{{kube_apiserver_port}}", - "intervalInSeconds": 5, - "numberOfProbes": 2 - } - } - ] - } - }, - {% for i in range(number_of_k8s_masters) %} - {% if not use_bastion %} - { - "apiVersion": "{{apiVersion}}", - "type": "Microsoft.Network/publicIPAddresses", - "name": "master-{{i}}-pubip", - "location": "[resourceGroup().location]", - "properties": { - "publicIPAllocationMethod": "Static" - } - }, - {% endif %} - { - "apiVersion": "{{apiVersion}}", - "type": "Microsoft.Network/networkInterfaces", - "name": "master-{{i}}-nic", - "location": "[resourceGroup().location]", - "dependsOn": [ - {% if not use_bastion %} - "[concat('Microsoft.Network/publicIPAddresses/', 'master-{{i}}-pubip')]", - {% endif %} - "[concat('Microsoft.Network/loadBalancers/', variables('lbName'))]" - ], - "properties": { - "ipConfigurations": [ - { - "name": "MastersIpConfig", - "properties": { - "privateIPAllocationMethod": "Dynamic", - {% if not use_bastion %} - "publicIPAddress": { - "id": "[resourceId('Microsoft.Network/publicIPAddresses', 'master-{{i}}-pubip')]" - }, - {% endif %} - "subnet": { - "id": "[variables('kubeMastersSubnetRef')]" - }, - "loadBalancerBackendAddressPools": [ - { - "id": "[concat(variables('lbID'), '/backendAddressPools/kube-api-backend')]" - } - ] - } - } - ], - "networkSecurityGroup": { - "id": "[resourceId('Microsoft.Network/networkSecurityGroups', '{{securityGroupName}}')]" - }, - "enableIPForwarding": true - } - }, - { - "type": "Microsoft.Compute/virtualMachines", - "name": "master-{{i}}", - "location": "[resourceGroup().location]", - "dependsOn": [ - "[concat('Microsoft.Network/networkInterfaces/', 'master-{{i}}-nic')]" - ], - "tags": { - "roles": "kube_control_plane,etcd" - }, - "apiVersion": "{{apiVersion}}", - "properties": { - "availabilitySet": { - "id": "[resourceId('Microsoft.Compute/availabilitySets', '{{availabilitySetMasters}}')]" - }, - "hardwareProfile": { - "vmSize": "{{masters_vm_size}}" - }, - "osProfile": { - "computerName": "master-{{i}}", - "adminUsername": "{{admin_username}}", - "adminPassword": "{{admin_password}}", - "linuxConfiguration": { - "disablePasswordAuthentication": "{{disablePasswordAuthentication}}", - "ssh": { - "publicKeys": [ - {% for key in ssh_public_keys %} - { - "path": "{{sshKeyPath}}", - "keyData": "{{key}}" - }{% if loop.index < ssh_public_keys | length %},{% endif %} - {% endfor %} - ] - } - } - }, - "storageProfile": { - "imageReference": {{imageReferenceJson}}, - "osDisk": { - "name": "ma{{nameSuffix}}{{i}}", - "vhd": { - "uri": "[concat('http://','{{storageAccountName}}','.blob.core.windows.net/vhds/master-{{i}}.vhd')]" - }, - "caching": "ReadWrite", - "createOption": "FromImage", - "diskSizeGB": "{{masters_os_disk_size}}" - } - }, - "networkProfile": { - "networkInterfaces": [ - { - "id": "[resourceId('Microsoft.Network/networkInterfaces', 'master-{{i}}-nic')]" - } - ] - } - } - } {% if not loop.last %},{% endif %} - {% endfor %} - 
] -} diff --git a/contrib/azurerm/roles/generate-templates/templates/minions.json b/contrib/azurerm/roles/generate-templates/templates/minions.json deleted file mode 100644 index bd0d059cbb6..00000000000 --- a/contrib/azurerm/roles/generate-templates/templates/minions.json +++ /dev/null @@ -1,115 +0,0 @@ -{ - "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", - "contentVersion": "1.0.0.0", - "parameters": { - }, - "variables": { - "vnetID": "[resourceId('Microsoft.Network/virtualNetworks', '{{virtualNetworkName}}')]", - "kubeMinionsSubnetRef": "[concat(variables('vnetID'),'/subnets/', '{{subnetMinionsName}}')]" - }, - "resources": [ - {% for i in range(number_of_k8s_nodes) %} - {% if not use_bastion %} - { - "apiVersion": "{{apiVersion}}", - "type": "Microsoft.Network/publicIPAddresses", - "name": "minion-{{i}}-pubip", - "location": "[resourceGroup().location]", - "properties": { - "publicIPAllocationMethod": "Static" - } - }, - {% endif %} - { - "apiVersion": "{{apiVersion}}", - "type": "Microsoft.Network/networkInterfaces", - "name": "minion-{{i}}-nic", - "location": "[resourceGroup().location]", - "dependsOn": [ - {% if not use_bastion %} - "[concat('Microsoft.Network/publicIPAddresses/', 'minion-{{i}}-pubip')]" - {% endif %} - ], - "properties": { - "ipConfigurations": [ - { - "name": "MinionsIpConfig", - "properties": { - "privateIPAllocationMethod": "Dynamic", - {% if not use_bastion %} - "publicIPAddress": { - "id": "[resourceId('Microsoft.Network/publicIPAddresses', 'minion-{{i}}-pubip')]" - }, - {% endif %} - "subnet": { - "id": "[variables('kubeMinionsSubnetRef')]" - } - } - } - ], - "networkSecurityGroup": { - "id": "[resourceId('Microsoft.Network/networkSecurityGroups', '{{securityGroupName}}')]" - }, - "enableIPForwarding": true - } - }, - { - "type": "Microsoft.Compute/virtualMachines", - "name": "minion-{{i}}", - "location": "[resourceGroup().location]", - "dependsOn": [ - "[concat('Microsoft.Network/networkInterfaces/', 'minion-{{i}}-nic')]" - ], - "tags": { - "roles": "kube_node" - }, - "apiVersion": "{{apiVersion}}", - "properties": { - "availabilitySet": { - "id": "[resourceId('Microsoft.Compute/availabilitySets', '{{availabilitySetMinions}}')]" - }, - "hardwareProfile": { - "vmSize": "{{minions_vm_size}}" - }, - "osProfile": { - "computerName": "minion-{{i}}", - "adminUsername": "{{admin_username}}", - "adminPassword": "{{admin_password}}", - "linuxConfiguration": { - "disablePasswordAuthentication": "{{disablePasswordAuthentication}}", - "ssh": { - "publicKeys": [ - {% for key in ssh_public_keys %} - { - "path": "{{sshKeyPath}}", - "keyData": "{{key}}" - }{% if loop.index < ssh_public_keys | length %},{% endif %} - {% endfor %} - ] - } - } - }, - "storageProfile": { - "imageReference": {{imageReferenceJson}}, - "osDisk": { - "name": "mi{{nameSuffix}}{{i}}", - "vhd": { - "uri": "[concat('http://','{{storageAccountName}}','.blob.core.windows.net/vhds/minion-{{i}}.vhd')]" - }, - "caching": "ReadWrite", - "createOption": "FromImage", - "diskSizeGB": "{{minions_os_disk_size}}" - } - }, - "networkProfile": { - "networkInterfaces": [ - { - "id": "[resourceId('Microsoft.Network/networkInterfaces', 'minion-{{i}}-nic')]" - } - ] - } - } - } {% if not loop.last %},{% endif %} - {% endfor %} - ] -} diff --git a/contrib/azurerm/roles/generate-templates/templates/network.json b/contrib/azurerm/roles/generate-templates/templates/network.json deleted file mode 100644 index 763b3dbb301..00000000000 --- 
a/contrib/azurerm/roles/generate-templates/templates/network.json +++ /dev/null @@ -1,109 +0,0 @@ -{ - "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", - "contentVersion": "1.0.0.0", - "parameters": { - }, - "variables": { - }, - "resources": [ - { - "apiVersion": "{{apiVersion}}", - "type": "Microsoft.Network/routeTables", - "name": "{{routeTableName}}", - "location": "[resourceGroup().location]", - "properties": { - "routes": [ - ] - } - }, - { - "type": "Microsoft.Network/virtualNetworks", - "name": "{{virtualNetworkName}}", - "location": "[resourceGroup().location]", - "apiVersion": "{{apiVersion}}", - "dependsOn": [ - "[concat('Microsoft.Network/routeTables/', '{{routeTableName}}')]" - ], - "properties": { - "addressSpace": { - "addressPrefixes": [ - "{{azure_vnet_cidr}}" - ] - }, - "subnets": [ - { - "name": "{{subnetMastersName}}", - "properties": { - "addressPrefix": "{{azure_masters_cidr}}", - "routeTable": { - "id": "[resourceId('Microsoft.Network/routeTables', '{{routeTableName}}')]" - } - } - }, - { - "name": "{{subnetMinionsName}}", - "properties": { - "addressPrefix": "{{azure_minions_cidr}}", - "routeTable": { - "id": "[resourceId('Microsoft.Network/routeTables', '{{routeTableName}}')]" - } - } - } - {% if use_bastion %} - ,{ - "name": "{{subnetAdminName}}", - "properties": { - "addressPrefix": "{{azure_admin_cidr}}", - "routeTable": { - "id": "[resourceId('Microsoft.Network/routeTables', '{{routeTableName}}')]" - } - } - } - {% endif %} - ] - } - }, - { - "apiVersion": "{{apiVersion}}", - "type": "Microsoft.Network/networkSecurityGroups", - "name": "{{securityGroupName}}", - "location": "[resourceGroup().location]", - "properties": { - "securityRules": [ - {% if not use_bastion %} - { - "name": "ssh", - "properties": { - "description": "Allow SSH", - "protocol": "Tcp", - "sourcePortRange": "*", - "destinationPortRange": "22", - "sourceAddressPrefix": "Internet", - "destinationAddressPrefix": "*", - "access": "Allow", - "priority": 100, - "direction": "Inbound" - } - }, - {% endif %} - { - "name": "kube-api", - "properties": { - "description": "Allow secure kube-api", - "protocol": "Tcp", - "sourcePortRange": "*", - "destinationPortRange": "{{kube_apiserver_port}}", - "sourceAddressPrefix": "Internet", - "destinationAddressPrefix": "*", - "access": "Allow", - "priority": 101, - "direction": "Inbound" - } - } - ] - }, - "resources": [], - "dependsOn": [] - } - ] -} diff --git a/contrib/azurerm/roles/generate-templates/templates/storage.json b/contrib/azurerm/roles/generate-templates/templates/storage.json deleted file mode 100644 index 1ed08669784..00000000000 --- a/contrib/azurerm/roles/generate-templates/templates/storage.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", - "contentVersion": "1.0.0.0", - "parameters": { - }, - "variables": { - }, - "resources": [ - { - "type": "Microsoft.Storage/storageAccounts", - "name": "{{storageAccountName}}", - "location": "[resourceGroup().location]", - "apiVersion": "{{apiVersion}}", - "properties": { - "accountType": "{{storageAccountType}}" - } - } - ] -} diff --git a/contrib/terraform/aws/.gitignore b/contrib/terraform/aws/.gitignore deleted file mode 100644 index 373687b8014..00000000000 --- a/contrib/terraform/aws/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -*.tfstate* -.terraform.lock.hcl -.terraform diff --git a/contrib/terraform/aws/README.md b/contrib/terraform/aws/README.md deleted file mode 100644 
index 28a9f08c415..00000000000 --- a/contrib/terraform/aws/README.md +++ /dev/null @@ -1,124 +0,0 @@ -# Kubernetes on AWS with Terraform - -## Overview - -This project will create: - -- VPC with Public and Private Subnets in # Availability Zones -- Bastion Hosts and NAT Gateways in the Public Subnet -- A dynamic number of masters, etcd, and worker nodes in the Private Subnet - - evenly distributed over the # of Availability Zones -- An AWS NLB in the Public Subnet for accessing the Kubernetes API from the internet - -## Requirements - -- Terraform 0.12.0 or newer - -## How to Use - -- Export the variables for your AWS credentials or edit `credentials.tfvars`: - -```commandline -export TF_VAR_AWS_ACCESS_KEY_ID="www" -export TF_VAR_AWS_SECRET_ACCESS_KEY="xxx" -export TF_VAR_AWS_SSH_KEY_NAME="yyy" -export TF_VAR_AWS_DEFAULT_REGION="zzz" -``` - -- Update `contrib/terraform/aws/terraform.tfvars` with your data. By default, the Terraform scripts use Ubuntu 18.04 LTS (Bionic) as the base image. If you want to change this behaviour, see the note "Using a distribution other than Ubuntu" below. -- Create an AWS EC2 SSH Key -- Run with `terraform apply --var-file="credentials.tfvars"` or `terraform apply`, depending on whether you exported your AWS credentials - -Example: - -```commandline -terraform apply -var-file=credentials.tfvars -``` - -- Terraform automatically creates an Ansible Inventory file called `hosts` with the created infrastructure in the directory `inventory` -- Ansible will automatically generate an ssh config file for your bastion hosts. To connect to hosts over ssh through the bastion, use the generated `ssh-bastion.conf`; Ansible automatically detects the bastion and adjusts `ssh_args` - -```commandline -ssh -F ./ssh-bastion.conf user@$ip -``` - -- Once the infrastructure is created, you can run the kubespray playbooks and supply inventory/hosts with the `-i` flag. - -Example (this one assumes you are using Ubuntu): - -```commandline -ansible-playbook -i ./inventory/hosts ./cluster.yml -e ansible_user=ubuntu -b --become-user=root --flush-cache -``` - -## Using a distribution other than Ubuntu - -To use a Linux distribution other than Ubuntu 18.04 (Bionic) LTS, adjust the AMI search filters of the `data "aws_ami" "distro"` block by setting variables in your `terraform.tfvars` file (a commented sketch of this lookup appears in `create-infrastructure.tf` below). This keeps the configuration flexible across distributions without modifying the core Terraform files. - -### Example Usages - -- **Debian Jessie**: To use Debian Jessie, add the following lines to your `terraform.tfvars`: - - ```hcl - ami_name_pattern = "debian-jessie-amd64-hvm-*" - ami_owners = ["379101102735"] - ``` - -- **Ubuntu 16.04**: To use Ubuntu 16.04 instead, add the following to your `terraform.tfvars`: - - ```hcl - ami_name_pattern = "ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-*" - ami_owners = ["099720109477"] - ``` - -- **CentOS 7**: To use CentOS 7, add these lines to your `terraform.tfvars`: - - ```hcl - ami_name_pattern = "dcos-centos7-*" - ami_owners = ["688023202711"] - ``` - -## Connecting to Kubernetes - -You can use the following set of commands to get the kubeconfig file from your newly created cluster. Before running the commands, make sure you are in the project's root folder. - -```commandline -# Get the controller's IP address.
-CONTROLLER_HOST_NAME=$(cat ./inventory/hosts | grep "\[kube_control_plane\]" -A 1 | tail -n 1) -CONTROLLER_IP=$(cat ./inventory/hosts | grep $CONTROLLER_HOST_NAME | grep ansible_host | cut -d'=' -f2) - -# Get the hostname of the load balancer. -LB_HOST=$(cat inventory/hosts | grep apiserver_loadbalancer_domain_name | cut -d'"' -f2) - -# Get the controller's SSH fingerprint. -ssh-keygen -R $CONTROLLER_IP > /dev/null 2>&1 -ssh-keyscan -H $CONTROLLER_IP >> ~/.ssh/known_hosts 2>/dev/null - -# Get the kubeconfig from the controller. -mkdir -p ~/.kube -ssh -F ssh-bastion.conf centos@$CONTROLLER_IP "sudo chmod 644 /etc/kubernetes/admin.conf" -scp -F ssh-bastion.conf centos@$CONTROLLER_IP:/etc/kubernetes/admin.conf ~/.kube/config -sed -i "s^server:.*^server: https://$LB_HOST:6443^" ~/.kube/config -kubectl get nodes -``` - -## Troubleshooting - -### Remaining AWS IAM Instance Profile - -If the cluster was destroyed without using Terraform, it is possible that -the AWS IAM Instance Profiles still remain. To delete them you can use -the `AWS CLI` with the following command: - -```commandline -aws iam delete-instance-profile --region <region> --instance-profile-name <profile-name> -``` - -### Ansible Inventory doesn't get created - -It can happen that Terraform doesn't create an Ansible Inventory file automatically. If this is the case, copy the output after `inventory=`, create a file named `hosts` in the directory `inventory`, and paste the inventory into that file. - -## Architecture - -Pictured is the AWS infrastructure created with this Terraform project, distributed over two Availability Zones. - -![AWS Infrastructure with Terraform](docs/aws_kubespray.png) diff --git a/contrib/terraform/aws/create-infrastructure.tf b/contrib/terraform/aws/create-infrastructure.tf deleted file mode 100644 index 810bd16f6e0..00000000000 --- a/contrib/terraform/aws/create-infrastructure.tf +++ /dev/null @@ -1,185 +0,0 @@ -terraform { - required_version = ">= 0.12.0" - required_providers { - aws = { - source = "hashicorp/aws" - version = "~> 5.0" - } - } -} - -provider "aws" { - access_key = var.AWS_ACCESS_KEY_ID - secret_key = var.AWS_SECRET_ACCESS_KEY - region = var.AWS_DEFAULT_REGION -} - -data "aws_availability_zones" "available" {} - -/* -* Calling the modules that create the initial AWS VPC / AWS NLB -* and AWS IAM Roles for the Kubernetes Deployment -*/ - -module "aws-vpc" { - source = "./modules/vpc" - - aws_cluster_name = var.aws_cluster_name - aws_vpc_cidr_block = var.aws_vpc_cidr_block - aws_avail_zones = data.aws_availability_zones.available.names - aws_cidr_subnets_private = var.aws_cidr_subnets_private - aws_cidr_subnets_public = var.aws_cidr_subnets_public - default_tags = var.default_tags -} - -module "aws-nlb" { - source = "./modules/nlb" - - aws_cluster_name = var.aws_cluster_name - aws_vpc_id = module.aws-vpc.aws_vpc_id - aws_avail_zones = data.aws_availability_zones.available.names - aws_subnet_ids_public = module.aws-vpc.aws_subnet_ids_public - aws_nlb_api_port = var.aws_nlb_api_port - k8s_secure_api_port = var.k8s_secure_api_port - default_tags = var.default_tags -} - -module "aws-iam" { - source = "./modules/iam" - - aws_cluster_name = var.aws_cluster_name -} - -/* -* Create Bastion Instances in AWS -* -*/ - -resource "aws_instance" "bastion-server" { - ami = data.aws_ami.distro.id - instance_type = var.aws_bastion_size - count = var.aws_bastion_num - associate_public_ip_address = true - subnet_id = element(module.aws-vpc.aws_subnet_ids_public, count.index) - - vpc_security_group_ids = module.aws-vpc.aws_security_group - - 
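  # Every instance in this file resolves its image through data.aws_ami.distro.id.
  # That data block is declared elsewhere in this same (deleted) tree; a minimal
  # sketch of the lookup, assuming only the ami_name_pattern / ami_owners
  # variables shown in the README above:
  #
  #   data "aws_ami" "distro" {
  #     most_recent = true
  #
  #     filter {
  #       name   = "name"
  #       values = [var.ami_name_pattern]  # e.g. "debian-jessie-amd64-hvm-*"
  #     }
  #
  #     filter {
  #       name   = "virtualization-type"
  #       values = ["hvm"]
  #     }
  #
  #     owners = var.ami_owners            # e.g. ["379101102735"]
  #   }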
key_name = var.AWS_SSH_KEY_NAME - - tags = merge(var.default_tags, tomap({ - Name = "kubernetes-${var.aws_cluster_name}-bastion-${count.index}" - Cluster = var.aws_cluster_name - Role = "bastion-${var.aws_cluster_name}-${count.index}" - })) -} - -/* -* Create K8s Master and worker nodes and etcd instances -* -*/ - -resource "aws_instance" "k8s-master" { - ami = data.aws_ami.distro.id - instance_type = var.aws_kube_master_size - - count = var.aws_kube_master_num - - subnet_id = element(module.aws-vpc.aws_subnet_ids_private, count.index) - - vpc_security_group_ids = module.aws-vpc.aws_security_group - - root_block_device { - volume_size = var.aws_kube_master_disk_size - } - - iam_instance_profile = module.aws-iam.kube_control_plane-profile - key_name = var.AWS_SSH_KEY_NAME - - tags = merge(var.default_tags, tomap({ - Name = "kubernetes-${var.aws_cluster_name}-master${count.index}" - "kubernetes.io/cluster/${var.aws_cluster_name}" = "member" - Role = "master" - })) -} - -resource "aws_lb_target_group_attachment" "tg-attach_master_nodes" { - count = var.aws_kube_master_num - target_group_arn = module.aws-nlb.aws_nlb_api_tg_arn - target_id = element(aws_instance.k8s-master.*.private_ip, count.index) -} - -resource "aws_instance" "k8s-etcd" { - ami = data.aws_ami.distro.id - instance_type = var.aws_etcd_size - - count = var.aws_etcd_num - - subnet_id = element(module.aws-vpc.aws_subnet_ids_private, count.index) - - vpc_security_group_ids = module.aws-vpc.aws_security_group - - root_block_device { - volume_size = var.aws_etcd_disk_size - } - - key_name = var.AWS_SSH_KEY_NAME - - tags = merge(var.default_tags, tomap({ - Name = "kubernetes-${var.aws_cluster_name}-etcd${count.index}" - "kubernetes.io/cluster/${var.aws_cluster_name}" = "member" - Role = "etcd" - })) -} - -resource "aws_instance" "k8s-worker" { - ami = data.aws_ami.distro.id - instance_type = var.aws_kube_worker_size - - count = var.aws_kube_worker_num - - subnet_id = element(module.aws-vpc.aws_subnet_ids_private, count.index) - - vpc_security_group_ids = module.aws-vpc.aws_security_group - - root_block_device { - volume_size = var.aws_kube_worker_disk_size - } - - iam_instance_profile = module.aws-iam.kube-worker-profile - key_name = var.AWS_SSH_KEY_NAME - - tags = merge(var.default_tags, tomap({ - Name = "kubernetes-${var.aws_cluster_name}-worker${count.index}" - "kubernetes.io/cluster/${var.aws_cluster_name}" = "member" - Role = "worker" - })) -} - -/* -* Create Kubespray Inventory File -* -*/ -data "template_file" "inventory" { - template = file("${path.module}/templates/inventory.tpl") - - vars = { - public_ip_address_bastion = join("\n", formatlist("bastion ansible_host=%s", aws_instance.bastion-server.*.public_ip)) - connection_strings_master = join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-master.*.private_dns, aws_instance.k8s-master.*.private_ip)) - connection_strings_node = join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-worker.*.private_dns, aws_instance.k8s-worker.*.private_ip)) - list_master = join("\n", aws_instance.k8s-master.*.private_dns) - list_node = join("\n", aws_instance.k8s-worker.*.private_dns) - connection_strings_etcd = join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-etcd.*.private_dns, aws_instance.k8s-etcd.*.private_ip)) - list_etcd = join("\n", ((var.aws_etcd_num > 0) ? 
(aws_instance.k8s-etcd.*.private_dns) : (aws_instance.k8s-master.*.private_dns))) - nlb_api_fqdn = "apiserver_loadbalancer_domain_name=\"${module.aws-nlb.aws_nlb_api_fqdn}\"" - } -} - -resource "null_resource" "inventories" { - provisioner "local-exec" { - command = "echo '${data.template_file.inventory.rendered}' > ${var.inventory_file}" - } - - triggers = { - template = data.template_file.inventory.rendered - } -} diff --git a/contrib/terraform/aws/credentials.tfvars.example b/contrib/terraform/aws/credentials.tfvars.example deleted file mode 100644 index 19420c5a7d6..00000000000 --- a/contrib/terraform/aws/credentials.tfvars.example +++ /dev/null @@ -1,8 +0,0 @@ -#AWS Access Key -AWS_ACCESS_KEY_ID = "" -#AWS Secret Key -AWS_SECRET_ACCESS_KEY = "" -#EC2 SSH Key Name -AWS_SSH_KEY_NAME = "" -#AWS Region -AWS_DEFAULT_REGION = "eu-central-1" diff --git a/contrib/terraform/aws/docs/aws_kubespray.png b/contrib/terraform/aws/docs/aws_kubespray.png deleted file mode 100644 index 40245b845a5094d3cef4500d4f256031329a7b2e..0000000000000000000000000000000000000000 Binary files a/contrib/terraform/aws/docs/aws_kubespray.png and /dev/null differ
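The four assignments in `credentials.tfvars.example` above (and the matching `TF_VAR_*` environment variables from the README) only take effect because variables of the same names are declared in the Terraform configuration — presumably in the deleted `variables.tf` of this same tree. A minimal sketch of those declarations, assuming nothing beyond the names used by the `provider "aws"` block and the `key_name` arguments above; the descriptions are illustrative:

```hcl
variable "AWS_ACCESS_KEY_ID" {
  description = "AWS access key; may also be set via the TF_VAR_AWS_ACCESS_KEY_ID environment variable"
  type        = string
}

variable "AWS_SECRET_ACCESS_KEY" {
  description = "AWS secret key; may also be set via TF_VAR_AWS_SECRET_ACCESS_KEY"
  type        = string
}

variable "AWS_SSH_KEY_NAME" {
  description = "Name of an existing EC2 key pair, passed as key_name to the instances"
  type        = string
}

variable "AWS_DEFAULT_REGION" {
  description = "AWS region to deploy into, e.g. eu-central-1"
  type        = string
}
```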
z)fTdEZU_nG?+Se`hvQ=?YF!uM#Uyl)T933RN4${6Hc5ICtSyTV6)I+!v96wF*J<9> zmM?v|YxFx8k|NP{y=+PJ))}=d{)I?7gjyQd^b#W;h>&t~={0(=TJD>_tIgRkhK?r+ z7W+Y8Jpw?^5K4~-3)#P*_P?j20kld+GOtK)>ITYfZWNdOQ#v|oZ-G4QxyFtQBx?(@ zZcu*n!8E3%tTl_XG^A|y3@tKS{{bED{)u)v4sbU51Ku8fJH0p2`66|A>AVJE~V}GD6MhlX^`k)R9m~Nkb(ML)8!0K;x`}7tk;^)5QGD_%X#yc=_|H7WX#;P z%I$%n{`BI_YZo9JauJKUbt_+De} z3A957;dv)I%Ab8u5KZac4=6YkP}aEGEh zZVu&q*0!Zx0uol5%s0=X`K)!P=HGYaSBmHu+)@0Dg>iAm^&e=LVF(pKc48{?iF zx+UCO_CYJOTk~-gJyv$@Z9(-B89W7>4X65`WA#~-^7Od%Hh%;t?#7+$c&&o0<1=y{ z6V-|pe=FoXtnr7#Kc{x#4NU)Sp??rQ0$Y0uY9{AD7(Qb%loIGQdPxhh7!?E_Jhr=i zyZRMe_6%hcERg>W=q z7SVOi6gNI0!FEhFu1@w2?UVbDy2+EjwUc#8HKWEmCk1}omo)MJ@cA7{fc^Cq29|4Z zWj&jhval>WcJfnY?+pAVd~SSdEl}V*N8gbZcy%0v8o5ZR^uhT(8PDO2y{Zp`rG6v! zC5b?LFm|53@2;v7)h%;Iwd$uoNtKCJWOkGhKTlj~V+~F(XY|)=An)I(^Be?Wb^zeS z^BixO{p8NrhxI77)5Chm+N84Fav&yLIMwqLYK_$l3Y}U>$7W(ocsTL0u)}JCwp}m( z{Q=?qC%fkE&vB#qND~P?P{~(9+w&RH_Hy4aJXx?xap9ttqi+nb16hZu zpeOZN0#1(8d{Wgk2E;<=_J(q*%?3{9IEC#$=#q&uD?RQYM z@h+x=s?~NJwJF7yGQCJwOu+qzI3X)D1?k^v?yncUn7Cc`7O8FBne4@0iHoK(AKaMP z$fn()-CZ0;=Fhv>v|casMz!AUnb|19-rfs51=!T>2B{XS;nyzZ=jP7wD>zG%edOhJ zKgp!57vP}MaE2aD-RQS@BY7_Pj)E>BDFemoZ5P(>2yMdjwG4 zcQDWo zMnETHF}yUfAz}>j%WoxmGyb<12okJ-T||g1r|8O-tPqmWP}I+xwH zgXJHy@??JhV?{Zm25uqfpbc5k`i6Wi|*?>g5zO?gJk)6;mU#vdGP zy3MXO=Lb#`a&V*FHL)fDX?~|n;)$o*^nL@;W9_t`#bM2-Qot+D+4GS1Y$s>dG*Pdx z>}OZlx%hkA1q0hizH93bnLW*q-Cr^do{-87gjUysFy*2sqD(x_hd2q~z_#}dw+Y&4 z;i4UHuyOU=q4*zGK5Ou$D~%f$5Vy^nVTon)Cpnt5=8Hx~fpP9CZGQjc_+Dm$f&s$G ztj5VRq;WA$QI!7nWK{D`q&lP1;%6DOT6~mVlL)K!+*yn-zNtmJ&QnsUgLsv1F0lOR znU*qvBnwMVQsRElI-x{|KylrGsyMNfWm3z&}`;4F6Q&aC&|ru<$$4I&p{(~~*s1kjzQxw=bJe79kH;^p_1+r(-K za;75hn|yYU6a5mSWb2NEkbO+&c1$Hj7OOWi0<{!aJtfD_kz1eO9Hc&0ou2vHo)Hx# zFP?LiUNQ@~e*S5-roY*fCLdH)FB3n9LG@EQi;h#^{U18m$u`eVEk$&>+G2N0&fWW*Bng-cJG0X04!ZX={`7coeq#5hd~E_|L!_f zKs5aPM40!DX|~H#e zyEIy~)%OIv@N1u+AXjjAUdK`2pWD`Y%e6Nv^FL+yxI!Rb-t_Ju=~MhLKxca6F80|Z zFqkuf?1%DcVDAVjAJ&Ynn@^4c29rh)NhbjlM|^xdn`RD4#G z0SQ}eO?3KiP>Cb|+Nu9H(wL(InX*<8LsBGd-}N(`e_{#-y}Jo@3<|O_4ev=kHSbAF zl2tftr018MIszWI>bBrY5?`NWejm@!u52SDmbPrVSvhVvu(-yaMSsOf(ubk7xA%pn z2IC>`XAMk*p9^8hQFDLPOzSylZ`q@wGRsqYJrsG$>+1HrAOjZS>B{i0MqK7i?HSE; zM!*4@y6xi($7?V5XV&v@^+id=z`+`4!z8{pq|5-76e?C#w=40Ji=IQ|XpZ1pl|iM0 z(QRjNLeucHzv(WbjX2|D`Ra`n`+q0VZ(##p`k^a$eKfsJ8u%qp4z;gF4(n*mWlRhY zD{$vixf)OX#gL$|V7pXW#<2axFO72y{3h_e$EXtS(=BV%?CzkC(90y()5tuso|^Xk z(UjK4&=pz`LcobiYPj7>Iq5nd>0jL3B@ziG4ix(Q%5Xe4{7XU52^;13hGOCMvXi{A zZ6uNFB6^I#lV~P7&L7EDK8kmm3o^@dDkLo)?`8VBi>j>$R)jVmoTLK9HaE^vvK-P) zHI3(^9`;B{P(G^ue=bA?T0C>FsPjLZ%3;?4628LU|-1ALPBQ z2jt7+XH3tDn7+CRMa9}ftn*^~4humchZ4j))%$r}!J4wlL9t>h0S`aTDI4;9pIZ&? 
zhk=A4{OV5`FxvI*M1(;=UxdKrq=5akt{c0Nz5G*{c+Q9Qi-`)?!+A>dC8y)oSvSa| zQGb3@LC`ztCj54ajejKt$3~iqlhVl~`b1sIK8!q?h2&TQ_lAD}OhNs}ib9XfPsh9G z#cjM5Zi5hMq`fEkwCkbdgb$=M|3~um*FMmw0n>o)-5r^Y$MhmmZgW}Pf!Z|fcQ4On zR&YMZK%*fUy&1Ve)hBCOaS?65h4oxQ+Sx;XBCs;Fnabw_YVWXv3V1Rf)Zkj13~}kP zJSTM-U#du zm+^7Nlztuul?@MPbOdqwZRf)a4Qpp1%EB#FF3it8*oiTnYrT=i8I3@oyC>wd?TLpc z>6wrhsQr<`OM6+jaNIG>bNuGst=s?|!YFW^uE2VgEU;Rb6Kz?u{Uv{erG)oOP;ZUR zv`rZ*!)o#0x^{)D4K8vod{e?49HV1p#e;$BQk@duaHl~$=sXoYi8Qxn1E~EAoB4Qy1)KU{r5~En$_>mm&5iswtABufyQl=SV~K!EX?(ycoq}W@#7Rpq zc-1+OCuAh#mHofy6B_wwIb=|*sm%Fp{QThb&*E|F2_)=nlhv*wY1qV< zKks;fsJuI*B&LeGJ(rrD64x-V7m8}MTYOZf0l?t-6`a=-#!u@M=l918`DHhVmn7}} zcOLS>>eh{yE9Z8_?N1lYd+pIlb@qXF>!fW4eNp&cLYk##Nz-V?*P8_O!FXUCEIJT7 zT-}rD;H@-7_;XVNzGT08eQwo%P8(p4Ia@o!1;bvl9$$s}YBE}D{|J${>~mLv)H-tB z#koGcckUDgH~5-5+)eoHr@unU@-omUkF00>lImj8e&xlv88KzUDeB4vdT6KJ1r`5h z6x{IZSIMshk8#fyyn4?6^*h5ZwZ4t@Zl!So=ZU@4*L3!#x%v~T zYMn1z_mGs6FM@_;L#%1wzW7cSfJv6UU-8qpbgl$`8pUv7c5dc zt@|gSFe854&Rlz`*5FixZKymh83>jeq`=cw@$R6e)o3#jl{Q!VdCWm=rbX0QP7~2V zTcr>y@*z>pPq({*UVe%h1sd70U(^Fu!ZI;0C;^^=`zw$>- zitp|@a)wReWc6^`)#mZ|vPK9YFhtPZp*@-kybxF8wsYR8H&El(_t(a{bP8WR)fr57 zdkqeO2$Z&2 z$Fr&ZnBPIKVNO!JV~nRSJme&Yq~S=v>OwbP;rk-K@3SiPC@)ZnSDI32@O(-{U5foi z+Nc4HLQG2m7<ffe6(Vx6Z;IQ)@lLQN=Y11wYxI@Jh>i*TZ*qh)-3EV{jF->rHi} z9&*g5PIrLwV16@UK~eN_@drx5OKPIp4Nk;2gUJS@UValE+9tiO5};$Ko!uIKm<|PE z7@`VJQxLzyd?Rc}8t4edp5K2PDizf6j?ak+1SS#+`01ex-kkvPt`A3y zIK<-6LCa9~2p~o?9TIq`q%>%lI}U-tOW(V+Ksre#)AsebhCtNdl&ud`?2On!f*gn! zyDnGe(wc7XY1F7M8^@F24e%c)e=!ED@z5ABAwL|KaLH3qYnc~Dz8fb?A*Zte2aI9> zLVPE}b{Vw%Oj}U_S#7R8o^JbsjZyw*Q9~WSmk`XsP+ogkOrEYikSJMN1`O^zk=m$L z6eerf z^n?BC@Q4fP8|qcAI=@rScXRgv-rkp=3_GDcy>W!$^ouuhd8Ts!G;iMmOXb2Fyh9 zOFb)ID=+J~(va`R&bzO^&8dyjD%k1ZJP9Eyq~;WCTU5n5pAFndl2yAO|0qjoKncDs zf|WxKy$hA5$%&+pEd9pHbv-^VvO2Ekjc|CkW^rp3GWhg`eZr~#)e~QBHq$Lj1Me6Yi%cOjZ|`ryk1cOZTI|eB!7D|5+!Kx z>JbU##uba(a`X&3;VnE)JtPtvu8Xb&`GQUu{0l%Q2zn7nSoFhiqr@Qjrd2{&H|Y@d ziWT8!Baku38)U%Z9}3m7hCl!$@~=Vdu03$#x{C-My1dM67g@t~DB?<^+dx06A`uVI zg7gP`du^opqKb#PA_!tf9f#DbqK$t_k#T!@H6Rd>)-W1guzC?(a3CVf<% zb9|E`wDZ&O_Qo|ls22q+-dS-RCG`Vgg+)F{iigTEczuBRqpe3HuAuTDv{(i68^NLk zZCKYsKJV=5g_lw0Mx7 zP@9|4IaF~`-5f&%;`bomaAmEF>+TMPPRDyyR!WV5Wr$8|Z`4En2f@yOU}2c4PfyH_ zERSrTD*a-~RVq+$C#2Kh9NqdZ?aVGhVJ(NN=X^Jh0Tdq%+QQUHI-6mrdd*R+hQv`;P}ZU%?jP8T{(_Zl7RFKCYFjP@JSh~j^I5->w27IyR*K6;^u z7lUR^I@4BQp)EHb;%ak4Mq9Iaeq8MLaImnaWZtSIv4@I(`e(dcWZya>`U}3(W>G(p z-+@XVR!(_dP?Z04ljD55{s*?KzaC84W_`TsJn^X>znzWjc<=i`5Zz zx`zeFDYMp#p^kJV->TfyEiZ|81#{NN)LdGV8WM?%Fmu9@6ED6&_(O`$ga>qd={&Ig z40ATfaTW~t(Q+RLq9uACYnGQ_Lfa22pj9W3&7$~s_Wfr!L(jT8=Fu!-~z#+Yk zQnPnbYydyyqY*;>lg+_sE2V445oBiwPeH+LZ(@ff+dCK)YW;{+?F>b`1^SR z$J^id>GHQc;e?XbyLfL#!Q6}J{aNkYZw0`%m3D0-mfx_{bynK_R3&EKlbjU?n7H)r z@$GivdGZ<6*A>>6sZaW_D(f!4loF+&<;bIyBoQ{iK&%$#7)M3kclsI9rcYDY$@e>Z zsh)AsC`BJn=gCucsfkOPwq%w}et)dHfZOy9iy;nfi@O9mCs(f7BzWwCJd1ilLOm+X1}I=#{goITv2Sq0 zfj%wO1FrR7?7{GGV8*6v7rED2zoWhy=+^UzQ{JG{&-(VDTeNV|4}=#)G7AS5d^!oj ztbaa&NN}|^aD8Zy{cLC4_$4G3J&HdI)U%e}wThBUN6@$yLudWwwLz0|_>7}f7@dZD z{ek`dYd9p^_EkwL-lrQpiJcF_b6s5!%&p%BlJO}bu#p{YYDU!bfIILR#ws;l7+s6- zEG_X_9XoD+p5a#3m+u>l9r3L95wh~xz?sI%AJ)IIFGikYH+^>raHQrh`i)>0@{)cM zEI5{^O!=Qxna9|4QE1U?sa8LI-c*ItvPxIE-bSCmVL5A5P5j-bb30q7+l&J6p8m~5 zO{&+NGu)g`!>PRxzxJ~w;L(2#ZkE%o1bkDJZoDxs%hiAJ%8jo2x0i`vzpYS%Rij%1 zs^kt?q+K7vkzgjs#<`6^FzD$=q^CH4%fUv$3IKnB0pQVK47-C|&RMJ_4d`6vE1Q&U z+PzDPFzDocQ%1cao-~TObz`t0k^RI*OU92Hw%^SyL2XcjD4qQ-WqDn_wLjF8Tzd^J zG9WsmW|vq%iXV&Ky$UghI+-?HP5`~L>$qNGFEm{cqSyWWY6rH!x`+yw-ZMIbPp#e$vQutgH8rDqa0x($}BOPJj)zHY~(uZ-Cs0)jXLGp&DMKy!yH?ov&6 
z{j*Nb`2}(WzAr48^0K9)F=TDoD24ccdI8|;O)5$kQW;2fQk&6nYJT$)+KPtJ(ZW(_ zxRax|EO204LN1H@pazer&W#lW4X|E;>ys<=9vmu98#xw@wHk+lB}%FkGFQSW!h5h1 z`Cc%|q|GSs<3_@pbQz`nFsX+j!wsp2L12WZCZfE?xl7t$X1SYWM(doXK~qtRkHv3Q zBnsTE+olK%q zPM9=efUXynO;*C+>8Lb%uma!2kf|`p&>ahn#c78x>P$|Me$1oyxi@U+q#wP$8|x z)e?ES?w>?84|PUDr{27#Rk0%etwM5jTwGiyuZDPf^un?-z00-V<;+#fuO_+bA)XVM zUjlpkygXf^vBcULiE4ZJrYU~BjXgMzZ^FOOTgp7lo8)Rtg1t#d)5c!IRBv)_iL`Mu zJkp(HjR@#JYLeV{hl~@~zpGGqw-Pk;O#OjbYZXyGsACR94A;?u>*ri%`c00u*ftmT z@TBJ(YFa7s15dq$il-c+;SqwJ5+7yTdV6I=6uzra?G^Dq4>WLY9Y&5Qcw8;i(F)azIi}a+?26l-ER<`ir{^_~i^{FF^&b}em^?{G z`y2;m^Ah!%K1X1+7H7H9)6)LrdKk?pr(tB|kc{~%()tHk-OAFk&F}J(v#zYPlp&-m z8+zx_u5mNr@(X|?KJvu`moiZkwO$|;NDsn>n`9J-QtrT4|9kciX|U9C_NT3^LS?Te zTxo2Sb@M(7@(d!y5SgZwwDW74c8@gLeCEIpU)jn#v6Wx#%FtE}WK4r<$fmgOvUyvC zMHy!#fVb3DN~#njxQvG2xr{ikxBNQDSp{K(1jtex&Hogns?zuc{#qQEI3D0Q)32v_=vG5TDlcqNvT@fg8$ zd)B0L*H6djA0dW`i-nCnMdZ6P;)gXxR$VXZ;&O>v-g>&l0dU92`y^i??VitvDiI#U z|4uz*#0#eY`OFe$$-m1_{>~)%1NaQ3?F}Q+l?+f0N`p-|18C( z`XtHmybrL9L{7cMAe41|Fc^FY^9uE@&bU8XQ#veJ9;Vvta2^|Ic-d1^SO3Iy{U{As z6ef8uVH=!nS;VhFzKk07bUPl;K}hPzKYtYNG>U2_v>;vN8^uSmjcdp;Pxjp^)h3T-?W?>4c zsr$JnQo}sxm9t83ww>|shFMP;t4;a~ghxyk6Pel#JheJ- zYK)z+^;8s!V0UpCffze=C_9y@)m+ox#2m|x*nkA9zSjL85WO1eeZom#2&$YJgExKeI)9pHmnkVZwE0G zOFhaYgul%K#Zn3XqkzSka3TM!4}f=|0AXms1uS`HxFjL(lfg~hPzqXUAT<@gN@AWJ zLR>$z%Y!zvD*P_hD+jWytC>s>S^gF1bBzloF(oskc>(TFU(C#}q+a6_yEVRIb9Ys9 zr&?>~drT2lKY~l}R>2KaMH?`;yI6J6f0PjOtNmcM^y2z=y9?RAQ-(~km;G)MOQ^6I zap?4dXUg7q-tYY7=7W|d=|sj&3)d{w6#YiashLO|rlS_islB0$7_7GGu!p;7?Y@y{ zcwk|QlKdb2n^FAIyWy|HK!fUAAcfIFIGDG(`9g5FSA&PzQ5Z-d;5Q~>48u%8`63J= zpy7RESc84DXgAU@-OTNqkelowev;**>eEesCC|doCrQwK|8-qSNr`1Yk(*9q_tjDf zkQs8=nwmeVXf1cU>`(jEdf%R9AFDr&=4EHICfu->{i)n1qgwRXzB$IM#n9>`+i7hQ zTClqPI{5t`5jgtqP_+cfk)IWC;Mi@SvO~+jc}{EbfW_~=gu$i(v*EI$4m9W1KbOoK z3||O2%MSZ~?a@dTVb)B`VN4FCsYy*a?=BWK6&4m|8~2ymEJVA8i61I#HY*^tNHoZX zA?oD(u~>Wh@m!XfZU2!kMmrfttRq52HBJ)R7yp&W!Un@4Q@e6t){Ln zU)n}jR~^puxyIGTWFr5|;dGg9|Hp`+2hb#xdzr393i^8Am-X;KL}_Zc$gQirdDp|1 zyx;ma61q#TC`EU{{&U(qP}EkmlY>ibZ&OFTc1Y=)b6n@Y_;Yyt^4VK~&Hq=m362P{QV!69AO{$P~j{ zyeD?;TaT+mb0Q85^A? z6imq%?3FVe^&7aFNj+EiCnf(di zAd|4#>Bb(F=1g6i(TG=4)!P5Y+l{ebrev8m-PFD1^GV6ERYRy1NxR($LlJ$XjH{Cj ze>>jJ+hbHZMB`su(FRejkyUA}0BwNoWe|jO5o?A^A`Q_W z5$gE&*9Ksn3R07v{l;0xcoK{8Rk)>9n35W5>4lvC(JMl5O@z(oq4w;ltAsq7HTuv# zBn=Im%EN2h*2`&Br2ru1W4qWGQlYI+(4hl>bF>rL6>AV?YN!wro;MoUiL1tH8%mkm zb=IYzf}7@CLqkIR0R!6Ng(USnIeU*XEGvVRWb43ZPTDo8=Q$9rO8ONV@HD{6m({CS z>A$pa0Hy)Qt{2n+%8HxZx8V*#$+5TF!#uasHvNZwEcG?c&6r9xiFLn$Jx+vLEcCEl zFU{H)(Zib24No=X2Du{t1e&PlXrzl!<1d&4AU#DoiP3t$+EZ{Qm_Ei`y^Ej5s-671 zjAnx<;2esX?s^xbhoE?T4Sk{x%7DIkENcyk0T%Fxyysqf89#U_&+m?5wy1BU33^>7 zT+~J{$K~a;(4hGNCT8q4vj^Mex$U>TznW!$_-5$5F|DgK@gS#xaIDjJQ0DUTE%GrQ_bVqS6TcmN|(K#BYSEb|e3LSN}#fj}v= z+)#s;o+@F3D!OQ{1d;FT9$^M}NcmZm8D-C%dG7G*5B-As}~_^kxJc4|gi5 z#9+aj7aA*#CA;D29Vh~~C}KZ9uvcVwan?Gq%8W0EQMkM5p0CNWFB!D@g8rZ7!IyUiTBITV9v9`#t^sXy z`Y2|}yssDoAqEbG)u;`LChDlWx}>&$Lra&Wzvdh+6wrybk8jUHSq6hQrlO5u7#pDY z^(C~N02n#Ay(h4gB&dNgPz+dRH|>}t$N?L<3(ALfS0TP|yctHH`K!5er1m&KBgku6 zr}Hms-2Sw{#D$FZ9{|Kadk4roFn1TChPKv(0DO)2H+E^fpi>|m#OTs8AHr};mIl3I z1o~{D4jZZFsV-R${4NkcD3~!jbV1`DlNLD*%R1^N#L~xtkuEQxT@=uH97FI$wo311 zLP>S_|f4-{maq=Y^L zXiYy}1TN{@uacdCtIYyX4MHyzR7(R8-hmKq1MLT0QCmI4JdkcpTE_XQ#SVh*qcX{l zDX9BZ4%VbtL87D|hd$}dRPd_jWqVfR%UZCLC`>;;k9YO@FLybK%5wi^3jG6%J^^~= zw&xts;j=>N^S^evNCmj(XV4D5y?Pzm3``x<*1tD?S;5tWn6l9Rs)K_84LBq7Cwg{m zt^gTC950v~9)Etl^eT$n?-1Sbe{S9r(O!F?2*m%o;s*sN`ZQu6Y92@V?=Q%_EEmI! 
zADYi>cp~$6tls-lD_U`aDD$&k5BdiN{V;q5s{i>KEO_pI!Sk45_XltV;kQe$#IPq& z{^3yU@d`Sq19eG%_1h!@kj4)(pxL@U_+h>P(7RxK?V|7SFAL8eraTH$4@_yj{LWWU z;{NR`8<#MMCl7gM%%sEs4m5|u_U}M1ppSCy(7Z)>gDtHFbK*p@iS0mT{vg8Owjt$L zn$9&F(4Xulx&PWLh62k>+#v+zk@o+0969N~2KBNo&1>khu;teX% z1*t&nPm6K$F9(0g_mbZ!9K1}gRt68N^s?FPt16&LCfLt7UzBwU-X2k%=Ewd& zjJ;)8m0i?7D1GSe?mBb|($XN^-Q6kO-JOC6NP9@>E&)LlNeKx-Lb}7b5Bm82XXd@G znXl)=x%Xc2Te;W50?bh>qJF15CUhnp!V`NK-Jd_hbpK4X3w_35iX_+*)B0*eAk7o7 z1vF4ebl_yJ`wc2C#C#7h2S%}~#TgbTB4Dw0^R5;!3H zpjg3mq2C;ThQEzNvyrPi*Z6bF+x&0*fYEXR0ujEJFMttWG`aY1IASaYRhBIe#TCQo#Y1{3h5~!Q7|ruUVC-V*$yID97@7=#3*Lz65+Sj^C<&8JRvks^NSw>H{TRh*>Up!)A zZ9&NNKpu5Cl@l>A_R1b#07DQLh4x9HYwNSyA_0ZU_nE^PY1P}$NVF*%e`Y6`@&Al2 zcLM4UJa~yoSA`}@d!*2LnSY220C2R{SLJ(^_fF58lh}vy6awvJzak&h<%)<&E4o8x zHcCggfTSLdxvOEiP) zC%1cm7(e)0m9(s04>ff|2a|Gh4Stqs2khwglv>~+oWA%elBfnf%D7P7{@4PCb(Msj z>?4YzJbvrqkw4Zwi^HTy6sF9=HiY+C>PTB1w$v6m_lIhu&j9tag!>$Zi>tg%`zY{- zQCP_nP_rk}2diKGWihGnwDj6lUKz@v5Udzl(=&**92=DD);;<|5f6>-;079s#e#Xw}lZU9v~2RxX*^prR3trA$aC|Tf6F*3?TJ?QYi_K zdfFlV=db!@sYavQshlE{HDQxk7|?uC^(ixOC;1OIg@N~gPU!bXB6!?U-Kkpx2!N)! zWB{TXwD&j{lQ0<;8VwO3pf2Fxd@=$<&Ge`Ztc2(cf*tFbMT7OXZeapKnQn}(H&Gb+ zsXH&Oy*DcpUVs0X8vGTiB00B(CU>*I6NCT@b_#^&#cB{aQx~R?pC6kxw&OS7I*S{f z+Aw^P7syoKb0lSKCKujD4}PTpO-TSBI>G|E2O)pf-a)Fg@9#BPT6E@nnQ?MVACh3U z(__8huKGCRr`@RMG=B94dhm&%HzA<^?C7_>lA?APua8BZV#miE+r)XP(9xp+Wnbig#%Hw^VA_Qv=OmwFLh|e~xmT!bpwbAF-Q|{^SNg19OQ;MooqdpA;364Qi;cVfAAw~>asleuq{c& z3E*T7D#7#|p`(6>pGBZ-uQ6gp;mb;O`?A&PPx?NIMzmp(CZ73U-|AvZOn2$;X=rd; zy4Hv3zhnER>%CR5gjS`950IW4DlQNzC#xiJRD-l3XVkt(;Jxtx7P?HUA$A{);f90@ zfGk0KjC*dMF4CGAi%>&*p}BSzT_(+*F39@w;q2FsH5JqEO|Cwdp!*s2&@{PFB0KV~ zqkA11SHE~El*fPj)VX!H{Ne2qKgE=;K*r7Iq$=D+BOd>t;l+$7fc0`Re~v(ZD!f-T zZZ5DVmC^vm9mP!RSc?ditI->%T&c`%6d%w#r~tuKC=}c@Ay2BR>`O*G+aK-)SceZg z3e^$j985V-+l{3bLK9;wq;R?3_;k$n(awPtdV}m|k*tOLmRO zwIK)|m_Cb2878f{H?WvoD9~tr*X!`)_NG^cI+nW3)eLy=arjZwzwPr20(jI-3xXTN zHp;=NPI|0ODczJ7NinL1dGvcMdN0*sPUez;T0rTpWL6IE*53y;P#`&R==lQ?GbjPT z1W|-|$u&|Zy<~@4OcY1FG*kkWEMvaQCZ*+xe-iTN8Ym)udizm9 zF=;|f7CCro-P!=g1<)S;Z+9pPlMY=cz}kEIWEqw;vNUQ27y4+J)~@+kHE^m|=&hbQ zAUcNs&TVX_zh75Z5k4z^%a7f}4ACEak;LbO-J)ugk!+Du4g-8g*;Ipb512Y!OX=B# z*UprU5_*PZiwD%7GR2Iz{eFIUWt|oXlNR{vd|qYw?2I>7D}2ZW|9!+KrRU5PE_!KC zWr2`!aM5%EmXZcQAwhL6CU|v#Zg)7PwVJn?7ke7mU?%niKW}894PDmo`t*S7;?se} zgYiZGTn?`Tz4h-0oB6;F_0H?Bhl9@<&P6HcZei(e4T19Vuo8ScCx}3y(yJvg9cnjZOr=;KB|I~v zsEtX_Kw?p0R(n=V#j5^xU|48B;552k)Uihkd!@(Lr-?Obtui^d|LARZKU~jl?3asQ z5lBC3;*jyS3x#*0+_t-y}Z5# z9M}XfE4~sr+(bN4L^^+=G;Y3HWWb#HC`=z=h6T0x{Jh=6t1D4|=~wXjhRGxkFYQ;T zB8$PiiRP0M2y3#Mx4ft-YnbDl+tv-K@ z=q4=x_1Lvwl$|7Nm~>umx#{d5D{CA^_=VY1njIfD7n;k!01=@0)}QURjaXjHf`T0w z!iGvR`Qlehj2BoTE4mlIXkXp$HumnvQUf@sP7k%~^KvOIX!#Sy%~|Z;PUvJJRxsK% z@A|q1#w-j(4MR&9>_^~%{&lrZQTO+{U0v;&>zN#L8^>R~`);cI3Qn$QI=S2h_7`gL zq<-B#YazGP?WuJ$>`AI`?-I_uTEUQvyKbF#`blJcSm)5a6+kw?n-~cMbg=nhdH}rJ z1w!4AQcibwyV=J8(PMH4b)D!~LqM;??)sbQ@t$!1`iSy*%;FURN`Qt7oC2$@0rNb{ z7-ADT0{N}S{^}L+Z}Pm~+pDNxt@iG*2j6T1F+oN)@+q@7h!zBafqmu8)dI6V@cULF zi@D2M3bkiTPPJZh^L#)YDb08uDc+6@Otgl#V_m*~u@Iz4*kN+7nJDt>3Vs`NH_jk= z0a;dA4-1!krNh?8lZCbSYOW{I38BqK7xV5%thDde`;PVDx$dFkd!euod~YX>5|bYw z!a^B%YX|hC=1;*_ht6ia!>rrqL&nQv0q0zlgdJ?;QYNN6x=$%bIF)??y~mx$n*gx> zuUi?Jny%RR0>D`t5E?;G5PW=IB&$Pr;cqGBmtIbq$Jun5C3w~ax8F+O)a$27-YE-N zqkMJYsPVn*@bcnewLd!`>~69RrngIAjt7XS4E zJdSrCB!IfzjXJeu@kRn1LuOqq{lvGP{U2R|2ywyRI&lxd%Q3W+1(bWb+YOQd?jI;x zzc=IO2v$q5dgHq4Px6ZZ@i9wF}fKFjqwBRPIKE9o`z zl;g~Y5iTWM8zAif&yUdfFT#*B+uZzI11-g%he=B(-VdD?Iqtl{1J^bK&S>l*5;hpe zL!L$ViIx`Mw1tq-?}vQ+jmza1m0^2)kZc4BWzu*{&#%qyPM1J8}cASJr^{QYrztGLyRRaAEs4 
z*oo|6w!yd5f1^2a+__C4npTxK>)jP=+V{qG>Hn~=N?fWagir&xqSVZI0OZ>kph`Q+ z_~?wk(^n+uNC?{rp0X(xbh|tjF`v}=x*+&HHBz^wg~3~Y^x`1ibvQlVA{D2O==j(t zlPJJM>ZS;gQY1ew`*1I}&gj9*Hc~F z-f2X2vN$H*sCH61&lIV%2l*2~+j}7!FvP;UY3niK7?u2vL2j8oX~42GONUC|8enZ6 z4Q#);m!jVVWs#BB$jtE2W$XrZcbu>G1=zQVF? z?fvt5bIz#z-6?yaa@~uWB$^ac_yRU^)rz1P)S*9dvhSt<=uvXPuGN4R-OJk z#nE^VQJyerukYH2^v723Q~1F~$>T0hW1Vs+GsEa8U@wp!FajE=(EUbzup zN)HYYc)QEW0PLQ6F0M;7J`u{3XH5e6NT}SosPC`NMe?=x+s#j9>3XdM-;)JbdT>=D z7)G8Cc4_m=&%@4Zqk*%RCg*Qe_@K+bI6#8vvt-k!mE+TvKL^*(8am94kQ%;~)(PR~ zRg6;S3}!D2Hos1ORWQhZ7HWVglmUqW%?DMW=bIRA6>ergc4WtYmeI=s`>cwQ^EmQc z+nWtnWaA^4+U+JCaztHzES@qrN~62rHD;od>5FyA8&9%604IWvVX^pygh+siM-PsR zZq`>2gnbdtCz}wk>&~ku#1*)}Mt;!^)=?lGLnCFLz3De#!(2X|=s0PpmHmyCVnx+p zt!SfrhACU56aA5+?&O6#um;Le;j5M-d<_ z{fFI|Ck^>hoUta`tYzp{tl#1(0=hf}q_*^d1=^xcr2IV*nTI^;^x*X`*fZ%zz$QCv zQ+}JzIr%LMr8n_Qm_Rpnv(}*&aRx*`?*x2lL-Uu{5s|n`ETte#Ay`o)FHx@{ic5u;3tIhQ9u}($CxT2oV2U8O zX2)b!3!-gI!YvA)hzG@G6k=5S>5m`@j7lNLAY@ixW$-AX+VP15lts05g}fJ0XxjD| zO)zln0rL}?NJx}6rpCFnzo&mMb=YAy1Sem0I8jCNNUJ zbVW9EJ%jHL&VhN0v*e*(nlzZ4mp?PqmUnx)>8a0An{`H!kqa`L>nMhYq%c-$P$WhguI9 z;B18g1y<~SjlSwJKaopXwK+xK)EDC358gU`c58i#7|h}6lTwhz{W17{m+{_iAc#0= zXPvIa?71odZ(ZCdq1IBY`1~D?wlOm5fgOIlxJiU`H!1z`6@@vS zdf5Ew!4VU3#8(5Stc`PI^&{nm{+mfTlSV31ze0NqRgjfD+s zVPBWn3uA-wP5Q?!V;g57pPY3kp`zGu4lo2cC*M~h_A8@VeGA{|oruai5*svT2PYV* zEp#}o{{GtUS_;@lAt6%|g(^lAlJVNo!N%Sm5kv17lQ$!rd6)+GzFl{N;tSssje5x* zx=#vIzuRp=tG-M?6crYFH}bM?-c|8%&>%!iF*U)XG9*qFIYWu!w{g z8U$VCD0lUL9_9E)RZ{Rn;B+3BI4<$8sZ!t7kLN38FYFEJhU(6sTlZ|SkL0o^K>42Xm+#dPkr%+8G!rPHa75|BlCVD*j8)Cg z<#lVE<)Wy1o1;jhm!ocH!#L!&&J$EXwCA^TjFBs8$Dxqah(GX?<_& z<^@y%viVn=)qf#?YvcoG)!BYj)b)I4Mjdj0rzm{X?5rqf7^<{B?i{@M34hLfjJ_V7 zw*P&xo7VI03VnKT6DAWZuw5NFKIMoRI1Z=4F(6$k{r$Su7zNY3UqYO^ZP&lfE~bAo zJS@rxZ9D&xA7@@2{A%}WoVep(QW^BRCjrVKAgeoZ_bmq8_vzKo0y8x$G^yRHf~*Kf zb)BKZWb*v37jd2xZUZzcM!<}M)rG>>aMe;9*#O!E&u_x}-P5716BBpu^K1|U(eqVIw0)!5f;rB>m5JtU z<@q?06<3d?T(<6+5kdn&(qLRY;>aHF(wN!JQIDpIXQAy|jR`ClqI{Mb+j?@)w!MiGe+pk}bxk zHGvGe>L>1P%H7$kUkUf$d$44_6fKPnip{ob0M03Tad146xZHHHUZpwZWX9=YppI1T zBx{t(^&&!=d2##(75T zt)zu&`89`Uff2`!3hBkT>vKXzfcaq!f0)1c0x<8{kP`)6Ta{{ED5V#g*a#1WSu8%D z&wd`s$}BY5Q#?K~DF4u1JSg=4-?Y>M_@7j_9w+Vw-5)F{cxhWE#?jIB6@n%8ThM84 zzX6LH+)f#NAX>0W#&0iTmMYA#^ybC9GuC#1w*kLP1v4lUz)XapypD?h4Q}4hg9%_a zPC{#Vz$etD+ZzeM9!+Y4?Kq1T%8M;GgGmZir=*<&b&H9bo5mj>fyK7O-@1MZQ&3--&-fxz53i*A(4G7_&ywk1$IcXxY)m&a62F<@}#mFDBwHF0`6WEJ3pa4 z>2Nvo`j40?8^>?vtUAH}{GqYtrHa)s=%;V{_@eg(Usjtni_P2I-msDGstcP|E{aET z5!V#Xa&`eTKM5w+2r_iv2|Ip!k?Z-xIDy!@mb@e)l4pgb0+c}#8ukmYV|V=d-TOB~ zjd+Vq=!&6!dh=)332gBpFVycmY$FYsFtoKXP#vwUa4ly-+goR*dfA7+;L)PN@+T5iP36M|u$pi}Ifv z#-vtVt;YdCnA(#B560v+xN*yHJ)3&cgJ-GSKhkhX2?KSIQEgmlm1`7q|D$s6w#Xr` zt_Rx0TXxy>GTblO%b(2-#~WH+bI`HANCs#;6sl1Y@{h&}GrRV)sIY<91lMSS%T%Fh zoDJ9eS$B6X-Gbgrc_jY0vyK*!B_E$rqEl8tE%`J`W3`auRh?e<$BA=c9le~oo!}7l z!iO6PTF@1CSZ_1E?NC(g2{`h1v*~l_5!$F#UgBIi(2zU~5VcSwl9A5E`0sKR72O%F zrhw-O`QQ~5$Zu7K6m}zGGgj^44t;oMXj*r__j760}jtAu%{(sTP zA9x!TXggn?TlTf`^Yz-eyDZ|0uK=I>TSdyi580FMpWZm>V$^ElPzC~bz!66v`R6BB z^ipINr$Fyep9yhA6!<(?6%pwnTv1G;3Wg6@IR(7mk={SvZ@mL1smFxz65pNc#1FXP z>`7FyER%q@_aCk~>T>zE>W1)nECvyXoumjeaF{nviSy;2(1HM8?{Cl(Mxd1`EoCZV zrWPXomXz)jsZ7fgo%NTofaD$E*qJ-MYMz2utiqlz_emC-6LrXc`aTy>x_b$zIPmmb z&3vT{h;u*SOCL5Cs$g%PlIXy^)>T83vEA(6p1V8l@*+=G>9Uw^!_JE>w09bq=aV-i zJGo9sR8-pfB|Cq&&T>fiHbT11r^xaaa6Kaq-f{FyuF2%-8nCbUXb$J?)oyJ-MKBil zrW(zE%-i_GypM&v-sM~coI!)VUxl5tIvjpJ`&qQLy=oiyJ$G)lwrTb*Xx9iecv{S9 z%d5G?>}9hKcGo$em?ca2PApIYT(ap*Cl>c*GD1V6d1v55;iPuvUF$gq(Sa|&Wi=vc z=F|F}juP=yKh(VBY5G{Oqyz}j0~Pcfcsk znrQo|sfxXW2`hT|a;`5gzDLxnZJxDl0t%WI-%*K{I=^UpaU@f6K^(31pB=l8hYh*B 
zcXTl;4iy6&PEXelFl?c4i&6AXL>?FbIozOhdg)8V-_tr4OnH?H2VZwXrdb(7qiP7) zm>8_XEY!;tE~l!j=e8w`Uu}4mTyEr3oi64KOJmRfH%LR1xe2UTgVwqopPvYh+o< zQXqwJ#qjbfZObXY$!lJXAfJ1fcL}ek$nUl!S~1avdEbFF3-)||3BcXNK#0}4DbzO? zr9|sK-)eljjSu4jVBL50u|)ijBA)va=@;{xY3(FD43a#g6~>UxgNiPW;FDSNg;Pt$ zlR+V$lN@1)5`T^tfiR?FmDz&*X}#ODPbTDOrld*+dxsRJ=(2N4B1gr@>I>2jC6arQ z@HP<9r=LyoqytqMp?NHgmDeH`6K%jl-u70< zC&|5PP6HruHkV8Qp0^U82XtaWJ@M`MnfKiR7iXO`Yv_O^bV{_NIZ-OP&qX8Y+KH? zlvlqpnC0Hj#IF@laxy$w1`WhXdTqv@3z(2~I4LJ~Av?5i%^!3inI&9j;t;6u*6wP; z`#py$xChn`4IMqc`)^VsuadR`YRDJA9y>lDkA|ipTKXc5Ta#Xy>N;>WroGVKhEWaeH8T=BPA6c2Iw#jjQXM0&BXnn5V|py zTMldXg13~`6eZRT=Q_`tuu{;)OKO;eFp&v&sbhmeL#Fzgrta1?bU=dZ)PBpQ_5S7G zVJ)sQnp-3~;~34%^m`vrEo`7SJM=9k#N+^wdm4l5s2@-+#P6CBsECgc!6IE32wv+_ zwHj$YA^Z>5m)3Lw?Pj{Qw6u_j27+|eiT8t1(0^22k6#k1vdsoKt=3YoXsX1`*d{pj zo;(*y_oK7x2#px}XhRy+yqw<&wU~uxR{_I9#G0==L4WAZa0?t4%^c|iE`BCt~Km8ND zP+(DXt(e!(n?l?M%I6K|HGU!|g*yHH20U7wCG&KEJ!R>BXBToT4hRV)1L0Qu%GifWC>=(aZx@dJa8*;MfvBAFGqK~SN2#CZP80lpibt= zViOr{oU*xAe+gMf@{M5>=&Ex&5E{3$3|PkwZt=h@gg*B^7G%Xit!iR~XMjoU^vb9)-oUNqc$7=PIAthh;-ZukUP zp^)6(j^3%%!pXB3Tv$M7-QubX0v}1oYNj^XB^7vx3n*+;_#jN^BZ<~Ok_xR!9NKVPlB|<&f}yUbViz9Srp%S^c|k}K3j8N(<>$r zWUdh426o)JG9OinTcfUJhkP4YeWLxO2Lluj7g7x;X(4*Vw}xyAPo)@MO4WLI`=vKf z!YJLEvac#B5$otWLCg1^3JXeifNv_om03hIKs=2`N*~HT#8BU2*m{{{r_o$G>f|aj zaK>ZQR+@6|7Ngf|8?my*6+kDPi>CXuh+p z{SL7R;Ws5*GhG{Iw~WGiAt?!3IEoh^c@qMD`VumpmU15w46O5b*j!uffIa9fS@UVL z77&lYOik4z^}CXVvZ}}D!^|IJ1M4eDS=vrVM=9wi`j~~-o|R8>sh6+uz$EwoR4T&X zR$4clz)Wz5n)RN%`EpBTMIQ`Igu;$_l<+x;LdAKg;ZqEo@ChSaRt!00pkB)v%%KG* zp@U+iLWIN1p7y|X6LoHaB!bx>0uT*Vs&n2o%{6ROSP19UrB$z5$fhi{prtjir)azn zoPZ1TskI%~hxDe9FuX^Kh@4oz@)l1Rb;QVheVpA}x0Al%0@H_$OEf-<4j^6w0=0G0 zzue+rjX;I)Yb-e&GinWWOw?JAI*d^D*K?7asX9##uUW6v)$C*)arO%2ErJ9q_`Pnf zu;KQQ=Csh?ry+)D#(9T-AhO`m1rJ*$)GYALTK~o%wopQJsO7TUbeX04Z6%t*x3KEb z;9&4YYl_W*_+l(>mb9FoeL6It?A$)_EysWU;g2I|mUbvUkkvOgGu!B;-q$yRpiK~J z?)Rf-N7Sawu9%$kIp^*HggTZjz^-)eaT)<&o$t8s&(@IrlrqW^oy}P&1r4G7iT4@pg|;r`VYb% zn9bA%v$QodqyMZ0 zgl;8>rvYS)V~W+TGH@YR@~MFXfE695mF@&aLfXUZ%4*e@nV%kxU4BMg)gGP7L8u6o zYTyy zn};ogyfOvx9YQm`C7FjRWI`${%n%6;$s1!>ikl(zIMI;yc?|d!aDVmU-6JDG}mCtP@!e2zH>9nH~rcm zRQY~&CWV+Mxfe_(m=PZj+p}9!h#E$Gon74Yd{`qQ$lZHkd;4iR*m+?tEe`i^M)mI*=)=Vx~H(gm+bIgW?PDxcJz_&pK%VkGdktfc+FJ z(i1Lv0nyYgGD*SFRt9|V@04WWHmO~sQKbZ27-FAM?$mmir>WIu)R&Wb66guvyd=|N z?vna)7~j*;y?-Vh?<_M{2`JHiphtNwQ3l7YKIpdmQg7TZIU`CkM*68o}Qsipi_ zvgq4IN$@^Y!90*R@|EQ2q8@zpE2L3<^=PYq5EiwMHCo;Ii=baALoCHx+GgHdfzU8w zVlF#!vg(r(WG!PIRZyv{7~UojTDDYz=SIoh3lqq#80=J{4H>x`0<^44chyMXp|L!! 
z#FpOw#KD~ynWZslO1c0RwF4N%V==W>9X8X5)xVZdjFp)JZ||4s?zaU&AmU!T=yY7((DyY<3!7-I-nhVAK1$ zs${&-Qfz{TP08}|2c`;K>MxXAGTFJ997v4}n~Hg^x$h~Hz zRrnWjkHOSN5*05!fC>r137yeN3{M*5aGXE4ND0oJ%0>v6YdQOdy&xu zL~6FpvfsnSQIQi7DPg7!ol>*zD9kCeU`xAby!RQ4gz7mSrVy`eJX~CnRa6x1Wt_W= z=0dTb#qbnjzm1aN4m zGQ>5jvm#my-rcL{fg{IgN2SsJBs2tzprN_L(Syb5>uNIl(;|0+?6EZcg&#)A>p(__ z*uZd#bxwcYaO+PhXv@2X?s<8HG>-n$2d-hh`Z0G3y$jlAnMJAI9D|3`@#1;I7)32j zY&rP<VHpiKc^8eNF)l^3?-@0p1M!Plrm?OINGI8^&;QyC8BLRD2!yG(gMzSGJ2M z-`3GJy-`e1=hR10S2SW;c&eD}OATmJL@^*?aem{o+uXD=S*HO_s$D-GaH5=tR<_^t#;3&qBV@O5B# zLQI}0=nj_WSJHAxJ_h`%+Zs9vYwVS*(QxX|Z!UYW>Cc}hUPb_s3K9IM_My6zqBZ`5 z023K|{tm2H)`8{uqUtoS4My&0NWBi2$3D`nlVfoO!S zZadR~?Qey7>!NT_c9ziW_@n9D_!V&!58B97W&Jm{W>u31dQ%A2hz89BjhLG#{U6R| zbwZ`W&Re)Y-Mn0jaiqVE3@J!4GPR5*xDKNmdFW*|ii?Y-pyt?2adOXuFj+fP)w=&n zO=___VK)~wRL>stot{D}5-T4*fzF!37oM3)sM4KFhWwhY-y5)Oc2+w#|-eHiOCsaf-*Z702e<$ zSAD5H@TX3sXm(4b?g75=Y46R%dh1lIH`)9@pp9u(i zp|SfG2Z0!VbkBy<66oLvCcR)t1$5XH>%<0~ipO~KFd5;CewnfiA{-BK0J7Sd zg*Z!95y~B03Ws7tkdEhQHuLp5ok_JPi{p2vH#*wuwp)=N+`tJXz13ap0&y}Pu1lDh~Y%=5?m}tQorGh zn-7-qD+qXK)gO|F{=N0$EeipVh&T~&0*LtzIT*G4;kTH0`7;H#s=2Vf96HC;%rZE2 zE`w2!9t9Og?InWA_7-X+o<+vH^AmM2M~C_jmrl;^@LHsfag@J736+1|xMEX4ZCV*9 z)AcIgB~YqN!Q^I@$?}#nP~h{we#+0(!i@hbk}i!ufZ^(fu17IAMbh%*Ts5Xb78d7c z2Mjg6q6|=i{+}G=}_ zfS!FdC$qX6E_@+copR@NizMQv7L-8@HKJAoevZq4OAAG8?4@#@k(+qv(Xl7632qU` z;VtK)p-0C_qXlnPckIiGQs%%WJfXnsZure#5l+Q1a)K{u98s1P+43wu~a9_-7L=oi4&jHK(XNW z>d4Ym)Bkd73Z8I2$PXS8_i(voSxZLp^=g0mCx8U{?3Yd3$6Pe4S!w}LOOwk4Fk=%) z3D!(!9I#pSyks6@Ht?6PXQ8IIYzs*tR^O0};~8_utbFNeDDLXXrbKTkG9sEnW~1`QIz6ouOrg$ot6ug3?uiKK6NTq6ArZfPz5nn#nO3$<*xs3qIIf6>a< znNy39@)4~FMA(p1t%%hTI`-1A7UZ9!D9fY6J*@*-o6FU6g$8JXm;Rgp_kT_h8*topgLh=A3a8+`_*n{D6}|+TkK*@p zNjIbZ6T3QeyO#nv(8|uspRYj~NH!g!&Uk0p~_lYm_X?f46d+Wo3? zR|xAYBOV^$2{WNUWlh>m5Ju*oV~_J`fJN{*kgaWX8)xEGuCbcY-L0ZogZb+ePXBeO z=SP4?c=&ZKq02r?JgiF@cYEd8Td z_#wMBgz&ueexYiaOt4nrP)&f^uxdvVT60-3*gMoKfurddr%FHIcsoErfk&I`JRE47 z0;+xnlPXPB(%45a(5iqaPn|#vpb;rEr+K=~Ku^mDs51ghEK4o?O)S$K*+LMmg5;6) zH#FfD5}+|mtW+h@>E9E^!5G3-k^$mUQP2Y3mgB9@^4P09j1%{TCd2B&`3N}zciro= z;d28+%S}-ak>x?HBScfQL$hFb8JoSA1^o{>`}-O>A0vA`!6T|e?%dBpzP~T%KVCU* zFVMGtiagL!Fu}GLxRd;PVY8)fSvCH3IE&1e)a8{fp6+*U8WQE!9!wCx_bNsla0j!7 z9^_=PdA|DH8mFf_Id)@qg?q%H`u;&u3US;q%1uZ%ZSo!GEDk{AdW@!o0#vB~A2^bym0pY9jb72mD{MoM>|Tw%+6zcs z4Xp&Dg5rN8bYqgCcEWta{5Hm$bdog3K_mxvXrViz8s-0z>vssmw(rQBu*jhvWaf2q zb5-tUv6BOis@4rL zF^=U0?5igaZ~=nIFqcFdT6ZYpb8|&J0ofX@bR1?}`L&@k;#sqx1vd^hvLOMz%=>$m z?(|w6)5>88(RiiO#;W1acKt-?{<#ZGk2#T;4MOk}43Ot?ziM|sO45KUGk?dyjih=& zX8Oy5Hnj5aHX;luIkO^-Y)ei}Jj5u7V_bglEd=>2_1Y7Zfh8o>`C`^ys=tPe!nijc(@ezhP$ zeqkRrx}N5@$-{p!%%fn}xf_VnvKm0E4NK+KOe1{t+L|8roN?mDe`K+_^MzfgEn$r0 zcmmR^i2eOA5h)WRWWsL>_^iH2w!N0wbfei1(r6ah`|&4H9G@vCAQfA_`i(s7QPw7t zR?P5v>aR~1@%3Og8ytK1I-{*LiWBjK&Gga&`vmm9`3!Q$;Eh=1#%@W$)TkRg4`?f# z+`>`}sTUX4a+DPk9$({KI}{gza70BGOiYr3=|2GJiwXFLZ&tZOq-a~hHESbA)+wVW ziTQB;Di-r_qn{?dH9fqOFHB*wq+rvm&=2-y(Uaeq;K07Uh>>EIx?-r^>rhnz(&M1| z{Tw50cl>S3Qc)DfxBU#%{CX(M@+_Xi}uaftE>BH*s-X2Qqz$ zKvnp-Xee-aL4M!>Wf=C0G^$4bi)z7Va0}UAHpz87#8tD`nX`_~T7DRzNkR6h zsO_ssj0f^g4h`MJzo-VpIPb^@T&swskbKTqDq~=Q7x%FWA8W<7#uH&qFY1 ztI^Y+0a(iL?jzs2B`vgQ^oaA`sKHqGb-j(f9w~v>$}3#L2DT?Vj<3fjvwv$q7V?k_ zn{oW|&0vx(Z7?=bTz}X<9RFb0TpbiO`qO%htXg5x(pYus}K^W8;y z8-M8D$F$;QZREk{cP1QgThPPjSIpbQXcG&X;GJ9)-$RNR3iNb+#vn{8^&`B=jr*Mw zR?${bsqd5VigO`LZ&~~|avrA17cb0&bZq3`88vu;xfd1~D6t6CY0DVN+Ao`4Wk<}I z>t;D&R_;;Lt45z#a5Rf3EPa9Yxsb|l>HZ~y7REY>{16r76~y+hvAz4{*SbPg!jVu@ z9hg~71`tI1g@HX?TnH)@1t05}gk*q(@K`!fbfyG;S6ZCb+f+wU<9Ebs|E`tA6<6EX zNX5_Ijyz|btxNQ49go3^FlaQsV`2hACLU*E&2?{DK3uk-2cjwZ?!LyM=>c!YKHzSy 
zI&=EU)F;^gC>oeLw9F(adn`FF+4)Me`@^E~#@r0HJm$^kNJSGY)n-Jw z`|4=dRbtrrwb>9ZGqsOCa;N4;=f4RYTe60Jo9arV8mNwY^-Z8Nw_Hc&uvV!JlajeP7m@n)nK6B_omAA_wAVd*SGM8Z4AACp-yYED7`U$98~xLL zNbkN*9KhC{&lS}iiw>vu8mDba!=s`0pg?VqfDQrU^7C?WguyV5LIz8FjqQaKNL<<_T{hC_uC)E~yVYWl3>-6*8+$HV;y zd$#bBwgm!U1`1)mQ|Tz&N2zcpjfyLk@|i2Y#kB5NwldpT;GA1*}>XVno;yOrI{269h> ze!RhE6_1|E&PEB5l2V0ddCU(!HgV&x#w5Wg)Q0_r<*@dt{zUr>_C0sV<<4CScR?iS zSDjXF1KY+pA>&PW{Ra1jfK3IN&wavs@5K-7ujkN8Fa`kO|9!{GUzWrs*O*|}*<-AE zHt}7ml(;qY;YcmObqJjolE=1?kS!2b#^Jvl`0JOHis0GE)JgX&5VUuu#w1_<_a@-W z>~5?L{0$BGWSQxD(#MN0p=KLvd8W3*w_k^w*x57vg34;5UujW^BH-dpL!H3SXolI+ z%O{CqcK?bIX|v^BL#zh2?2-F;52+||K>UFRWGScSO9Ts^(Ej}p&ZpQkk`f#OiSc)Z ze8+~6lMs&CREA8U*6$OtR_(T*F_mlN4`E1xKR@8R!1uFi`A<-7hYN zI`V4JT02|Z05T`epWu`RrjV(Un#D{_s2|qhBUiZfMEb#GT9B=|vLEKBtMsV<;>Ehs zW;>VTq!7nc*-9-)VOR->304}~ztF;j!PqPJ=W#(iyk|*iR7Hd|GIG3T+&heCjX!{11T>f{0a_1;4e+9IP3ivT-C%{()TK20 zC6VI=&jOWe+-wofC9s(&{{2T>v~?$&6GCU|aw<$q();RNpu)oi4OFzznCLP7`-h11Zod{QKB;TJkh`JE&eJ5p&_jx ztibVW{l7bApgH@zcVu>ZDn=yh2lOy(CDd8yz-1z+zyusFMt4`NA1ykYomm=hXM`Va zYXkzfs^xGUu3HNI>Ogp5P$>Xv+B>CxYTAxW5LjJgwCXEcK5=O(3h1qdX-iHnY5uFD zmfQ)erqKco(S;B>F^K8Mxc53fXyFrzrrsHfiChi*0yAc{2Zkfn5z-g}$~CY3Qczf7 zt$=du<5yyUUNvat+!1sn3PG7KU4QfS34W+AR6a)rxu;3BLoRGKNcDvgomW!jF}Y_o4J?GKG_)M-tLM6$x_{3s z!i%e7P~QLzLsFevYZm`nL!WwLpVtcjhz~piGhJ?i4W$nDf4=-;_Dp73$Jb zuecxmt8bT(wa47(g3MAe`GD?ORU8@qqGh?s{{ZFiKsUSPf=9}F;`iL5-fbaz?kTNX zDN@<+(Z`#{ydXWWKRx74Y%_duwY+q3#6S2_0J+pC&B3yXv#JuZPR-;}VB2>2_Rl9N z?J|WII@bH26EcKfClB#zzvX^E^J<}~qUYuenBoH*t`MzC)%btvH76~}D1$_y-PXFB z9!ozFD|!1%5RIEFXA)-@RW{*9=U!U0RzB2}TLk=!e@&rW!{vzQC66701!zD(j$sA| zT~YhVg2LD}Nin)D5+WV_j(bAm);cm$dF@kt2h602V2fXM+!tf*JoC!$ygpeMjr z6N9$2{e5I-_x!6m3BWv%t52bsnv*56-2cVdTSrCJhV7#;bVxHOf^-kvAR#c)(lB&~ zbR&qAbfeOZg!IrQDcv9~CDKTXASj$Y;QPMc_d9Ey^;>5x{}R}HKXKpBb;or*h*shk z%2s{|)`w1%b>(&vUlo|rl^-<=kr?8TZcOYcZ&oEzH5p7MwsVb zAKf%?uG}@#VgjG{ zx4vQAT0eySVrkbYQu=eX$DaDWcw|2C?MblW8lSW^8Fq=+rUi>gal)9<@ zEgSUUpG|t#qT3*$Biku%J>_AqY_;jKhZM>Em~NqzyWHrjDqV?{xF1?dK5Cl=_Lnu^ z$py|{2ns~H=r3#NooSJ3@)E*4KwWDiT9h}xuk!fK0~mQ0v*nN5^xb}Nc&LBN+q){I z>z%bTi*fn(!7fq~_22AqBvO5~RulGpTwG-BWCs$LW%O9T4o`o^wMbDe@x1mPyL(c zBA|%68pbRf?MFwglnCC~(`VWb-2Ay(>tyGr*>~pseLbfsH6D_!FoEni(wr@@Yh+O! 
zD|J#w=ZetBY?|2>9un@(bd%FGmgo_N)7&B*fL2*?zsraxSsRp&B%OPM>Nk+b%=^iX zBjZH?_s_rzM;ue`)z#E_iMl^ueMPda&A$WE)4o2p&}4byh440TYF73S&Zgg$hWoeQ z#GmX7?RsyuZO=c<3yaOTs|>c8S}0C(h-2ZHrwThi{j7vi|0^#-M;~w|PgT)3XH=Y6 zBiYA}ZOqPaKa^;on#Cf{>e~w7rn0K$`7%l<@~1#jXkCVhIbd4i=2b0cPhtHlUv7kx ze}+{?@++X9+5<+`(zh^hA-n~AQv3Gc=l6e+)y6#MlpO}b?@HRX*>vs9xO!8i2#>*$ z+>8J=H}7ngpedW}i%+?fA0zGy)%y9o)dX=CNfX-%*Ckz+VE(vmUAPSj{>T=)@_j_H z$F@iRs4r~VBlu=hm)J^IsI(=_&>9dS_IUh~)8RY=V@ADo)}q974x^+5zJD#?&h7Df z+on17hvF#?Xru!Q5DyE<++BD$fyALQW634NYd$0VfP351dK#r-m^h*Cmj}bMN4CfMxFK6$yK^tFt@n`_(ALUyddWNA z4A!FiY0|MZ7QBnRjC}Co$J2#Jzb!j`cvGi1UXS_x2niyk-;)Aj2n!lA!`ij;;tfGh8YF4>o3-hFismU`n@PGuPpbe9GvrDG1pUP(ZkRzP+G78SpSG^uY?6zx3^B6#D?jk+Tx~e- zaQp-!DnpV)OJM}2&&Fq8xjdQwd>R>mfPt7f|BETEU2pVd6@bP1*?aN{j5BgywKgIp zzu!e{!2tbSbX;+JytD(;M0B-|=u+*taCRi^#dGv4cAc)gvx*vdiR7hm$eZ^EjivH2 z+l%z&``3y)HqFJxp6V$>=+Cmx_x)JpdwT#^H6RJOWWLSP{GvT7=gbxptHgW(Uzk$dl#yx#ZdwZU$pcoW#H0?%V=O$hA zw)h$h+ZXdb*2i_}+}*kRf&66qUbR)J;zeBjZ{l}f#paxiy205j3!c@=q6VYQ^?J zKOErq^cbmIT;oK8JW-956vlgE-S*3{q_ioPk_~AfB1SKeCikH(oY-BU#wJsr&&g3l zlE280fh}N6vbN!US7G2V`cNzwi3Bv?^gAMG;T?-W`%UxLeDz4l0z`*Bf{tafg`i{Q z_e0R1qL>FT$YV9nxm2_{ccs0{&7JeSswP7-O{glPeEhz6){@gDvOTyBfF?;?(UClq z2!wT&RLVTgpE4+kAZYFbA@~3HOHg}3-vXe(OHy0}#?s(%A%L>gj@eKM{Qgman@!7> zFPX=lCGK~9iPoJeRWb&}IiR|z+Ycpk_&XpGRLvS%RcZu5vfA3m_^O~~nR-p{{ya#U zWfWk*$OP2@yV*pm;FhWVmkrqi{!JwG69gxQ^TKmbf3huGoI;78)d~22Z7t!>rf$9X zn)mgq42*Nn$$j*e)9@Nz)u&&-5 zKd`>FKTEw1uDxImgzd`KO$72@csw3Er-_b zk3IFXJ!k7J!H(Rg+^Kc%WycVRmbKUKA3TeF!L`q1JDrF5;&$gy8@UYs{M7c?acCZ; zWQ4HQ&^9_%%YWwM1CdW%4-AycMpcUe5+@-7Ng+B;KOj0zc?iu^kY6Hw3oIfgbV+#a zd;am2&dhX*@=59lA(+-WU1!pBOlk7wk51bVwzx5-DNV1)=8Du8WB-ZLr6kX*>%y%O zDm}P)qCW!VZM@gY!2Gv&IP!D9EyHaD7P0q#Lv^+_^9WS{NRE*RY7c_%D>G6AG>=CR zT;ao+;ABS@w6q+q@rTB6CqQvSbBp&2&>JvN|L2Sd_@azITDAt7z*W%ot)x7Zz`(?e z#jZ_;>#P955+b-d?b~RnM>QPaQ>doTvR~#rJu49#v3*u z=t(yq`SbMAKT*%u25v2V7a;DTrC*+<;PIqod3n`=T+ALOsL5GbDc@?&wP)vFTf|E_ zqytdrf0X020sD!!F%6cPs}8WEX%UX-Huky|t)cC<5DaU5eUmLG(+$OQ9SI@+jzO2O z9CI!M3wQ7P)?T#Fd80VO^elJNoGD12R5&PWWd7V}0Q_zEW7}5+T07J6=t3@!Su;Wv z!c0Ws?_Vw{LP01m?@GG|1K_hFD?k2UB*Msd+{qbpjoU2O;lFU--wEk z|1P2`!y)ZrhW8J(OcuL-d)T)sFlhoNFC`=%-bT8D;KdbTUpk z_VN52)Wy2NKm}UANe;SE+=*Jy2GWPygmY9E7FjQ^X=DX0Ob+akQ_2$#0+f8rTY32N zeL9}{H`pzYykV9qbeC`sg)VQZb;v_w|^GQbMY*fQO?iiTlDU3g%g^-}_e>tYo)yRKAM-l7#OxL2DK z@iEm;O~OpHwTz!MiuaA8S@}qPD&kP#&a#FFQ{&`&C(URo}-E7j+*_vkcNfc;p00h|K!RXCq1lL9ai zx;&=)c{+*6K!#CfPOg9&Qf9fC^FX<+P*yfPwggx(Kr;p?k|A_|ys@4E{TLjW5V3Ec z_VsZqJijokpITdC1^&njGt@j+WQ3SbOt|wpoEyKH-PA7?6?uAnfi=${)6b=AR@fJc ziOF%&9^WLw7%h7UV2s(9Mf*9USthZ={YC*NBG5HDP1?4S2$*K4ahru(;sLxC?`2&X;}A(N6wx%sg`@-gY!LWPi!kfo2827 zgdvUxuISom?`5d}=8oT$7d0zJ0yaa~g&?D^me}5II1$1l1c6WSi!32CIp{pAaqsYE zWjpj!q}BLDJdSAsId|5uor7K>>2qwQ|k=xX&+N-tp`Oxu<}dvNb_ zx}Um5y6v-DlToKxbwlLki;q-Wuw+aQXhio<6DB-Pz3z+WjBOW$qM?LMa^Gy`<&dD6 z9-q8)`2A9Ts7>Qg?+B(Aq)HbVK=qH@$i;J}X(DOFD#6tN{_f8l`Iq*t@`f=Fh;vi# zX>!7rJ*SqFCA{sbz86#8&K{y!2O$!~5DFb(x)+G?Y{6l~k8bbbWu$0B`SU*WhDpzr zF}g5j?5?$mf)P6jv90+h38IzgB*^V|+*lw0`C?SU{uGg%F*P4f<=YH7By`M{z$uc@SMU-!9#I|81w99qxl)>HPivsrVMkKhHD1FU+`4 zyAA1uwpQnzg%-yswmme30hs}W9Rqyy+ZoE>Qa2_F=%{@dUu7{z-oNh=}uTRPlEMW6hp=42C)%kUTGDblKlqzVn z{gF`%t=QH{Zu->*w)&TYr0H4Dn*w*fr76y(sHBFIZo9p<$7iw79^KEAoK9zEP!p7Y zR?a5uRwyRC@%a|8Zja1=1@%;Xn%vSH@oO?0R(Nfw6(fAq*bD9Ke{W0<{2{{C4g#HG zh+6X?vglb=5JBS^CZHAUspq{~A?;(xF_8)H56yNgIy*R(Yj>8=YVi%-J;pF67YM2M z`16#ht|yi&N4&J?TdD+rUw-|W^geVeoyq?~hAVa=F!@J4AJER}kp*lIp+xbw+wS>_ zmUmQKgq|j927vIounN0E9L3CLpOh|qd8XL8jq9oVHSMx-@_%xknHQH$5`^*Q|}=`_Qy1E>rXL6$1n3az-htXQF{(F$J|A9F@{aiyRG;|Rx9sw3P-aw 
z@&>GIu1@22UYt|}on7ywYLyu!FMA9cMrCvU|9W*h=5Qw8X0zB++{b>&<${cq0wt!$ zmo>G^9(bM`dntj^TKxoaZf8X@6Kd7a*#M8!y<2o5hKWmIh^{~ z)7pIhG<(GXGaa{Vp`-MkCzkG!Nm5xnIoL&f**JM~Y^*b7azWc!%w1O? zpal}*Ip+ElIA~~%4W8XL1)59&k^il;{3A)N#4%{yi)aZ1^brUifM%5*@!CaoPFugS zU0i*H6&3rdVf^Q;v5eJ9(ONsZ!3c1;aimQ)zGzlyEm0(7=J^jEK^-0V0bK|n|I$@F z_!n42Mh77B+e`>FRs!@|+ykCZel`3|fBHbcuO)ErUOz4luMO`4RVz37`>Z+k;pJU&jgO$o=XHIW3G(X9fI+I;8GQF^pQkn7!}=_5dp@n8O2`SnGpa!8?8>}R`l)g(}k)76ViLU3VSBK z7E&7A*?UvI6!CG0Jod4e3Zhlz`>)s)IWua@j379KBWVKg_Rc){ZlAl15a9&OGjNTX z%lahcsd;McmwJ?WhxF#P`}a&YfEJ6#SC=AXUq|1Pld0gj6i2dxHvY#K|E)m+S-w3# z-rX^j_MdB8(vlnQuTd|%`a^+Rz;AtzDk>3>#6<{IbfrT-#nFM>m|~jngp86xzqd+@ zq1dT2c)1zA4DB;^e3}%5G^^6;c8tZ~r9i2Q`kU?+CLDe?2c)C~KiEPZfq*t(kWy$< zLRUX{RiQIkj;82SfLP!(O)ZI!Attdm^c>0J^%)~|`o`Jqs#6DIULf?67~m{?>#mX} z2l-XJomsp&3kY$XGtuyOU0(B8>&3G(6Eg{|XQSCZTX}AZU&Da1d?aF=MA~g*1e+P~ zCb6C7hK*Jgd}z*h0yz=WV>`(ixdYTP%n2XYrOpna9N}&cap)G3G3Y6>-78jY!&}RI zM|g)j6A6QNZ6^sDH{+|`%Fi3?4hiVv6z47*CI$I@Q8`wJ>_&(b2zp z-?f1G8Otg8mSt#e$3KQq==AE>_Z@F&$3u`j6NCdJ7Tf5u0sR@8h3VFO0Q`3)$_4BtHiH3J0~)sOVzCaJ+F zKl0WqFjE3!GZptp#3KaYrVusPR=8nUdTf2RNdh-W-%0(`HoNV-r;U4e&mP+s-4ks# zr;k)hxctLa(7pTq`&&r2x^C`LXDO*jK*yr!i=!UHj) z$#igZ+@L%{d4zF|BlnewhTCEXdT_1?#6U`qvA<(rBEugZNp#%nAnqvbph1>D8d)uh z2fK#<4hy_Os$y&BL>EV2B%VvuC8hDW(zPz_33Vd*!_}qz9yHH!x6SX z?{DjfK~AMXjU-^x83J45U3>_?b+Rt9cX;}svDiOS5*%2HPw6E&Xf@h>^5EAiXG2mz zoYb@%j*MGcxhtr^miGB$eO`>tvYoYuVzWl}*Xw9`L*Elh9IEdP$XYp=?=Wrw(Jx=- zi~pg%0^L%ISfqi<9^S)@^zHyjVuDglC(h0naifkq?Xgmy)9~I8&S*pexXVIEjyX@Kk%^o@rwNt>7KTsK#;F7efgV5_?1sT9)ekgCHBA1Cv1F`50jmK19ui+Gx3cxg-{1cx_l8v1d4A6mn$s5Zt z<`2)zCl;XMeSh7)c=g zUHmc@R{Wn~D!|-1ni@eDYN||9Ngn;m?7=-lon=__G2bLQ)d>OK5--HUQ@R4S#r>ol zyO`bO5f?p-k+zOqoSup$$j*Oezqt()hej};F%)tfKM$s5?>TGxp&=4Na-XfSdfFJ8!tqr zJswA$?VG7&+f&R=YW<`^b>b@aFvwNBSt=Wfl(dy`KM{VaHw`c2^<)pte27dqNz>0! zNu7fqWUm+QFN?fkRlmh2GC;HgRA#hjg{I1GmoZWj_pf`zng7o{o&`fs6X_iO6f3r9 zs}Igron{{HTHM!l>hxBnZ?VN>eD#WZjvnSYVfYlF;n2_w@dy}AJs1_9uhztNRRC^| z3?rae%eGHt4f`$5ck$2M9hDs71A;>9! 
zZdi8m|Fqe!_Z05z3h)-7zup3w#P%=VqAvUgkBRxF#Ih*uCegyV5pjLx--j|=-$D@N zztH9`heh;1&;`f`h870Y8_)zRLJM#07ewF;XcVrk0!Hm+lmh5n zBwcJIPrIE%EXfX_!zuhVqQgl`=?)1}BmeVd3NZSdFfHeBAg@*AQXpCMo#4bBPAtbL zUQx_+dxCd^Z*krEUtD*WWkdZB3?>9T((a`B35BiW0BuBA?}8IXtQ*wFa7>Q@?In>0 zJ!Ku+9P;GbFL^|GglJqpxt~{ZyWT)d=9VwffBsJ|z<+pc@Z&lC;wuJ7msh|@zzv!a zk{`A&dJ6gPD|r)iE(#%>59oy`64=8T$Oh#0qCVtYA}MMvjBtq~vQ^7ff>p^?w~oDz zvB38MPRP~MyN3ZoJ`*)ma9UrOPsr_uSVkbwJU7o@G=CefNrL{#+P{O#N#d8T5+ni_ z<23Sj&~+4cj3Rpxv=T+OTm#n?ka@)6p?z%EU?4FTv732#IQaJ$6xNdE=0s6XJ?cxqTtUF7HQ zJ0VerAY&9qP%K&nk{^0Bs2<}Hww1;()^NXyt~jksAK=C#4FjeFrr_JpVBgXCP?Q7u zJ;W@up91XCG%{ha5a>Uz&ANT9^#8?cz3x*h3I?aZP$|eEDD$+`JQlqnQ$_J2WKy;U zL@B>KB<;Ur`HH3(Cg^toJX;aa+L|UHfHl=X&}D{6&C4-^LcT!NcP^5OMU zrHIJqBO$_ULbq?tMiCdZm0)HEM*sHO-x>SLJYF25rin}cbkG%73&QjX<-Xq#%lt-x zU))e}LDE~Uk3$2C)yrYmu+)*Fz$-y@)9swd33rR+F3;Zcy6>`CM}6PF!xVdU+tF;f zAbr(DbFt5BMOi-nNAR*b>ilr(`VYg)_SlU{Nvogq-#N#bwNh^qLP&6#meV#GQpMxz zph$82emF;V@#rWjz$CtXU^uK2qum(yH;NE-{Eh-2z#Om+Bo6%5c@p0mNcXxcGw+Ij zHEDGVw#o#APBY;?iNH43L`CE~`s4t18<6jacw&P0GJceUERoFI`<4+lbS*BbKYVr7 z*(jA1G&poqt~BHqfAsQqP$1utc&G=iWn(hgDL!Tt7}v?mr4f8}^F@@{Mz};I=7tr^ zB2aS9YZ!+*NGLB)NQ5ksRRe(TDPuIJsh!ytU22tqhO=_ZH&H)#(%nnTL+5;N_ONBkS!%N3;AC%O`D*Eg=rCp-)MDoM zxvB9em{lg@(~^w+h>*ahZW2$HU0|PX|8hn&$II%Wl8j2syuRJe5HDZn1})JduFNqj z!EJKJ_;ksMGX|$W24@i1x?i?I6%G~*ZgZ`ipcq!|u{|SM@yI$DO&6!8j8l{3-Klja zk+gp@X6{2!1YQZJ4qi{0s&TIyvM~3b{zdm&02*f`?5Pv?bP&K#*Towxoy3?odxx44YDT=_*yB}0t5!CDhDxW z@36Hb9yfcyOt0q+67<7^W3<0Kukezv1TmU`mSm|93pvL6hwRx@{403%Fo&%mD#tIr zk3Q((z!DLS&HzIOeI722IupQ6xGAQVim*}wuu>#-Q3n^N66dvbPu)aYC07kR z@42vC_k)Z^7#~0B*3*0;{JE*6!UVZy?Y=Hy12-Dl?@<44?X-tH{iXN&7&PYs!_TV% zhUVz&*B6t*z0kd;Ws9l3GJ4kScm%- zz-Fo6FgW;%N9!3L^6p7iR5=|{70udc8b$B1E8e%j(4d~ks2nZDh06jmasMo7LB#GL zDR=73d$?>Fm&=X{fz_gwKIBIx=0YakCJ5va^sua-&fRHACw#K{U1)s$+9hX*H0*}1 zI41>*&5U8H1|G1|orEHfG)kZJx{NwhD2SUvy8CC9N0+SGc_`_|OQltBzo!g%p{X1g zRphhqXpC>4u1V@Qsj3QsWFmH@TNc%r)FPm7^S4D^SwDZ9`p)@|3^mH`!8!pgV-IIV z?jf zz>Ffl76`M64pK#=fE$GS!TeYHndPv-j2Dii0+EK=M4e{H`ZX+GQ|nxz_^v{6Wx+;c z3^IpLmOC<)eNlghW1%KHXSSRqsn62kfW50)j0B0`CKEA-k$o1o`PI-iP%7kJZEc7-MBJyrR>6r7UCBI(}HGYqOYKMgXJMC3;R+cFnrnURRr^o7g?i!Mi3IWa{L@}?+VpG+BynE;tS)*#fveiyax({9 z=$k3tvsBcbIHv|KRW`GWBRg*p(TDTJzOTCH`u4%tmUq@KmyTi5pfRY2kdmngN@@_4 z=u_UTL{!#_GtplwcLYO1Ps_bl$`KR5`b5I%cKlo!cUC^*%;U1MB(rK?f%+5mnTbf` z20Sn0(*{la`U4m00R@WaQ>5M#xo|HR8d+D&nz^X(q1UT9Or-6mWQA^`)^IR!V+{5e z^gxqvY$t1qbMF47;?-o=q6;QkEAEi@-jOY9N>+r5x?KxOt7fbpIYag>3QvW#!`W*z zUX>0;&Q2B>;YN{q;L)13B%bPj^~^0}Vl#Nc>Y^0O9_ZCq|M2WmH2pMC`dj*L)%Si^ zKeCoOIXEm?o?lpL>$1d!QCCVF^Y>%R0#lqQ$IH5%BYcN)U2IT10tY3Nw#x5|I1A%6s_(amI<7dh}XJn&ZY4Y?K?--I!8CF57oS1_F? 
z_+8##KTc#({rvt$z>X&{{1c4i<}|ge4^_ZI7$;@)Gh1m%=4r~HD`Ul^9~(V~**$O- z;ofOS)aK-e_FZnBxLk0UcJGe0a?WEAbT&Efj2-sw+^DI5gU+NgZ)lkSzYkRjIF57v zIvgz5}$~B4guBn)T;;+du zj+dF_3UfxChJ8}SLuzVM5?*j2ML1sIpByo>+bfnD_I5?YP!awp+9WsXYk}5I@Ls}j zoue-N2*ZV*_yZGcdxb%UuR2GSyevjq{0M>$JV!U?!sG9AKG3ReUaDK^`@Kg@Fk+4$ zbea(zVR8v$v>EIrp+!;98y47MS?I!)0yQ}fw2G<-=D2-xs~J=FhTctw8{f?<7cu}G za1nzyE=ScuINzC&+2Eqk$AqKjpxwL5qS& z`L(8Fm61Tw?yuXg$_88yvqIbQbsc{FGdDI?#3|4bJ1sCxyW`y)<02l^)^UaOXa2)S z(hF|5<4gbqW5yw*#&cH`S(WgHgo>%D$);SZ!c61Pi|NjDy8lB*_8xG42#=zeuOedbG*rMI^mh4F6x z^QSGNj7{GSl{@g`-Vk~xr9~@+ReOoP+xVs z7n--&F@|EUM^&MJXG~myT9~+rjcqTaxA2p1S4~nBbKTHb97hRN-0+0@yX*?sU|%R%Zzi9>m-7{DzMzIXZgZtVb{Kp(He)rhW| z6rY_m?>#UR&VWBMk}7EqVnd7SsVV$px)$A3SyJN1z*X9rY(;58!$pHq8`S&p!Ha&U zaKH;QA_KQMP}+)-n)O(SuVXWEVPn}NFQWcP8GNCv9GN=~f6 zJ0_&|gZ{Eb`AWhI&Q%FprQK(%LmetesUfp$L{okkzlohuosA*_7(-BE+hp+@<_>yI z#i>ectv%&vKIWKQVKqnx;NXaVu|HEjdkK1sZ8PD)9q37HIt4IJ>d(S@Ak8!0J=aZbf&?OXY_{Lvj9>S-jbLBd$W zNJhlL{TXq5WuA0L52E%gbKD=^Sxca#k&7Xj^5EqJvw#V&DiiLHbM^|CQM*ER33Cu7 z6LqK5bvq~J_TkICUJP&jRsT;h&s#fNIr2YGn3fn#n4V5|e5K6J75$C|vPS9))_=KF zxVw36`NVhpw|hFLVvhB$l6Ii+Il9F?7mTAVqFm&aO{mSK2{WA)ge=J6Q!n@!W|d5%V6nzG}#hJ&cP%s!REi=OU8| z=P~;4$FM*Bu1`1!U2DdB*M3~o@?%- zSF2AX!K9I!9~Fi8Sn%tE=*0+pcN3Eo%rbkK>@ali@6_o*&H5Og_$ElS_2%RWaYsLo=vVtm%4)QLm7E$=oj z0gK!wc%e3ED-(E+pGvpb=5_J;H~kKF(+pWqQK*YQKMZCt6BMrT!)@*IeO!mw*HhNq zvS>4xR@_L(_x7L|$WblhE4tr!Tl<1bMszN^XX}$}PyXbXD5n_3S(?uEQ`StpsNL+J z*4RAUIk_SjHuMLxG`}17d4Hx%`>N&}>txZ4p)Oc_52gWy#|$=mcY9|lBKeT_*S+1tDOmyVb<+K$` zt4MAH@2+~lupHwT1Rt%Kd~6PmXR}^bw=*2RvadOFZ`6!#Sm()|_xx>eNZe0XF%$$# zR>-zRiJj#W+N#SH6?_ibAc{B1Ria)Y-#%=qjy1_ei2?`h;?nLUY!eisXqJCcQs-4I_I14ZWX7e%24wrCVq4cINf2Tr5}Wh2HDmQ!}L<)uOm%-%H35<%!0my!C{ z4JkDFs3HXACDpN5BX#LIp>E@g#K)V&9XdgE;g{`H7gCK&Ga1In_LLC)9~otQBqoEK zt*sX^-(!X4RSobjU<02LJl^=B{CMTg9q33MUmrf+OgkZI7cO2zG^<4~HRQk*XZeH> zC(7cL94d}3m{l~+@4T*%Ph)Z~6LMAS$@;qM;%6wuRtTJ@R z+!ywF>>m5IielN5{d^iTDo5V*Q;VpB{#laYPkZN4wieC?Yfu3(umGRFu%rBWwKYu$ zVGK>TOJ&{}(yZL>)!WTMjn{*Q(WMSIe3~kD=W>LXV)9j!$T``&n>@qYkN8z$q~bLC zMp5YjBb0%@w%ToUgyAVNXgqAG=@;JXO~{(c136t5`?t1j?aw?>(+n0 zLo#;Sefcb8s)G>l*Xw~jpGS54(98WLhnlcjd8|k=u@P)0 zJkYFamkME-&JG^*kp8o$zL&T7#IoSSkU*1NzpMIMGbBXIeJ8ihuqzJKYq#_{Wkc^& zd38rs#hmcrSvd}N)l>$bwXTJ28(3`dHf-y2mVefD0NExS$^TuG}f?k#10Flk1}o zBiWl)`;Ej-FR`ml{YXcLNDc>F4T>)a3~FS27;d5Q4))+ftV~!~m%S|(Sb@DDU6V8d zA0zBO!A>+Ck=SkliZW`HtsRELZ}KLw=k`QTfZxLM{q@tun+NvpZ zmRIj*`nzZ;-D}dI3kx#oJPu}BME)kaFgbX9hytCPg(X@7zhX6(qlMLCA{7t5tqkG# zxfDi3L#^qQ>)vE%H+S*^(yX!oG)K+Walh!tD>=wLAc>b8#vJ(@lOtvXN9uhi}y zvo5b~d1i#7oY||QSNXlv6Zy?@erdMk5(e2u;7BPwaw54+U$R$y0a?e-Fa6Zz*Y}94J#`43(h08V) zQ*Tk}83#jjkK1-hUxm?s+!!cio={_1XK34C&!*Zw6rBiy8W~i~I-u5$GgE*4f_{00 zXMgl$CQyy_q+K8-08dxX-d#1}@Krgj1`|uPH%Ygn`NlCWRiF%9PC)L=jFvv^r!8Bk zUV6okk#_=0&4H7fbWQsS)6lgbngrzviby7!3FWgAv?!U!b36>e{V`Gv&`ZbdO@%7v zA4S#Hn(6y~LNFaUnFJVhRBV2K_VG~hy&Xq>Gg!Zf^b9`hGzZP|p$PZg30s_~+WvU3 znfp-wed_9rc-6cwthz?eITkb{3x>DO&Mk=TaiYFnwMB)OsLH!l%vgfOR?uO8X{XFlgc9 z0I3WpYFNW#HMzKKQM?42yp9f;hduwyh%70nme!C}UJq9_?1@t&hd7MBIW(*@$yH9N z&RZl^@+20-!=h6EZKn&>)Q_c4H__FTYkcQjt`vQieKE*qT-YqHuV4zRWqv2qk>-E)v?IbRTN=z1-Gm~=O zwjoV4GN?1Y+7>WC-m1GE(lkycP1ERNtdexWjnWpMe*aAu%c*+sxrJ`USlZ8*-a!tw zoXmFGO}q^A>nHKQ4SR(-RUTHDgFZc#3<~KE{{@^iZ+VqM@`2F%J=K*DRH50i;Ovvp z801+Ped@xFb1_;^571>#QU?c)nkmn6xvfjDp7e233GO|mmS_2f5Rwb^0|p|URr zno*CMk0F_NoZFHyzZX!;WAkg6<{Kv&pf~G_7dreD?+swbg2~fE8P5 z1!)b#pO5Ks?!UU!#iCODv|NJOVeOi68dhm|It5`#ha*W8><39|?&(LSE*H z=IalKX^{e+3+vAx5j}o$&!$UR>UH;%Uz?@<3R1M)bXqe^Y5qi+B7|m>>EoHN?UeL1 z)u^(cvHV$B9}tmY7z!yWm2iJ#phFZtJ<8irMR`B7=L}EMldK7Is)nfS%=x4x8{Egh 
ztoXfRgDCGpT8~)M0?L*k#ZVanrG?RRu((FV_vf5RB?aqg{K!-$D}PHwQOBWG*;1V` znO2flHIP=R2an&7AzEz`3561sX}q$FE9KY0=(7-0fIud}%+oWe*y`ms-HPB4>}r-F zMXcYK{<_iqC6i!qlX%NlV!1%uh;ay0=;-M(i7;e;vZ9}eF_;)v{49y>Gi8|th9 z+c+E8#sx%js8K_JzyHZ050B?eax5=M+3Rc5R%=(^w`66ek|ZPE)&?oG7lz0teA+ls zt^5!xK6_%P9BHO47aYG4C}0?qrP9F0t@6kAb-n=E6rr#t(~d^u%lCPJ(2MD(0;J~h zd|wSXlf!-fZ2A+QW~UMTN;W*%na1X`SLcn*aRI+QNcW%^x!U6GP%A5!R!sI;2eLrg zppamDtOeJYaW94lG8&B&y`YdFoju9Q`(PjKCt1YE#uXj9U@&$G%Ue&-VN$&eutVUwupn8#aYo(KLO$ecnq|jxeQ)m$BKU>y-u4 zX?|0Cq)VV7YgNDGGxgjg9(lpLY#~=3R)3rUSP?+x{tX&XyL`v&jouG@JsgZ&6sVDy z;6anzhjV%bM&l#9&5`Z&rLB4g?;@z*z##qn(69;s(F!!_X=CI~oht+dtyxd8RWHUn z=YG;{{ds1e1)(Wn8j?h-d)B)m2UmTi>pbN;p#*xPFf6~I{^u#nP}n!#Z{lHb0jQ$7 zrwUy5n(xvy$wXt;t_!l0zr5)dEokIaw~+tsKCtzX_D@R%R;>~>A;j|`5ZwSOrczbN zKYAuN)t{vAlBO;UQ0-AZ`Do)U<@h&=TZCytnIQTOPY^n6G8B}fznl)KNDleD&-a9y z9cF>XH)EO5T}Lk(T8-b&`UE6dzB;ae!{OCQpY(QjM;nGw?A{(BYPt5503hC#4=M;t??5g#>p14OKZm(L5VH%A@6u8qa@#CW>-V74=b)aMopVAaxcMx zWnzW&A)WkuCV-HJOKP{_-f#*vD*1#L#ftU6zEZhUD=$7TF}LPF7Pv}yM4bb(PF@$f z*`Cy{P^nf(p0?9YD2U_mIujeQD^IU2J{*;2$Ud#^-J4a;(F8@9aGL_jiR1j#(^{cK z%Pw{*bF?&XuikLd7{jPyUx>qVwcRg!y@*_si41YPCG?HD>ak{MshoihE>gr^g#RDs zn4Ujrl8-XZ%#7b4L7+lGecub}Tan(!jH4zLdNUsIU;Pz<0 zd8yzrai7#e^X1fIjQVa%^;8nux@7+Bb&n>G5~r#%ex@e<$jr?p1qtuIZ$3+kl6gVh zXRm^6W^D2$?j2~v5IxAx)_Kg|B&C1~QU8&tUX~*NH6uCWDzf%#Gb7h`)HVErpc=tU zK1-Y5Zf~tdKX0mDM_ZB>oT^1fmXQ=L;3_0&Dp2*+GDo$2LDVY(6BH#I%8aqDcX=eJ z4)W5qe~sAj>TQY$FP^&I+-0oF3FmkjDu$Md)c_n^?u5;nUL?o&7FLMsQvH)nosT1e zq6P2Axz?js`8+PW1E=g|q%_;jhEGfiCV$TDXppsTu;CkDCWj0aCJXWC!uk(k$d>sh zf~-7oB24+u5}k52vXU;IHnJc}3HNDG-J?p}Usqx>e#?y3O$d=|`+vH+4xpx%E=)ow z0qIQ;sZykOPy|8~RJ!yU=}7NABp`@@R4Gak6ch!KCLK(a4pI~asS(l8OX&T-Jow){ z&D^=OGns7e?w&p8?Du`U_ap5v!})>f5Jurs_`F#+KekS!hrO*J*);_`^!xHtXceHRd{hPXD;?MBxK&Ow@(B(Ez0VxssCdL+SJheE-q9vxT8YQJBx2Rm5-RvQyqPA`eYm)H{eKUl)M0kkwYGxA!Az9}x z4a}GwfAJfWXjg|-KC{UgKQ93mCI99}0w#|tR>(m~XJfd%534vI*0%Z+{q{TMyk@n* zl?0iv0_fD9b#Qx4nUmS zbp4cvIL6~Oq~f^z%%Y}kB-l})_?B_XO8RH5X3i4Zhr^o*t>g}@X2b_qXF893HARd3 z3>+L}U2KC8t*2N&B}n~p=G3Ag_7~D4bCOlbWdbs9QY~++jLw zE&m|$=s3Bs$`|tdkoS+;f{fvx?t0V`C>5u**IFeU%eJB#9K0i@`UTad9s~WSz8NqW z(CE%{(t#4L^Fw5V3Up4r`>wIp*?qwi&eUWtE=O29>~sp?!1K3*tFndh>=|t_k$&?r z9^~+F_``vKs-?Xv|1e}IA1FU@n=fbofht-7%}y{OHrH=0=&AK!!N*fyEwEm#yV6S$ z?%g~*>D>>|c=;xz{0eU*d^SLpv6`)$Uy*8-nX7u$IZV^y7xgzVjPu90%FPe>-ieF3 zj#lFC6W8YY3`O}`IDbicf-a{qi_Y}y3*Q5zNZ9^Gpy+z-DOa4WNTvS zGuEy5q^Pfz^Nhh(SJ3HvV`l`BJZS<^Mqr2DRmC_p%i*VFel-r%!+d6zD?6b-DE~2f z8Wd;lLR8%moO(FD;I)yrm3ZPL<>Kc%2MwBeBsmTC--fIN5_a*e%)aX{^p{rkCUT+n zbf)U%rk|$**X;T!6dK#SslrH2MaL5yYQf!*XQI!i&FmS>)HJJiz^fCb-H3^Q1{Dnn ziAA<;vV}-00JI(|k7Iz+`|u;uAay?M68T3R2D6-n>0rV7@^iiOjStBB0WLx`33xL| z`z+#w+%K-tNui24PZQknj3DRc+Vkq-U@q)3o8^rya9Q8jgM~Ai_LQsz?l30WAQCSlIVvmp zb3bpRadYU0@%t}xby`t?aT%2=G0~l`fLIHeZs^Lt1g|^+>`=n1)8S_-xazhW*)C3C z)ie(;f~`z;X?>>7W(y>Lr%w9pKHv_yWPHFH-n>5P@v>3N__t$I7jytH7cAxW2K|8~rYwAmFG$Idu6fX_h z>g@Q8DO}={GC6TS|G8$#z&?b#(0rwIqe4ynEgFZ6yV7#F71advmHs^G%{O+>o%`-- z{MLS;b6r{8N(3neVANev%0%bp7c^o0U6Un-QutRQ)!u}#JTY9ewr1?O$%b3ib_v=AT6=5_Ip)|^BiUhqw; zknf|~s zX6y)+%HLu3sR|TF<#8u#l#ej)FpE)`4sn8UU{R`P(YCgA|5k8T0C12Iwq(-+2vKDs zzj+OWzIFvez5Z-=eQqxeRj*uH_bxr=pcLvSh`JUT-Hy1Ph1QUlquJcFQTDGsHX^|^ z3jW!66cddcy&U;l?!{Q#JkzouU?tfNs0P!i_ulfSj`h3*vc)bJWUrh0-$!$yjeJwL~k`xewx! z>ac5RF)qd(;Q9T?Mjd2aLZ0(r zCW)JnY?u`250IjDz6l()RrW-|_qp`cX7`OTN{WY95D%X(AA8UdI1zLo6?E3f%dUmt zJ8ylU`s9bb3bU)PjNUpZ{?m$JE3_lzJ7TLc*hR0kU0AKocFBI(_Lr#RU_4tIpPEg! 
zly9Ex-MTDvF$W+JsDOH{^KED057VZqma~xsH;eGUq!XV6x-0uw5Rj>GZ&N>DB%A;4 zrVp8TYUUtC;2R6az4Ck~`>zGh#oNfx1tl#BFg85YPX&|ZbXYeS~L z9wg-!oDeIGxw}t+iL?j}H$&PGjT_^KW-`jOxM-CTqj$${^-bQb?biFHz#y=HUEdG) zcG)cKW8O>QYix1?u|jVGe&Dp>tu*|qJ*za4vyRGt=8~!lt=E<13P@rk|r(OBc*x6<~F%~uXx#a84l}@>tQ$1t{^j8A>t$O zIC(D-u+zYnRw56aM**BV#1^0ZSA=rbg5jc|XnR;_FqF>cIKy>U3Vs0|3g`0TTMfk; zV~6hm@|Tt=&?I4)lD$apSB+l@?(L%1c5@*@7ixnI?<|lt(F(jp%|6tt=jLkg5i>{Z z&M;HWhT-FrX}pcg+O4`EA;~ZRbQ|EONCpq{BR@sC7{~XGI7U_c%y{3T^2=y2%ek@G zV34Zkg<1;2i`Q6y;gIyJ(EXg_ktL<3Lo+BjMGq{(hNnbF-umg`zSHhNU?qhV0OX7g zhL_3ked?to_gTrTfmiw4^9=8%I)(8yhh9va^1X>8A(>&GGQLpSZuN93%(Q(QPseUM z`4b<~eUNBxuxGeQqC^(-A+4V!5`suCBp6-t5S@H?y!RouBXh#3jM~nO7+~i;lLev zS<&g)Ta{Hdl*2oD?4@GAvUVSPj(_lMy7!Ln+eE69;?&J()~OT>;H3}_zeUnaT#KGM zKFyW#wchJ3^zGV3GFr9urSX68oyu#llj6P0Dk~ZnQ(T@^jJG$ubB8DH_jQ^;3SnGQ zT1>_y%f-W0x;qK9wt%9lnZr~{-VcyuaOc4uM@GXt?dLeKR>4tDeR-co!nYlQ2~$Z3 zG6SG4hjC-w&u&egqx$%t#3LUE_3Js>PsgNAhoePRbDC6E1^Fa-uU=UfDzNAiRMC3> zmVO?cuANT;eI#s3nj$3ps3(ZMt$J43F)#H`kAj}%;Mk^vl2(@Puy}=dUG(f&v;kQX zcM_?E*8|>*Z-5e`qQ~%M`cZO`Dp_ItpFalVaP(lGJ0|5l5hO?SLIP~gKe1DN>+c=1FSdV(TP9s1{vqZ2v-P>NSp3vz1&p?BqbdXjBVEapUc|$jZi=H za_WZ6L#7*#d8Yj))hj zN}K!Pm1GqN(IwlQ+3r+V07nr@Bl0G^tYK;C09o;@wezqe%7|! zL+)N7Inu^Hzq0Up)E~V}G#^Np$@R2t0cAzryY2^5&RwT3!Za%Bwi)Jf*?IDNCi$4J4l=z~7{$j}?LNW9!%Hlmde z0@W)<2-TTHP}F^>2EH}t9&nFTm&o3}*ID1aC{8ZN6jb)^)t<}W2G5_4oPK{1)Juv| zN~iJGd8JS%PItZy(qL4sIhiVZpB=eP$V9*dl1-Z4?z#PcE$mq(=MmszFe`*#?!83v zY~<#PAM6<#aB#DF{uojGy)DNdB+5r{I~7M7^$R8<4u3^&V5-T29Om@Nj2sU1WVqWV zzC_P|Tv{Gk%0@f$h#5fQH};=bC$139$ViY{5*>i+I18m%-mDbi-2Y7e^w>~qoD?+% zh!to8`7)albnbS~LYFYGv>m!6MoSdGp%wt11OewGf)9vygn`higGIbw`8(rIjzSxw zOHjk$hD%=~-h<0X%0M3dgE1w*lID>nypWvHg`N&63w}#-$$pWvl;nVY)dVh*KcqsA z)Ijkeom9m7aj2UQEGHiQ=^W9kbOXUo{xFRAF!47c8Hfx~4dew#l&FRr7j+E%(E8?w zgkRUHE{0nSHFzsO6B2K}@(ht|P3%Uv0kR;-zQ=yKxyD$t;5B1SNJapBZm8?1m8rlZ F{|6_A8`J;* diff --git a/contrib/terraform/aws/modules/iam/main.tf b/contrib/terraform/aws/modules/iam/main.tf deleted file mode 100644 index a35afc7e596..00000000000 --- a/contrib/terraform/aws/modules/iam/main.tf +++ /dev/null @@ -1,141 +0,0 @@ -#Add AWS Roles for Kubernetes - -resource "aws_iam_role" "kube_control_plane" { - name = "kubernetes-${var.aws_cluster_name}-master" - - assume_role_policy = < 0) ? 
(aws_instance.k8s-etcd.*.private_ip) : (aws_instance.k8s-master.*.private_ip)))
-}
-
-output "aws_nlb_api_fqdn" {
-  value = "${module.aws-nlb.aws_nlb_api_fqdn}:${var.aws_nlb_api_port}"
-}
-
-output "inventory" {
-  value = data.template_file.inventory.rendered
-}
-
-output "default_tags" {
-  value = var.default_tags
-}
diff --git a/contrib/terraform/aws/sample-inventory/cluster.tfvars b/contrib/terraform/aws/sample-inventory/cluster.tfvars
deleted file mode 100644
index 8aca21909a4..00000000000
--- a/contrib/terraform/aws/sample-inventory/cluster.tfvars
+++ /dev/null
@@ -1,59 +0,0 @@
-#Global Vars
-aws_cluster_name = "devtest"
-
-#VPC Vars
-aws_vpc_cidr_block = "10.250.192.0/18"
-
-aws_cidr_subnets_private = ["10.250.192.0/20", "10.250.208.0/20"]
-
-aws_cidr_subnets_public = ["10.250.224.0/20", "10.250.240.0/20"]
-
-#Bastion Host
-aws_bastion_num = 1
-
-aws_bastion_size = "t2.medium"
-
-#Kubernetes Cluster
-
-aws_kube_master_num = 3
-
-aws_kube_master_size = "t2.medium"
-
-aws_kube_master_disk_size = 50
-
-aws_etcd_num = 3
-
-aws_etcd_size = "t2.medium"
-
-aws_etcd_disk_size = 50
-
-aws_kube_worker_num = 4
-
-aws_kube_worker_size = "t2.medium"
-
-aws_kube_worker_disk_size = 50
-
-#Settings AWS NLB
-
-aws_nlb_api_port = 6443
-
-k8s_secure_api_port = 6443
-
-default_tags = {
-  # Env = "devtest"
-  # Product = "kubernetes"
-}
-
-inventory_file = "../../../inventory/hosts"
-
-## Credentials
-#AWS Access Key
-AWS_ACCESS_KEY_ID = ""
-
-#AWS Secret Key
-AWS_SECRET_ACCESS_KEY = ""
-
-#EC2 SSH Key Name
-AWS_SSH_KEY_NAME = ""
-
-#AWS Region
-AWS_DEFAULT_REGION = "eu-central-1"
diff --git a/contrib/terraform/aws/sample-inventory/group_vars b/contrib/terraform/aws/sample-inventory/group_vars
deleted file mode 120000
index 37359582379..00000000000
--- a/contrib/terraform/aws/sample-inventory/group_vars
+++ /dev/null
@@ -1 +0,0 @@
-../../../../inventory/sample/group_vars
\ No newline at end of file
diff --git a/contrib/terraform/aws/templates/inventory.tpl b/contrib/terraform/aws/templates/inventory.tpl
deleted file mode 100644
index 10a3995e1bd..00000000000
--- a/contrib/terraform/aws/templates/inventory.tpl
+++ /dev/null
@@ -1,27 +0,0 @@
-[all]
-${connection_strings_master}
-${connection_strings_node}
-${connection_strings_etcd}
-${public_ip_address_bastion}
-
-[bastion]
-${public_ip_address_bastion}
-
-[kube_control_plane]
-${list_master}
-
-[kube_node]
-${list_node}
-
-[etcd]
-${list_etcd}
-
-[calico_rr]
-
-[k8s_cluster:children]
-kube_node
-kube_control_plane
-calico_rr
-
-[k8s_cluster:vars]
-${nlb_api_fqdn}
diff --git a/contrib/terraform/aws/terraform.tfvars b/contrib/terraform/aws/terraform.tfvars
deleted file mode 100644
index 693fa9bfbd9..00000000000
--- a/contrib/terraform/aws/terraform.tfvars
+++ /dev/null
@@ -1,43 +0,0 @@
-#Global Vars
-aws_cluster_name = "devtest"
-
-#VPC Vars
-aws_vpc_cidr_block = "10.250.192.0/18"
-aws_cidr_subnets_private = ["10.250.192.0/20", "10.250.208.0/20"]
-aws_cidr_subnets_public = ["10.250.224.0/20", "10.250.240.0/20"]
-
-# single AZ deployment
-#aws_cidr_subnets_private = ["10.250.192.0/20"]
-#aws_cidr_subnets_public = ["10.250.224.0/20"]
-
-# 3+ AZ deployment
-#aws_cidr_subnets_private = ["10.250.192.0/24","10.250.193.0/24","10.250.194.0/24","10.250.195.0/24"]
-#aws_cidr_subnets_public = ["10.250.224.0/24","10.250.225.0/24","10.250.226.0/24","10.250.227.0/24"]
-
-#Bastion Host
-aws_bastion_num = 1
-aws_bastion_size = "t3.small"
-
-#Kubernetes Cluster
-aws_kube_master_num = 3
-aws_kube_master_size = "t3.medium"
-aws_kube_master_disk_size = 50
-
-aws_etcd_num = 0
-aws_etcd_size = "t3.medium"
-aws_etcd_disk_size = 50
-
-aws_kube_worker_num = 4
-aws_kube_worker_size = "t3.medium"
-aws_kube_worker_disk_size = 50
-
-#Settings AWS ELB
-aws_nlb_api_port = 6443
-k8s_secure_api_port = 6443
-
-default_tags = {
-  # Env = "devtest"
-  # Product = "kubernetes"
-}
-
-inventory_file = "../../../inventory/hosts"
diff --git a/contrib/terraform/aws/terraform.tfvars.example b/contrib/terraform/aws/terraform.tfvars.example
deleted file mode 100644
index 584b6a23659..00000000000
--- a/contrib/terraform/aws/terraform.tfvars.example
+++ /dev/null
@@ -1,33 +0,0 @@
-#Global Vars
-aws_cluster_name = "devtest"
-
-#VPC Vars
-aws_vpc_cidr_block = "10.250.192.0/18"
-aws_cidr_subnets_private = ["10.250.192.0/20","10.250.208.0/20"]
-aws_cidr_subnets_public = ["10.250.224.0/20","10.250.240.0/20"]
-aws_avail_zones = ["eu-central-1a","eu-central-1b"]
-
-#Bastion Host
-aws_bastion_num = 1
-aws_bastion_size = "t3.small"
-
-#Kubernetes Cluster
-aws_kube_master_num = 3
-aws_kube_master_size = "t3.medium"
-aws_kube_master_disk_size = 50
-
-aws_etcd_num = 3
-aws_etcd_size = "t3.medium"
-aws_etcd_disk_size = 50
-
-aws_kube_worker_num = 4
-aws_kube_worker_size = "t3.medium"
-aws_kube_worker_disk_size = 50
-
-#Settings AWS ELB
-aws_nlb_api_port = 6443
-k8s_secure_api_port = 6443
-
-default_tags = { }
-
-inventory_file = "../../../inventory/hosts"
diff --git a/contrib/terraform/aws/variables.tf b/contrib/terraform/aws/variables.tf
deleted file mode 100644
index 783d4adffbb..00000000000
--- a/contrib/terraform/aws/variables.tf
+++ /dev/null
@@ -1,143 +0,0 @@
-variable "AWS_ACCESS_KEY_ID" {
-  description = "AWS Access Key"
-}
-
-variable "AWS_SECRET_ACCESS_KEY" {
-  description = "AWS Secret Key"
-}
-
-variable "AWS_SSH_KEY_NAME" {
-  description = "Name of the SSH keypair to use in AWS."
-}
-
-variable "AWS_DEFAULT_REGION" {
-  description = "AWS Region"
-}
-
-//General Cluster Settings
-
-variable "aws_cluster_name" {
-  description = "Name of AWS Cluster"
-}
-
-variable "ami_name_pattern" {
-  description = "The name pattern to use for AMI lookup"
-  type        = string
-  default     = "debian-10-amd64-*"
-}
-
-variable "ami_virtualization_type" {
-  description = "The virtualization type to use for AMI lookup"
-  type        = string
-  default     = "hvm"
-}
-
-variable "ami_owners" {
-  description = "The owners to use for AMI lookup"
-  type        = list(string)
-  default     = ["136693071363"]
-}
-
-data "aws_ami" "distro" {
-  most_recent = true
-
-  filter {
-    name   = "name"
-    values = [var.ami_name_pattern]
-  }
-
-  filter {
-    name   = "virtualization-type"
-    values = [var.ami_virtualization_type]
-  }
-
-  owners = var.ami_owners
-}
-
-//AWS VPC Variables
-
-variable "aws_vpc_cidr_block" {
-  description = "CIDR Block for VPC"
-}
-
-variable "aws_cidr_subnets_private" {
-  description = "CIDR Blocks for private subnets in Availability Zones"
-  type        = list(string)
-}
-
-variable "aws_cidr_subnets_public" {
-  description = "CIDR Blocks for public subnets in Availability Zones"
-  type        = list(string)
-}
-
-//AWS EC2 Settings
-
-variable "aws_bastion_size" {
-  description = "EC2 Instance Size of Bastion Host"
-}
-
-/*
-* AWS EC2 Settings
-* The number should be divisible by the number of used
-* AWS Availability Zones without a remainder.
-*/
-variable "aws_bastion_num" {
-  description = "Number of Bastion Nodes"
-}
-
-variable "aws_kube_master_num" {
  description = "Number of Kubernetes Master Nodes"
-}
-
-variable "aws_kube_master_disk_size" {
-  description = "Disk size for Kubernetes Master Nodes (in GiB)"
-}
-
-variable "aws_kube_master_size" {
-  description = "Instance size of Kube Master Nodes"
-}
-
-variable "aws_etcd_num" {
-  description = "Number of etcd Nodes"
-}
-
-variable "aws_etcd_disk_size" {
-  description = "Disk size for etcd Nodes (in GiB)"
-}
-
-variable "aws_etcd_size" {
-  description = "Instance size of etcd Nodes"
-}
-
-variable "aws_kube_worker_num" {
-  description = "Number of Kubernetes Worker Nodes"
-}
-
-variable "aws_kube_worker_disk_size" {
-  description = "Disk size for Kubernetes Worker Nodes (in GiB)"
-}
-
-variable "aws_kube_worker_size" {
-  description = "Instance size of Kubernetes Worker Nodes"
-}
-
-/*
-* AWS NLB Settings
-*
-*/
-variable "aws_nlb_api_port" {
-  description = "Port for AWS NLB"
-}
-
-variable "k8s_secure_api_port" {
-  description = "Secure Port of K8S API Server"
-}
-
-variable "default_tags" {
-  description = "Default tags for all resources"
-  type        = map(string)
-}
-
-variable "inventory_file" {
-  description = "Where to store the generated inventory file"
-}
diff --git a/contrib/terraform/exoscale/README.md b/contrib/terraform/exoscale/README.md
deleted file mode 100644
index be451cce816..00000000000
--- a/contrib/terraform/exoscale/README.md
+++ /dev/null
@@ -1,152 +0,0 @@
-# Kubernetes on Exoscale with Terraform
-
-Provision a Kubernetes cluster on [Exoscale](https://www.exoscale.com/) using Terraform and Kubespray
-
-## Overview
-
-The setup looks like the following:
-
-```text
-                              Kubernetes cluster
-                        +-----------------------+
-+---------------+       |   +--------------+    |
-|               |       |   | +--------------+  |
-| API server LB +---------> | |              |  |
-|               |       |   | | Master/etcd  |  |
-+---------------+       |   | | node(s)      |  |
-                        |   +-+              |  |
-                        |     +--------------+  |
-                        |            ^          |
-                        |            |          |
-                        |            v          |
-+---------------+       |   +--------------+    |
-|               |       |   | +--------------+  |
-|  Ingress LB   +---------> | |              |  |
-|               |       |   | | Worker       |  |
-+---------------+       |   | | node(s)      |  |
-                        |   +-+              |  |
-                        |     +--------------+  |
-                        +-----------------------+
-```
-
-## Requirements
-
-* Terraform 0.13.0 or newer (0.12 also works if you modify the provider block to include the version and remove all `versions.tf` files)
-
-## Quickstart
-
-NOTE: *Assumes you are at the root of the kubespray repo*
-
-Copy the sample inventory for your cluster and copy the default terraform variables.
-
-```bash
-CLUSTER=my-exoscale-cluster
-cp -r inventory/sample inventory/$CLUSTER
-cp contrib/terraform/exoscale/default.tfvars inventory/$CLUSTER/
-cd inventory/$CLUSTER
-```
-
-Edit `default.tfvars` to match your setup. You MUST, at the very least, change `ssh_public_keys`.
-
-```bash
-# Ensure $EDITOR points to your favorite editor, e.g., vim, emacs, VS Code, etc.
-$EDITOR default.tfvars
-```
-
-For authentication you can use the credentials file `~/.cloudstack.ini` or `./cloudstack.ini`.
-The file should look something like this:
-
-```ini
-[cloudstack]
-key = <API key>
-secret = <API secret>
-```
-
-Follow the [Exoscale IAM Quick-start](https://community.exoscale.com/documentation/iam/quick-start/) to learn how to generate API keys.
-
-### Encrypted credentials
-
-To have the credentials encrypted at rest, you can use [sops](https://github.com/mozilla/sops) and only decrypt the credentials at runtime.
-
-```bash
-cat << EOF > cloudstack.ini
-[cloudstack]
-key = <API key>
-secret = <API secret>
-EOF
-sops --encrypt --in-place --pgp <PGP key fingerprint> cloudstack.ini
-sops cloudstack.ini
-```
-
-Run Terraform to create the infrastructure:
-
-```bash
-terraform init ../../contrib/terraform/exoscale
-terraform apply -var-file default.tfvars ../../contrib/terraform/exoscale
-```
-
-If your cloudstack credentials file is encrypted using sops, run the following instead:
-
-```bash
-terraform init ../../contrib/terraform/exoscale
-sops exec-file -no-fifo cloudstack.ini 'CLOUDSTACK_CONFIG={} terraform apply -var-file default.tfvars ../../contrib/terraform/exoscale'
-```
-
-You should now have an inventory file named `inventory.ini` that you can use with kubespray.
-You can run `terraform output` to find out the IP addresses of the nodes, as well as the control-plane and data-plane load balancers.
-
-It is a good idea to check that you have basic SSH connectivity to the nodes:
-
-```bash
-ansible -i inventory.ini -m ping all
-```
-
-Example using the default sample inventory:
-
-```bash
-ansible-playbook -i inventory.ini ../../cluster.yml -b -v
-```
-
-## Teardown
-
-The Kubernetes cluster itself cannot create any load balancers or disks, hence teardown is as simple as running Terraform destroy:
-
-```bash
-terraform destroy -var-file default.tfvars ../../contrib/terraform/exoscale
-```
-
-## Variables
-
-### Required
-
-* `ssh_public_keys`: List of public SSH keys to install on all machines
-* `zone`: The zone in which to run the cluster
-* `machines`: Machines to provision. The key of this object will be used as the name of the machine
-  * `node_type`: The role of this node *(master|worker)*
-  * `size`: The instance size to use
-  * `boot_disk`: The boot disk to use
-    * `image_name`: Name of the image
-    * `root_partition_size`: Size *(in GB)* of the root partition
-    * `ceph_partition_size`: Size *(in GB)* of the partition for Rook to use as Ceph storage *(set to 0 to disable)*
-    * `node_local_partition_size`: Size *(in GB)* of the partition for node-local-storage *(set to 0 to disable)*
-* `ssh_whitelist`: List of IP ranges (CIDR) that will be allowed to SSH to the nodes
-* `api_server_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the API server
-* `nodeport_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the kubernetes nodes on ports 30000-32767 (kubernetes nodeports)
-
-### Optional
-
-* `prefix`: Prefix to use for all resources; required to be unique for all clusters in the same project *(Defaults to `default`)*
-
-An example variables file can be found in `default.tfvars`.
-
-## Known limitations
-
-### Only single disk
-
-Since Exoscale doesn't support mounting additional disks onto an instance, this script can instead create partitions for [Rook](https://rook.io/) and [node-local-storage](https://kubernetes.io/docs/concepts/storage/volumes/#local).
-
-### No Kubernetes API
-
-The current solution doesn't use the [Exoscale Kubernetes cloud controller](https://github.com/exoscale/exoscale-cloud-controller-manager).
-This means that we need to set up an HTTP(S) load balancer in front of all workers and set the Ingress controller to DaemonSet mode.
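For context on the Terraform module removed below: it splits the single `machines` map into control-plane and worker instances with a `for_each` filter, as seen in its `exoscale_compute_instance` resources. A minimal, self-contained sketch of that pattern (the variable shape here is trimmed to two fields for illustration; only `node_type` and the filtering idiom come from the module itself):

```hcl
# Sketch of the for_each role-filtering pattern used throughout the module.
variable "machines" {
  type = map(object({
    node_type = string
    size      = string
  }))
  default = {
    "master-0" = { node_type = "master", size = "standard.medium" }
    "worker-0" = { node_type = "worker", size = "standard.large" }
  }
}

locals {
  # Partition the map by role; the module inlines the same expression
  # directly in the for_each of each resource.
  masters = {
    for name, machine in var.machines :
    name => machine
    if machine.node_type == "master"
  }
  workers = {
    for name, machine in var.machines :
    name => machine
    if machine.node_type == "worker"
  }
}

# A resource can then iterate over one role only, e.g.:
#   resource "exoscale_compute_instance" "master" { for_each = local.masters ... }
output "master_names" {
  value = keys(local.masters) # => ["master-0"]
}
```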
diff --git a/contrib/terraform/exoscale/default.tfvars b/contrib/terraform/exoscale/default.tfvars deleted file mode 100644 index 8388d586adc..00000000000 --- a/contrib/terraform/exoscale/default.tfvars +++ /dev/null @@ -1,65 +0,0 @@ -prefix = "default" -zone = "ch-gva-2" - -inventory_file = "inventory.ini" - -ssh_public_keys = [ - # Put your public SSH key here - "ssh-rsa I-did-not-read-the-docs", - "ssh-rsa I-did-not-read-the-docs 2", -] - -machines = { - "master-0" : { - "node_type" : "master", - "size" : "standard.medium", - "boot_disk" : { - "image_name" : "Linux Ubuntu 20.04 LTS 64-bit", - "root_partition_size" : 50, - "node_local_partition_size" : 0, - "ceph_partition_size" : 0 - } - }, - "worker-0" : { - "node_type" : "worker", - "size" : "standard.large", - "boot_disk" : { - "image_name" : "Linux Ubuntu 20.04 LTS 64-bit", - "root_partition_size" : 50, - "node_local_partition_size" : 0, - "ceph_partition_size" : 0 - } - }, - "worker-1" : { - "node_type" : "worker", - "size" : "standard.large", - "boot_disk" : { - "image_name" : "Linux Ubuntu 20.04 LTS 64-bit", - "root_partition_size" : 50, - "node_local_partition_size" : 0, - "ceph_partition_size" : 0 - } - }, - "worker-2" : { - "node_type" : "worker", - "size" : "standard.large", - "boot_disk" : { - "image_name" : "Linux Ubuntu 20.04 LTS 64-bit", - "root_partition_size" : 50, - "node_local_partition_size" : 0, - "ceph_partition_size" : 0 - } - } -} - -nodeport_whitelist = [ - "0.0.0.0/0" -] - -ssh_whitelist = [ - "0.0.0.0/0" -] - -api_server_whitelist = [ - "0.0.0.0/0" -] diff --git a/contrib/terraform/exoscale/main.tf b/contrib/terraform/exoscale/main.tf deleted file mode 100644 index eb9fcabcdd3..00000000000 --- a/contrib/terraform/exoscale/main.tf +++ /dev/null @@ -1,49 +0,0 @@ -provider "exoscale" {} - -module "kubernetes" { - source = "./modules/kubernetes-cluster" - - prefix = var.prefix - zone = var.zone - machines = var.machines - - ssh_public_keys = var.ssh_public_keys - - ssh_whitelist = var.ssh_whitelist - api_server_whitelist = var.api_server_whitelist - nodeport_whitelist = var.nodeport_whitelist -} - -# -# Generate ansible inventory -# - -data "template_file" "inventory" { - template = file("${path.module}/templates/inventory.tpl") - - vars = { - connection_strings_master = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s ip=%s etcd_member_name=etcd%d", - keys(module.kubernetes.master_ip_addresses), - values(module.kubernetes.master_ip_addresses).*.public_ip, - values(module.kubernetes.master_ip_addresses).*.private_ip, - range(1, length(module.kubernetes.master_ip_addresses) + 1))) - connection_strings_worker = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s ip=%s", - keys(module.kubernetes.worker_ip_addresses), - values(module.kubernetes.worker_ip_addresses).*.public_ip, - values(module.kubernetes.worker_ip_addresses).*.private_ip)) - - list_master = join("\n", keys(module.kubernetes.master_ip_addresses)) - list_worker = join("\n", keys(module.kubernetes.worker_ip_addresses)) - api_lb_ip_address = module.kubernetes.control_plane_lb_ip_address - } -} - -resource "null_resource" "inventories" { - provisioner "local-exec" { - command = "echo '${data.template_file.inventory.rendered}' > ${var.inventory_file}" - } - - triggers = { - template = data.template_file.inventory.rendered - } -} diff --git a/contrib/terraform/exoscale/modules/kubernetes-cluster/main.tf b/contrib/terraform/exoscale/modules/kubernetes-cluster/main.tf deleted file mode 100644 index 3ea4f4f2c7f..00000000000 --- 
a/contrib/terraform/exoscale/modules/kubernetes-cluster/main.tf +++ /dev/null @@ -1,191 +0,0 @@ -data "exoscale_template" "os_image" { - for_each = var.machines - - zone = var.zone - name = each.value.boot_disk.image_name -} - -data "exoscale_compute_instance" "master_nodes" { - for_each = exoscale_compute_instance.master - - id = each.value.id - zone = var.zone -} - -data "exoscale_compute_instance" "worker_nodes" { - for_each = exoscale_compute_instance.worker - - id = each.value.id - zone = var.zone -} - -resource "exoscale_private_network" "private_network" { - zone = var.zone - name = "${var.prefix}-network" - - start_ip = cidrhost(var.private_network_cidr, 1) - # cidr -1 = Broadcast address - # cidr -2 = DHCP server address (exoscale specific) - end_ip = cidrhost(var.private_network_cidr, -3) - netmask = cidrnetmask(var.private_network_cidr) -} - -resource "exoscale_compute_instance" "master" { - for_each = { - for name, machine in var.machines : - name => machine - if machine.node_type == "master" - } - - name = "${var.prefix}-${each.key}" - template_id = data.exoscale_template.os_image[each.key].id - type = each.value.size - disk_size = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size - state = "Running" - zone = var.zone - security_group_ids = [exoscale_security_group.master_sg.id] - network_interface { - network_id = exoscale_private_network.private_network.id - } - elastic_ip_ids = [exoscale_elastic_ip.control_plane_lb.id] - - user_data = templatefile( - "${path.module}/templates/cloud-init.tmpl", - { - eip_ip_address = exoscale_elastic_ip.ingress_controller_lb.ip_address - node_local_partition_size = each.value.boot_disk.node_local_partition_size - ceph_partition_size = each.value.boot_disk.ceph_partition_size - root_partition_size = each.value.boot_disk.root_partition_size - node_type = "master" - ssh_public_keys = var.ssh_public_keys - } - ) -} - -resource "exoscale_compute_instance" "worker" { - for_each = { - for name, machine in var.machines : - name => machine - if machine.node_type == "worker" - } - - name = "${var.prefix}-${each.key}" - template_id = data.exoscale_template.os_image[each.key].id - type = each.value.size - disk_size = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size - state = "Running" - zone = var.zone - security_group_ids = [exoscale_security_group.worker_sg.id] - network_interface { - network_id = exoscale_private_network.private_network.id - } - elastic_ip_ids = [exoscale_elastic_ip.ingress_controller_lb.id] - - user_data = templatefile( - "${path.module}/templates/cloud-init.tmpl", - { - eip_ip_address = exoscale_elastic_ip.ingress_controller_lb.ip_address - node_local_partition_size = each.value.boot_disk.node_local_partition_size - ceph_partition_size = each.value.boot_disk.ceph_partition_size - root_partition_size = each.value.boot_disk.root_partition_size - node_type = "worker" - ssh_public_keys = var.ssh_public_keys - } - ) -} - -resource "exoscale_security_group" "master_sg" { - name = "${var.prefix}-master-sg" - description = "Security group for Kubernetes masters" -} - -resource "exoscale_security_group_rule" "master_sg_rule_ssh" { - security_group_id = exoscale_security_group.master_sg.id - - for_each = toset(var.ssh_whitelist) - # SSH - type = "INGRESS" - start_port = 22 - end_port = 22 - protocol = "TCP" - cidr = each.value -} - -resource "exoscale_security_group_rule" 
"master_sg_rule_k8s_api" { - security_group_id = exoscale_security_group.master_sg.id - - for_each = toset(var.api_server_whitelist) - # Kubernetes API - type = "INGRESS" - start_port = 6443 - end_port = 6443 - protocol = "TCP" - cidr = each.value -} - -resource "exoscale_security_group" "worker_sg" { - name = "${var.prefix}-worker-sg" - description = "security group for kubernetes worker nodes" -} - -resource "exoscale_security_group_rule" "worker_sg_rule_ssh" { - security_group_id = exoscale_security_group.worker_sg.id - - # SSH - for_each = toset(var.ssh_whitelist) - type = "INGRESS" - start_port = 22 - end_port = 22 - protocol = "TCP" - cidr = each.value -} - -resource "exoscale_security_group_rule" "worker_sg_rule_http" { - security_group_id = exoscale_security_group.worker_sg.id - - # HTTP(S) - for_each = toset(["80", "443"]) - type = "INGRESS" - start_port = each.value - end_port = each.value - protocol = "TCP" - cidr = "0.0.0.0/0" -} - - -resource "exoscale_security_group_rule" "worker_sg_rule_nodeport" { - security_group_id = exoscale_security_group.worker_sg.id - - # HTTP(S) - for_each = toset(var.nodeport_whitelist) - type = "INGRESS" - start_port = 30000 - end_port = 32767 - protocol = "TCP" - cidr = each.value -} - -resource "exoscale_elastic_ip" "ingress_controller_lb" { - zone = var.zone - healthcheck { - mode = "http" - port = 80 - uri = "/healthz" - interval = 10 - timeout = 2 - strikes_ok = 2 - strikes_fail = 3 - } -} - -resource "exoscale_elastic_ip" "control_plane_lb" { - zone = var.zone - healthcheck { - mode = "tcp" - port = 6443 - interval = 10 - timeout = 2 - strikes_ok = 2 - strikes_fail = 3 - } -} diff --git a/contrib/terraform/exoscale/modules/kubernetes-cluster/output.tf b/contrib/terraform/exoscale/modules/kubernetes-cluster/output.tf deleted file mode 100644 index b288bdb49ec..00000000000 --- a/contrib/terraform/exoscale/modules/kubernetes-cluster/output.tf +++ /dev/null @@ -1,31 +0,0 @@ -output "master_ip_addresses" { - value = { - for key, instance in exoscale_compute_instance.master : - instance.name => { - "private_ip" = contains(keys(data.exoscale_compute_instance.master_nodes), key) ? data.exoscale_compute_instance.master_nodes[key].private_network_ip_addresses[0] : "" - "public_ip" = exoscale_compute_instance.master[key].ip_address - } - } -} - -output "worker_ip_addresses" { - value = { - for key, instance in exoscale_compute_instance.worker : - instance.name => { - "private_ip" = contains(keys(data.exoscale_compute_instance.worker_nodes), key) ? 
data.exoscale_compute_instance.worker_nodes[key].private_network_ip_addresses[0] : "" - "public_ip" = exoscale_compute_instance.worker[key].ip_address - } - } -} - -output "cluster_private_network_cidr" { - value = var.private_network_cidr -} - -output "ingress_controller_lb_ip_address" { - value = exoscale_elastic_ip.ingress_controller_lb.ip_address -} - -output "control_plane_lb_ip_address" { - value = exoscale_elastic_ip.control_plane_lb.ip_address -} diff --git a/contrib/terraform/exoscale/modules/kubernetes-cluster/templates/cloud-init.tmpl b/contrib/terraform/exoscale/modules/kubernetes-cluster/templates/cloud-init.tmpl deleted file mode 100644 index a81b8e38a42..00000000000 --- a/contrib/terraform/exoscale/modules/kubernetes-cluster/templates/cloud-init.tmpl +++ /dev/null @@ -1,52 +0,0 @@ -#cloud-config -%{ if ceph_partition_size > 0 || node_local_partition_size > 0} -bootcmd: -- [ cloud-init-per, once, move-second-header, sgdisk, --move-second-header, /dev/vda ] -%{ if node_local_partition_size > 0 } - # Create partition for node local storage -- [ cloud-init-per, once, create-node-local-part, parted, --script, /dev/vda, 'mkpart extended ext4 ${root_partition_size}GB %{ if ceph_partition_size == 0 }-1%{ else }${root_partition_size + node_local_partition_size}GB%{ endif }' ] -- [ cloud-init-per, once, create-fs-node-local-part, mkfs.ext4, /dev/vda2 ] -%{ endif } -%{ if ceph_partition_size > 0 } - # Create partition for rook to use for ceph -- [ cloud-init-per, once, create-ceph-part, parted, --script, /dev/vda, 'mkpart extended ${root_partition_size + node_local_partition_size}GB -1' ] -%{ endif } -%{ endif } - -ssh_authorized_keys: -%{ for ssh_public_key in ssh_public_keys ~} - - ${ssh_public_key} -%{ endfor ~} - -write_files: - - path: /etc/netplan/eth1.yaml - content: | - network: - version: 2 - ethernets: - eth1: - dhcp4: true -%{ if node_type == "worker" } - # TODO: When a VM is seen as healthy and is added to the EIP loadbalancer - # pool it no longer can send traffic back to itself via the EIP IP - # address. - # Remove this if it ever gets solved. - - path: /etc/netplan/20-eip-fix.yaml - content: | - network: - version: 2 - ethernets: - "lo:0": - match: - name: lo - dhcp4: false - addresses: - - ${eip_ip_address}/32 -%{ endif } -runcmd: - - netplan apply -%{ if node_local_partition_size > 0 } - - mkdir -p /mnt/disks/node-local-storage - - chown nobody:nogroup /mnt/disks/node-local-storage - - mount /dev/vda2 /mnt/disks/node-local-storage -%{ endif } diff --git a/contrib/terraform/exoscale/modules/kubernetes-cluster/variables.tf b/contrib/terraform/exoscale/modules/kubernetes-cluster/variables.tf deleted file mode 100644 index c466abfe15b..00000000000 --- a/contrib/terraform/exoscale/modules/kubernetes-cluster/variables.tf +++ /dev/null @@ -1,42 +0,0 @@ -variable "zone" { - type = string - # This is currently the only zone that is supposed to be supporting - # so called "managed private networks". 
- # See: https://www.exoscale.com/syslog/introducing-managed-private-networks - default = "ch-gva-2" -} - -variable "prefix" {} - -variable "machines" { - type = map(object({ - node_type = string - size = string - boot_disk = object({ - image_name = string - root_partition_size = number - ceph_partition_size = number - node_local_partition_size = number - }) - })) -} - -variable "ssh_public_keys" { - type = list(string) -} - -variable "ssh_whitelist" { - type = list(string) -} - -variable "api_server_whitelist" { - type = list(string) -} - -variable "nodeport_whitelist" { - type = list(string) -} - -variable "private_network_cidr" { - default = "172.0.10.0/24" -} diff --git a/contrib/terraform/exoscale/modules/kubernetes-cluster/versions.tf b/contrib/terraform/exoscale/modules/kubernetes-cluster/versions.tf deleted file mode 100644 index 047420aecea..00000000000 --- a/contrib/terraform/exoscale/modules/kubernetes-cluster/versions.tf +++ /dev/null @@ -1,9 +0,0 @@ -terraform { - required_providers { - exoscale = { - source = "exoscale/exoscale" - version = ">= 0.21" - } - } - required_version = ">= 0.13" -} diff --git a/contrib/terraform/exoscale/output.tf b/contrib/terraform/exoscale/output.tf deleted file mode 100644 index 09bf7fa4a12..00000000000 --- a/contrib/terraform/exoscale/output.tf +++ /dev/null @@ -1,15 +0,0 @@ -output "master_ips" { - value = module.kubernetes.master_ip_addresses -} - -output "worker_ips" { - value = module.kubernetes.worker_ip_addresses -} - -output "ingress_controller_lb_ip_address" { - value = module.kubernetes.ingress_controller_lb_ip_address -} - -output "control_plane_lb_ip_address" { - value = module.kubernetes.control_plane_lb_ip_address -} diff --git a/contrib/terraform/exoscale/sample-inventory/cluster.tfvars b/contrib/terraform/exoscale/sample-inventory/cluster.tfvars deleted file mode 100644 index f6152412647..00000000000 --- a/contrib/terraform/exoscale/sample-inventory/cluster.tfvars +++ /dev/null @@ -1,65 +0,0 @@ -prefix = "default" -zone = "ch-gva-2" - -inventory_file = "inventory.ini" - -ssh_public_keys = [ - # Put your public SSH key here - "ssh-rsa I-did-not-read-the-docs", - "ssh-rsa I-did-not-read-the-docs 2", -] - -machines = { - "master-0" : { - "node_type" : "master", - "size" : "Small", - "boot_disk" : { - "image_name" : "Linux Ubuntu 20.04 LTS 64-bit", - "root_partition_size" : 50, - "node_local_partition_size" : 0, - "ceph_partition_size" : 0 - } - }, - "worker-0" : { - "node_type" : "worker", - "size" : "Large", - "boot_disk" : { - "image_name" : "Linux Ubuntu 20.04 LTS 64-bit", - "root_partition_size" : 50, - "node_local_partition_size" : 0, - "ceph_partition_size" : 0 - } - }, - "worker-1" : { - "node_type" : "worker", - "size" : "Large", - "boot_disk" : { - "image_name" : "Linux Ubuntu 20.04 LTS 64-bit", - "root_partition_size" : 50, - "node_local_partition_size" : 0, - "ceph_partition_size" : 0 - } - }, - "worker-2" : { - "node_type" : "worker", - "size" : "Large", - "boot_disk" : { - "image_name" : "Linux Ubuntu 20.04 LTS 64-bit", - "root_partition_size" : 50, - "node_local_partition_size" : 0, - "ceph_partition_size" : 0 - } - } -} - -nodeport_whitelist = [ - "0.0.0.0/0" -] - -ssh_whitelist = [ - "0.0.0.0/0" -] - -api_server_whitelist = [ - "0.0.0.0/0" -] diff --git a/contrib/terraform/exoscale/sample-inventory/group_vars b/contrib/terraform/exoscale/sample-inventory/group_vars deleted file mode 120000 index 37359582379..00000000000 --- a/contrib/terraform/exoscale/sample-inventory/group_vars +++ /dev/null @@ -1 +0,0 @@ 
-../../../../inventory/sample/group_vars \ No newline at end of file diff --git a/contrib/terraform/exoscale/templates/inventory.tpl b/contrib/terraform/exoscale/templates/inventory.tpl deleted file mode 100644 index 85ed1924b1d..00000000000 --- a/contrib/terraform/exoscale/templates/inventory.tpl +++ /dev/null @@ -1,19 +0,0 @@ -[all] -${connection_strings_master} -${connection_strings_worker} - -[kube_control_plane] -${list_master} - -[kube_control_plane:vars] -supplementary_addresses_in_ssl_keys = [ "${api_lb_ip_address}" ] - -[etcd] -${list_master} - -[kube_node] -${list_worker} - -[k8s_cluster:children] -kube_control_plane -kube_node diff --git a/contrib/terraform/exoscale/variables.tf b/contrib/terraform/exoscale/variables.tf deleted file mode 100644 index 14f8455796c..00000000000 --- a/contrib/terraform/exoscale/variables.tf +++ /dev/null @@ -1,46 +0,0 @@ -variable "zone" { - description = "The zone where to run the cluster" -} - -variable "prefix" { - description = "Prefix for resource names" - default = "default" -} - -variable "machines" { - description = "Cluster machines" - type = map(object({ - node_type = string - size = string - boot_disk = object({ - image_name = string - root_partition_size = number - ceph_partition_size = number - node_local_partition_size = number - }) - })) -} - -variable "ssh_public_keys" { - description = "List of public SSH keys which are injected into the VMs." - type = list(string) -} - -variable "ssh_whitelist" { - description = "List of IP ranges (CIDR) to whitelist for ssh" - type = list(string) -} - -variable "api_server_whitelist" { - description = "List of IP ranges (CIDR) to whitelist for kubernetes api server" - type = list(string) -} - -variable "nodeport_whitelist" { - description = "List of IP ranges (CIDR) to whitelist for kubernetes nodeports" - type = list(string) -} - -variable "inventory_file" { - description = "Where to store the generated inventory file" -} diff --git a/contrib/terraform/exoscale/versions.tf b/contrib/terraform/exoscale/versions.tf deleted file mode 100644 index 0333b41b96a..00000000000 --- a/contrib/terraform/exoscale/versions.tf +++ /dev/null @@ -1,15 +0,0 @@ -terraform { - required_providers { - exoscale = { - source = "exoscale/exoscale" - version = ">= 0.21" - } - null = { - source = "hashicorp/null" - } - template = { - source = "hashicorp/template" - } - } - required_version = ">= 0.13" -} diff --git a/contrib/terraform/gcp/README.md b/contrib/terraform/gcp/README.md deleted file mode 100644 index 01e5299db01..00000000000 --- a/contrib/terraform/gcp/README.md +++ /dev/null @@ -1,104 +0,0 @@ -# Kubernetes on GCP with Terraform - -Provision a Kubernetes cluster on GCP using Terraform and Kubespray - -## Overview - -The setup looks like following - -```text - Kubernetes cluster - +-----------------------+ -+---------------+ | +--------------+ | -| | | | +--------------+ | -| API server LB +---------> | | | | -| | | | | Master/etcd | | -+---------------+ | | | node(s) | | - | +-+ | | - | +--------------+ | - | ^ | - | | | - | v | -+---------------+ | +--------------+ | -| | | | +--------------+ | -| Ingress LB +---------> | | | | -| | | | | Worker | | -+---------------+ | | | node(s) | | - | +-+ | | - | +--------------+ | - +-----------------------+ -``` - -## Requirements - -* Terraform 0.12.0 or newer - -## Quickstart - -To get a cluster up and running you'll need a JSON keyfile. 
-Set the path to the file in the `tfvars.json` file and run the following:
-
-```bash
-terraform apply -var-file tfvars.json -state dev-cluster.tfstate -var gcp_project_id=<your GCP project id> -var keyfile_location=<path to keyfile>
-```
-
-To generate a kubespray inventory based on the terraform state file you can run the following:
-
-```bash
-./generate-inventory.sh dev-cluster.tfstate > inventory.ini
-```
-
-You should now have an inventory file named `inventory.ini` that you can use with kubespray, e.g.
-
-```bash
-ansible-playbook -i contrib/terraform/gcp/inventory.ini cluster.yml -b -v
-```
-
-## Variables
-
-### Required
-
-* `keyfile_location`: Location of the keyfile to use as credentials for the google terraform provider
-* `gcp_project_id`: ID of the GCP project to deploy the cluster in
-* `ssh_pub_key`: Path to the public SSH key to use for all machines
-* `region`: The region in which to run the cluster
-* `machines`: Machines to provision. The key of this object will be used as the name of the machine
-  * `node_type`: The role of this node *(master|worker)*
-  * `size`: The instance size to use
-  * `zone`: The zone the machine should run in
-  * `additional_disks`: Extra disks to add to the machine. The key of this object will be used as the disk name
-    * `size`: Size of the disk (in GB)
-  * `boot_disk`: The boot disk to use
-    * `image_name`: Name of the image
-    * `size`: Size of the boot disk (in GB)
-* `ssh_whitelist`: List of IP ranges (CIDR) that will be allowed to SSH to the nodes
-* `api_server_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the API server
-* `nodeport_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the kubernetes nodes on ports 30000-32767 (kubernetes nodeports)
-* `ingress_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to ingress on ports 80 and 443
-* `extra_ingress_firewalls`: Additional ingress firewall rules. The key will be used as the name of the rule
-  * `source_ranges`: List of IP ranges (CIDR). Example: `["8.8.8.8"]`
-  * `protocol`: Protocol. Example: `"tcp"`
-  * `ports`: List of ports, as strings. Example: `["53"]`
-  * `target_tags`: List of target tags (either the machine name or `control-plane` or `worker`). Example: `["control-plane", "worker-0"]`
-
-### Optional
-
-* `prefix`: Prefix to use for all resources; required to be unique for all clusters in the same project *(Defaults to `default`)*
-* `master_sa_email`: Service account email to use for the control plane nodes *(Defaults to `""`, which auto-generates one)*
-* `master_sa_scopes`: Service account scopes to use for the control plane nodes *(Defaults to `["https://www.googleapis.com/auth/cloud-platform"]`)*
-* `master_preemptible`: Enable [preemptible](https://cloud.google.com/compute/docs/instances/preemptible)
-  for the control plane nodes *(Defaults to `false`)*
-* `master_additional_disk_type`: [Disk type](https://cloud.google.com/compute/docs/disks/#disk-types)
-  for extra disks added on the control plane nodes *(Defaults to `"pd-ssd"`)*
-* `worker_sa_email`: Service account email to use for the worker nodes *(Defaults to `""`, which auto-generates one)*
-* `worker_sa_scopes`: Service account scopes to use for the worker nodes *(Defaults to `["https://www.googleapis.com/auth/cloud-platform"]`)*
-* `worker_preemptible`: Enable [preemptible](https://cloud.google.com/compute/docs/instances/preemptible)
-  for the worker nodes *(Defaults to `false`)*
-* `worker_additional_disk_type`: [Disk type](https://cloud.google.com/compute/docs/disks/#disk-types)
-  for extra disks added on the worker nodes *(Defaults to `"pd-ssd"`)*
-
-An example variables file can be found in `tfvars.json`.
-
-## Known limitations
-
-This solution does not support a bastion host, so all nodes must expose a public IP for kubespray to work.
diff --git a/contrib/terraform/gcp/generate-inventory.sh b/contrib/terraform/gcp/generate-inventory.sh
deleted file mode 100755
index 585a4f415eb..00000000000
--- a/contrib/terraform/gcp/generate-inventory.sh
+++ /dev/null
@@ -1,76 +0,0 @@
-#!/bin/bash
-
-#
-# Generates an inventory file based on the terraform output.
-# After provisioning a cluster, simply run this command and supply the terraform state file.
-# The default state file is terraform.tfstate.
-#
-
-set -e
-
-usage () {
-  echo "Usage: $0 <tfstate file>" >&2
-  exit 1
-}
-
-if [[ $# -ne 1 ]]; then
-  usage
-fi
-
-TF_STATE_FILE=${1}
-
-if [[ ! -f "${TF_STATE_FILE}" ]]; then
-  echo "ERROR: state file ${TF_STATE_FILE} doesn't exist" >&2
-  usage
-fi
-
-TF_OUT=$(terraform output -state "${TF_STATE_FILE}" -json)
-
-MASTERS=$(jq -r '.master_ips.value | to_entries[]' <(echo "${TF_OUT}"))
-WORKERS=$(jq -r '.worker_ips.value | to_entries[]' <(echo "${TF_OUT}"))
-mapfile -t MASTER_NAMES < <(jq -r '.key' <(echo "${MASTERS}"))
-mapfile -t WORKER_NAMES < <(jq -r '.key' <(echo "${WORKERS}"))
-
-API_LB=$(jq -r '.control_plane_lb_ip_address.value' <(echo "${TF_OUT}"))
-
-# Generate master hosts
-i=1
-for name in "${MASTER_NAMES[@]}"; do
-  private_ip=$(jq -r '. | select( .key=='"\"${name}\""' ) | .value.private_ip' <(echo "${MASTERS}"))
-  public_ip=$(jq -r '. | select( .key=='"\"${name}\""' ) | .value.public_ip' <(echo "${MASTERS}"))
-  echo "${name} ansible_user=ubuntu ansible_host=${public_ip} ip=${private_ip} etcd_member_name=etcd${i}"
-  i=$(( i + 1 ))
-done
-
-# Generate worker hosts
-for name in "${WORKER_NAMES[@]}"; do
-  private_ip=$(jq -r '. | select( .key=='"\"${name}\""' ) | .value.private_ip' <(echo "${WORKERS}"))
-  public_ip=$(jq -r '.
| select( .key=='"\"${name}\""' ) | .value.public_ip' <(echo "${WORKERS}")) - echo "${name} ansible_user=ubuntu ansible_host=${public_ip} ip=${private_ip}" -done - -echo "" -echo "[kube_control_plane]" -for name in "${MASTER_NAMES[@]}"; do - echo "${name}" -done - -echo "" -echo "[kube_control_plane:vars]" -echo "supplementary_addresses_in_ssl_keys = [ '${API_LB}' ]" # Add LB address to API server certificate -echo "" -echo "[etcd]" -for name in "${MASTER_NAMES[@]}"; do - echo "${name}" -done - -echo "" -echo "[kube_node]" -for name in "${WORKER_NAMES[@]}"; do - echo "${name}" -done - -echo "" -echo "[k8s_cluster:children]" -echo "kube_control_plane" -echo "kube_node" diff --git a/contrib/terraform/gcp/main.tf b/contrib/terraform/gcp/main.tf deleted file mode 100644 index b0b91f57b35..00000000000 --- a/contrib/terraform/gcp/main.tf +++ /dev/null @@ -1,39 +0,0 @@ -terraform { - required_providers { - google = { - source = "hashicorp/google" - version = "~> 4.0" - } - } -} - -provider "google" { - credentials = file(var.keyfile_location) - region = var.region - project = var.gcp_project_id -} - -module "kubernetes" { - source = "./modules/kubernetes-cluster" - region = var.region - prefix = var.prefix - - machines = var.machines - ssh_pub_key = var.ssh_pub_key - - master_sa_email = var.master_sa_email - master_sa_scopes = var.master_sa_scopes - master_preemptible = var.master_preemptible - master_additional_disk_type = var.master_additional_disk_type - worker_sa_email = var.worker_sa_email - worker_sa_scopes = var.worker_sa_scopes - worker_preemptible = var.worker_preemptible - worker_additional_disk_type = var.worker_additional_disk_type - - ssh_whitelist = var.ssh_whitelist - api_server_whitelist = var.api_server_whitelist - nodeport_whitelist = var.nodeport_whitelist - ingress_whitelist = var.ingress_whitelist - - extra_ingress_firewalls = var.extra_ingress_firewalls -} diff --git a/contrib/terraform/gcp/modules/kubernetes-cluster/main.tf b/contrib/terraform/gcp/modules/kubernetes-cluster/main.tf deleted file mode 100644 index a83b73bb251..00000000000 --- a/contrib/terraform/gcp/modules/kubernetes-cluster/main.tf +++ /dev/null @@ -1,421 +0,0 @@ -################################################# -## -## General -## - -resource "google_compute_network" "main" { - name = "${var.prefix}-network" - - auto_create_subnetworks = false -} - -resource "google_compute_subnetwork" "main" { - name = "${var.prefix}-subnet" - network = google_compute_network.main.name - ip_cidr_range = var.private_network_cidr - region = var.region -} - -resource "google_compute_firewall" "deny_all" { - name = "${var.prefix}-default-firewall" - network = google_compute_network.main.name - - priority = 1000 - - source_ranges = ["0.0.0.0/0"] - - deny { - protocol = "all" - } -} - -resource "google_compute_firewall" "allow_internal" { - name = "${var.prefix}-internal-firewall" - network = google_compute_network.main.name - - priority = 500 - - source_ranges = [var.private_network_cidr] - - allow { - protocol = "all" - } -} - -resource "google_compute_firewall" "ssh" { - count = length(var.ssh_whitelist) > 0 ? 1 : 0 - - name = "${var.prefix}-ssh-firewall" - network = google_compute_network.main.name - - priority = 100 - - source_ranges = var.ssh_whitelist - - allow { - protocol = "tcp" - ports = ["22"] - } -} - -resource "google_compute_firewall" "api_server" { - count = length(var.api_server_whitelist) > 0 ? 
1 : 0 - - name = "${var.prefix}-api-server-firewall" - network = google_compute_network.main.name - - priority = 100 - - source_ranges = var.api_server_whitelist - - allow { - protocol = "tcp" - ports = ["6443"] - } -} - -resource "google_compute_firewall" "nodeport" { - count = length(var.nodeport_whitelist) > 0 ? 1 : 0 - - name = "${var.prefix}-nodeport-firewall" - network = google_compute_network.main.name - - priority = 100 - - source_ranges = var.nodeport_whitelist - - allow { - protocol = "tcp" - ports = ["30000-32767"] - } -} - -resource "google_compute_firewall" "ingress_http" { - count = length(var.ingress_whitelist) > 0 ? 1 : 0 - - name = "${var.prefix}-http-ingress-firewall" - network = google_compute_network.main.name - - priority = 100 - - source_ranges = var.ingress_whitelist - - allow { - protocol = "tcp" - ports = ["80"] - } -} - -resource "google_compute_firewall" "ingress_https" { - count = length(var.ingress_whitelist) > 0 ? 1 : 0 - - name = "${var.prefix}-https-ingress-firewall" - network = google_compute_network.main.name - - priority = 100 - - source_ranges = var.ingress_whitelist - - allow { - protocol = "tcp" - ports = ["443"] - } -} - -################################################# -## -## Local variables -## - -locals { - master_target_list = [ - for name, machine in google_compute_instance.master : - "${machine.zone}/${machine.name}" - ] - - worker_target_list = [ - for name, machine in google_compute_instance.worker : - "${machine.zone}/${machine.name}" - ] - - master_disks = flatten([ - for machine_name, machine in var.machines : [ - for disk_name, disk in machine.additional_disks : { - "${machine_name}-${disk_name}" = { - "machine_name": machine_name, - "machine": machine, - "disk_size": disk.size, - "disk_name": disk_name - } - } - ] - if machine.node_type == "master" - ]) - - worker_disks = flatten([ - for machine_name, machine in var.machines : [ - for disk_name, disk in machine.additional_disks : { - "${machine_name}-${disk_name}" = { - "machine_name": machine_name, - "machine": machine, - "disk_size": disk.size, - "disk_name": disk_name - } - } - ] - if machine.node_type == "worker" - ]) -} - -################################################# -## -## Master -## - -resource "google_compute_address" "master" { - for_each = { - for name, machine in var.machines : - name => machine - if machine.node_type == "master" - } - - name = "${var.prefix}-${each.key}-pip" - address_type = "EXTERNAL" - region = var.region -} - -resource "google_compute_disk" "master" { - for_each = { - for item in local.master_disks : - keys(item)[0] => values(item)[0] - } - - name = "${var.prefix}-${each.key}" - type = var.master_additional_disk_type - zone = each.value.machine.zone - size = each.value.disk_size - - physical_block_size_bytes = 4096 -} - -resource "google_compute_attached_disk" "master" { - for_each = { - for item in local.master_disks : - keys(item)[0] => values(item)[0] - } - - disk = google_compute_disk.master[each.key].id - instance = google_compute_instance.master[each.value.machine_name].id -} - -resource "google_compute_instance" "master" { - for_each = { - for name, machine in var.machines : - name => machine - if machine.node_type == "master" - } - - name = "${var.prefix}-${each.key}" - machine_type = each.value.size - zone = each.value.zone - - tags = ["control-plane", "master", each.key] - - boot_disk { - initialize_params { - image = each.value.boot_disk.image_name - size = each.value.boot_disk.size - } - } - - network_interface { - subnetwork = 
google_compute_subnetwork.main.name - - access_config { - nat_ip = google_compute_address.master[each.key].address - } - } - - metadata = { - ssh-keys = "ubuntu:${trimspace(file(pathexpand(var.ssh_pub_key)))}" - } - - service_account { - email = var.master_sa_email - scopes = var.master_sa_scopes - } - - # Since we use google_compute_attached_disk we need to ignore this - lifecycle { - ignore_changes = [attached_disk] - } - - scheduling { - preemptible = var.master_preemptible - automatic_restart = !var.master_preemptible - } -} - -resource "google_compute_forwarding_rule" "master_lb" { - count = length(var.api_server_whitelist) > 0 ? 1 : 0 - - name = "${var.prefix}-master-lb-forward-rule" - - port_range = "6443" - - target = google_compute_target_pool.master_lb[count.index].id -} - -resource "google_compute_target_pool" "master_lb" { - count = length(var.api_server_whitelist) > 0 ? 1 : 0 - - name = "${var.prefix}-master-lb-pool" - instances = local.master_target_list -} - -################################################# -## -## Worker -## - -resource "google_compute_disk" "worker" { - for_each = { - for item in local.worker_disks : - keys(item)[0] => values(item)[0] - } - - name = "${var.prefix}-${each.key}" - type = var.worker_additional_disk_type - zone = each.value.machine.zone - size = each.value.disk_size - - physical_block_size_bytes = 4096 -} - -resource "google_compute_attached_disk" "worker" { - for_each = { - for item in local.worker_disks : - keys(item)[0] => values(item)[0] - } - - disk = google_compute_disk.worker[each.key].id - instance = google_compute_instance.worker[each.value.machine_name].id -} - -resource "google_compute_address" "worker" { - for_each = { - for name, machine in var.machines : - name => machine - if machine.node_type == "worker" - } - - name = "${var.prefix}-${each.key}-pip" - address_type = "EXTERNAL" - region = var.region -} - -resource "google_compute_instance" "worker" { - for_each = { - for name, machine in var.machines : - name => machine - if machine.node_type == "worker" - } - - name = "${var.prefix}-${each.key}" - machine_type = each.value.size - zone = each.value.zone - - tags = ["worker", each.key] - - boot_disk { - initialize_params { - image = each.value.boot_disk.image_name - size = each.value.boot_disk.size - } - } - - network_interface { - subnetwork = google_compute_subnetwork.main.name - - access_config { - nat_ip = google_compute_address.worker[each.key].address - } - } - - metadata = { - ssh-keys = "ubuntu:${trimspace(file(pathexpand(var.ssh_pub_key)))}" - } - - service_account { - email = var.worker_sa_email - scopes = var.worker_sa_scopes - } - - # Since we use google_compute_attached_disk we need to ignore this - lifecycle { - ignore_changes = [attached_disk] - } - - scheduling { - preemptible = var.worker_preemptible - automatic_restart = !var.worker_preemptible - } -} - -resource "google_compute_address" "worker_lb" { - count = length(var.ingress_whitelist) > 0 ? 1 : 0 - - name = "${var.prefix}-worker-lb-address" - address_type = "EXTERNAL" - region = var.region -} - -resource "google_compute_forwarding_rule" "worker_http_lb" { - count = length(var.ingress_whitelist) > 0 ? 1 : 0 - - name = "${var.prefix}-worker-http-lb-forward-rule" - - ip_address = google_compute_address.worker_lb[count.index].address - port_range = "80" - - target = google_compute_target_pool.worker_lb[count.index].id -} - -resource "google_compute_forwarding_rule" "worker_https_lb" { - count = length(var.ingress_whitelist) > 0 ? 
1 : 0 - - name = "${var.prefix}-worker-https-lb-forward-rule" - - ip_address = google_compute_address.worker_lb[count.index].address - port_range = "443" - - target = google_compute_target_pool.worker_lb[count.index].id -} - -resource "google_compute_target_pool" "worker_lb" { - count = length(var.ingress_whitelist) > 0 ? 1 : 0 - - name = "${var.prefix}-worker-lb-pool" - instances = local.worker_target_list -} - -resource "google_compute_firewall" "extra_ingress_firewall" { - for_each = { - for name, firewall in var.extra_ingress_firewalls : - name => firewall - } - - name = "${var.prefix}-${each.key}-ingress" - network = google_compute_network.main.name - - priority = 100 - - source_ranges = each.value.source_ranges - - target_tags = each.value.target_tags - - allow { - protocol = each.value.protocol - ports = each.value.ports - } -} diff --git a/contrib/terraform/gcp/modules/kubernetes-cluster/output.tf b/contrib/terraform/gcp/modules/kubernetes-cluster/output.tf deleted file mode 100644 index d0ffaa93ea9..00000000000 --- a/contrib/terraform/gcp/modules/kubernetes-cluster/output.tf +++ /dev/null @@ -1,27 +0,0 @@ -output "master_ip_addresses" { - value = { - for key, instance in google_compute_instance.master : - instance.name => { - "private_ip" = instance.network_interface.0.network_ip - "public_ip" = instance.network_interface.0.access_config.0.nat_ip - } - } -} - -output "worker_ip_addresses" { - value = { - for key, instance in google_compute_instance.worker : - instance.name => { - "private_ip" = instance.network_interface.0.network_ip - "public_ip" = instance.network_interface.0.access_config.0.nat_ip - } - } -} - -output "ingress_controller_lb_ip_address" { - value = length(var.ingress_whitelist) > 0 ? google_compute_address.worker_lb.0.address : "" -} - -output "control_plane_lb_ip_address" { - value = length(var.api_server_whitelist) > 0 ? 
google_compute_forwarding_rule.master_lb.0.ip_address : "" -} diff --git a/contrib/terraform/gcp/modules/kubernetes-cluster/variables.tf b/contrib/terraform/gcp/modules/kubernetes-cluster/variables.tf deleted file mode 100644 index bb8d23be06f..00000000000 --- a/contrib/terraform/gcp/modules/kubernetes-cluster/variables.tf +++ /dev/null @@ -1,86 +0,0 @@ -variable "region" { - type = string -} - -variable "prefix" {} - -variable "machines" { - type = map(object({ - node_type = string - size = string - zone = string - additional_disks = map(object({ - size = number - })) - boot_disk = object({ - image_name = string - size = number - }) - })) -} - -variable "master_sa_email" { - type = string -} - -variable "master_sa_scopes" { - type = list(string) -} - -variable "master_preemptible" { - type = bool -} - -variable "master_additional_disk_type" { - type = string -} - -variable "worker_sa_email" { - type = string -} - -variable "worker_sa_scopes" { - type = list(string) -} - -variable "worker_preemptible" { - type = bool -} - -variable "worker_additional_disk_type" { - type = string -} - -variable "ssh_pub_key" {} - -variable "ssh_whitelist" { - type = list(string) -} - -variable "api_server_whitelist" { - type = list(string) -} - -variable "nodeport_whitelist" { - type = list(string) -} - -variable "ingress_whitelist" { - type = list(string) - default = ["0.0.0.0/0"] -} - -variable "private_network_cidr" { - default = "10.0.10.0/24" -} - -variable "extra_ingress_firewalls" { - type = map(object({ - source_ranges = set(string) - protocol = string - ports = list(string) - target_tags = set(string) - })) - - default = {} -} diff --git a/contrib/terraform/gcp/output.tf b/contrib/terraform/gcp/output.tf deleted file mode 100644 index 09bf7fa4a12..00000000000 --- a/contrib/terraform/gcp/output.tf +++ /dev/null @@ -1,15 +0,0 @@ -output "master_ips" { - value = module.kubernetes.master_ip_addresses -} - -output "worker_ips" { - value = module.kubernetes.worker_ip_addresses -} - -output "ingress_controller_lb_ip_address" { - value = module.kubernetes.ingress_controller_lb_ip_address -} - -output "control_plane_lb_ip_address" { - value = module.kubernetes.control_plane_lb_ip_address -} diff --git a/contrib/terraform/gcp/tfvars.json b/contrib/terraform/gcp/tfvars.json deleted file mode 100644 index 056b8fe80be..00000000000 --- a/contrib/terraform/gcp/tfvars.json +++ /dev/null @@ -1,63 +0,0 @@ -{ - "gcp_project_id": "GCP_PROJECT_ID", - "region": "us-central1", - "ssh_pub_key": "~/.ssh/id_rsa.pub", - - "keyfile_location": "service-account.json", - - "prefix": "development", - - "ssh_whitelist": [ - "1.2.3.4/32" - ], - "api_server_whitelist": [ - "1.2.3.4/32" - ], - "nodeport_whitelist": [ - "1.2.3.4/32" - ], - "ingress_whitelist": [ - "0.0.0.0/0" - ], - - "machines": { - "master-0": { - "node_type": "master", - "size": "n1-standard-2", - "zone": "us-central1-a", - "additional_disks": {}, - "boot_disk": { - "image_name": "ubuntu-os-cloud/ubuntu-2004-focal-v20220118", - "size": 50 - } - }, - "worker-0": { - "node_type": "worker", - "size": "n1-standard-8", - "zone": "us-central1-a", - "additional_disks": { - "extra-disk-1": { - "size": 100 - } - }, - "boot_disk": { - "image_name": "ubuntu-os-cloud/ubuntu-2004-focal-v20220118", - "size": 50 - } - }, - "worker-1": { - "node_type": "worker", - "size": "n1-standard-8", - "zone": "us-central1-a", - "additional_disks": { - "extra-disk-1": { - "size": 100 - } - }, - "boot_disk": { - "image_name": "ubuntu-os-cloud/ubuntu-2004-focal-v20220118", - "size": 50 - } - } 
- } -} diff --git a/contrib/terraform/gcp/variables.tf b/contrib/terraform/gcp/variables.tf deleted file mode 100644 index 3e960232a97..00000000000 --- a/contrib/terraform/gcp/variables.tf +++ /dev/null @@ -1,108 +0,0 @@ -variable keyfile_location { - description = "Location of the json keyfile to use with the google provider" - type = string -} - -variable region { - description = "Region of all resources" - type = string -} - -variable gcp_project_id { - description = "ID of the project" - type = string -} - -variable prefix { - description = "Prefix for resource names" - default = "default" -} - -variable machines { - description = "Cluster machines" - type = map(object({ - node_type = string - size = string - zone = string - additional_disks = map(object({ - size = number - })) - boot_disk = object({ - image_name = string - size = number - }) - })) -} - -variable "master_sa_email" { - type = string - default = "" -} - -variable "master_sa_scopes" { - type = list(string) - default = ["https://www.googleapis.com/auth/cloud-platform"] -} - -variable "master_preemptible" { - type = bool - default = false -} - -variable "master_additional_disk_type" { - type = string - default = "pd-ssd" -} - -variable "worker_sa_email" { - type = string - default = "" -} - -variable "worker_sa_scopes" { - type = list(string) - default = ["https://www.googleapis.com/auth/cloud-platform"] -} - -variable "worker_preemptible" { - type = bool - default = false -} - -variable "worker_additional_disk_type" { - type = string - default = "pd-ssd" -} - -variable ssh_pub_key { - description = "Path to public SSH key file which is injected into the VMs." - type = string -} - -variable ssh_whitelist { - type = list(string) -} - -variable api_server_whitelist { - type = list(string) -} - -variable nodeport_whitelist { - type = list(string) -} - -variable "ingress_whitelist" { - type = list(string) - default = ["0.0.0.0/0"] -} - -variable "extra_ingress_firewalls" { - type = map(object({ - source_ranges = set(string) - protocol = string - ports = list(string) - target_tags = set(string) - })) - - default = {} -} diff --git a/contrib/terraform/group_vars b/contrib/terraform/group_vars deleted file mode 120000 index 4dd828e8e58..00000000000 --- a/contrib/terraform/group_vars +++ /dev/null @@ -1 +0,0 @@ -../../inventory/local/group_vars \ No newline at end of file diff --git a/contrib/terraform/hetzner/README.md b/contrib/terraform/hetzner/README.md deleted file mode 100644 index 79e879c4fe6..00000000000 --- a/contrib/terraform/hetzner/README.md +++ /dev/null @@ -1,122 +0,0 @@ -# Kubernetes on Hetzner with Terraform - -Provision a Kubernetes cluster on [Hetzner](https://www.hetzner.com/cloud) using Terraform and Kubespray - -## Overview - -The setup looks like following - -```text - Kubernetes cluster -+--------------------------+ -| +--------------+ | -| | +--------------+ | -| --> | | | | -| | | Master/etcd | | -| | | node(s) | | -| +-+ | | -| +--------------+ | -| ^ | -| | | -| v | -| +--------------+ | -| | +--------------+ | -| --> | | | | -| | | Worker | | -| | | node(s) | | -| +-+ | | -| +--------------+ | -+--------------------------+ -``` - -The nodes uses a private network for node to node communication and a public interface for all external communication. - -## Requirements - -* Terraform 0.14.0 or newer - -## Quickstart - -NOTE: Assumes you are at the root of the kubespray repo. - -For authentication in your cluster you can use the environment variables. 
-
-```bash
-export HCLOUD_TOKEN=api-token
-```
-
-Copy the cluster configuration files.
-
-```bash
-CLUSTER=my-hetzner-cluster
-cp -r inventory/sample inventory/$CLUSTER
-cp contrib/terraform/hetzner/default.tfvars inventory/$CLUSTER/
-cd inventory/$CLUSTER
-```
-
-Edit `default.tfvars` to match your requirements.
-
-To use Flatcar Container Linux instead of the basic Hetzner images:
-
-```bash
-cd ../../contrib/terraform/hetzner
-```
-
-Edit `main.tf`: activate the `source = "./modules/kubernetes-cluster-flatcar"` line and
-comment out `source = "./modules/kubernetes-cluster"`, then
-activate `ssh_private_key_path = var.ssh_private_key_path`. The VM boots into
-rescue mode with the image selected in `var.machines` but installs Flatcar instead.
-
-Run Terraform to create the infrastructure.
-
-```bash
-cd ./kubespray
-terraform -chdir=./contrib/terraform/hetzner/ init
-terraform -chdir=./contrib/terraform/hetzner/ apply --var-file=../../../inventory/$CLUSTER/default.tfvars
-```
-
-You should now have an inventory file named `inventory.ini` that you can use with kubespray to set up a cluster.
-
-It is a good idea to check that you have basic SSH connectivity to the nodes:
-
-```bash
-ansible -i inventory.ini -m ping all
-```
-
-You can then set up Kubernetes with kubespray using the generated inventory:
-
-```bash
-ansible-playbook -i inventory.ini ../../cluster.yml -b -v
-```
-
-## Cloud controller
-
-For better integration with the cloud you can install the [hcloud cloud controller](https://github.com/hetznercloud/hcloud-cloud-controller-manager) and [CSI driver](https://github.com/hetznercloud/csi-driver).
-
-Please read the instructions in both repos on how to install them.
-
-## Teardown
-
-You can tear down your infrastructure using the following Terraform command:
-
-```bash
-cd ./kubespray
-terraform -chdir=./contrib/terraform/hetzner/ destroy --var-file=../../../inventory/$CLUSTER/default.tfvars
-```
-
-## Variables
-
-* `prefix`: Prefix to add to all resources; if set to `""`, no prefix is added
-* `ssh_public_keys`: List of public SSH keys to install on all machines
-* `zone`: The zone in which to run the cluster
-* `network_zone`: The network zone where the cluster is running
-* `machines`: Machines to provision.
Key of this object will be used as the name of the machine - * `node_type`: The role of this node *(master|worker)* - * `size`: Size of the VM - * `image`: The image to use for the VM -* `ssh_whitelist`: List of IP ranges (CIDR) that will be allowed to ssh to the nodes -* `api_server_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the API server -* `nodeport_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the kubernetes nodes on port 30000-32767 (kubernetes nodeports) -* `ingress_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to kubernetes workers on port 80 and 443 diff --git a/contrib/terraform/hetzner/default.tfvars b/contrib/terraform/hetzner/default.tfvars deleted file mode 100644 index 4e70bf1d938..00000000000 --- a/contrib/terraform/hetzner/default.tfvars +++ /dev/null @@ -1,46 +0,0 @@ -prefix = "default" -zone = "hel1" -network_zone = "eu-central" -inventory_file = "inventory.ini" - -ssh_public_keys = [ - # Put your public SSH key here - "ssh-rsa I-did-not-read-the-docs", - "ssh-rsa I-did-not-read-the-docs 2", -] - -ssh_private_key_path = "~/.ssh/id_rsa" - -machines = { - "master-0" : { - "node_type" : "master", - "size" : "cx21", - "image" : "ubuntu-22.04", - }, - "worker-0" : { - "node_type" : "worker", - "size" : "cx21", - "image" : "ubuntu-22.04", - }, - "worker-1" : { - "node_type" : "worker", - "size" : "cx21", - "image" : "ubuntu-22.04", - } -} - -nodeport_whitelist = [ - "0.0.0.0/0" -] - -ingress_whitelist = [ - "0.0.0.0/0" -] - -ssh_whitelist = [ - "0.0.0.0/0" -] - -api_server_whitelist = [ - "0.0.0.0/0" -] diff --git a/contrib/terraform/hetzner/main.tf b/contrib/terraform/hetzner/main.tf deleted file mode 100644 index 8e38cee302e..00000000000 --- a/contrib/terraform/hetzner/main.tf +++ /dev/null @@ -1,57 +0,0 @@ -provider "hcloud" {} - -module "kubernetes" { - source = "./modules/kubernetes-cluster" - # source = "./modules/kubernetes-cluster-flatcar" - - prefix = var.prefix - - zone = var.zone - - machines = var.machines - - #only for flatcar - #ssh_private_key_path = var.ssh_private_key_path - - ssh_public_keys = var.ssh_public_keys - network_zone = var.network_zone - - ssh_whitelist = var.ssh_whitelist - api_server_whitelist = var.api_server_whitelist - nodeport_whitelist = var.nodeport_whitelist - ingress_whitelist = var.ingress_whitelist -} - -# -# Generate ansible inventory -# - -locals { - inventory = templatefile( - "${path.module}/templates/inventory.tpl", - { - connection_strings_master = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s ip=%s etcd_member_name=etcd%d", - keys(module.kubernetes.master_ip_addresses), - values(module.kubernetes.master_ip_addresses).*.public_ip, - values(module.kubernetes.master_ip_addresses).*.private_ip, - range(1, length(module.kubernetes.master_ip_addresses) + 1))) - connection_strings_worker = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s ip=%s", - keys(module.kubernetes.worker_ip_addresses), - values(module.kubernetes.worker_ip_addresses).*.public_ip, - values(module.kubernetes.worker_ip_addresses).*.private_ip)) - list_master = join("\n", keys(module.kubernetes.master_ip_addresses)) - list_worker = join("\n", keys(module.kubernetes.worker_ip_addresses)) - network_id = module.kubernetes.network_id - } - ) -} - -resource "null_resource" "inventories" { - provisioner "local-exec" { - command = "echo '${local.inventory}' > ${var.inventory_file}" - } - - triggers = { - template = local.inventory - } -} diff --git 
a/contrib/terraform/hetzner/modules/kubernetes-cluster-flatcar/main.tf b/contrib/terraform/hetzner/modules/kubernetes-cluster-flatcar/main.tf deleted file mode 100644 index b54d360bff3..00000000000 --- a/contrib/terraform/hetzner/modules/kubernetes-cluster-flatcar/main.tf +++ /dev/null @@ -1,144 +0,0 @@ -resource "hcloud_network" "kubernetes" { - name = "${var.prefix}-network" - ip_range = var.private_network_cidr -} - -resource "hcloud_network_subnet" "kubernetes" { - type = "cloud" - network_id = hcloud_network.kubernetes.id - network_zone = var.network_zone - ip_range = var.private_subnet_cidr -} - -resource "hcloud_ssh_key" "first" { - name = var.prefix - public_key = var.ssh_public_keys.0 -} - -resource "hcloud_server" "machine" { - for_each = { - for name, machine in var.machines : - name => machine - } - - name = "${var.prefix}-${each.key}" - ssh_keys = [hcloud_ssh_key.first.id] - # boot into rescue OS - rescue = "linux64" - # dummy value for the OS because Flatcar is not available - image = each.value.image - server_type = each.value.size - location = var.zone - connection { - host = self.ipv4_address - timeout = "5m" - private_key = file(var.ssh_private_key_path) - } - firewall_ids = each.value.node_type == "master" ? [hcloud_firewall.master.id] : [hcloud_firewall.worker.id] - provisioner "file" { - content = data.ct_config.machine-ignitions[each.key].rendered - destination = "/root/ignition.json" - } - - provisioner "remote-exec" { - inline = [ - "set -ex", - "apt update", - "apt install -y gawk", - "curl -fsSLO --retry-delay 1 --retry 60 --retry-connrefused --retry-max-time 60 --connect-timeout 20 https://raw.githubusercontent.com/flatcar/init/flatcar-master/bin/flatcar-install", - "chmod +x flatcar-install", - "./flatcar-install -s -i /root/ignition.json -C stable", - "shutdown -r +1", - ] - } - - # optional: - provisioner "remote-exec" { - connection { - host = self.ipv4_address - private_key = file(var.ssh_private_key_path) - timeout = "3m" - user = var.user_flatcar - } - - inline = [ - "sudo hostnamectl set-hostname ${self.name}", - ] - } -} - -resource "hcloud_server_network" "machine" { - for_each = { - for name, machine in var.machines : - name => hcloud_server.machine[name] - } - server_id = each.value.id - subnet_id = hcloud_network_subnet.kubernetes.id -} - -data "ct_config" "machine-ignitions" { - for_each = { - for name, machine in var.machines : - name => machine - } - - strict = false - content = templatefile( - "${path.module}/templates/machine.yaml.tmpl", - { - ssh_keys = jsonencode(var.ssh_public_keys) - user_flatcar = var.user_flatcar - name = each.key - } - ) -} - -resource "hcloud_firewall" "master" { - name = "${var.prefix}-master-firewall" - - rule { - direction = "in" - protocol = "tcp" - port = "22" - source_ips = var.ssh_whitelist - } - - rule { - direction = "in" - protocol = "tcp" - port = "6443" - source_ips = var.api_server_whitelist - } -} - -resource "hcloud_firewall" "worker" { - name = "${var.prefix}-worker-firewall" - - rule { - direction = "in" - protocol = "tcp" - port = "22" - source_ips = var.ssh_whitelist - } - - rule { - direction = "in" - protocol = "tcp" - port = "80" - source_ips = var.ingress_whitelist - } - - rule { - direction = "in" - protocol = "tcp" - port = "443" - source_ips = var.ingress_whitelist - } - - rule { - direction = "in" - protocol = "tcp" - port = "30000-32767" - source_ips = var.nodeport_whitelist - } -} diff --git a/contrib/terraform/hetzner/modules/kubernetes-cluster-flatcar/outputs.tf 
b/contrib/terraform/hetzner/modules/kubernetes-cluster-flatcar/outputs.tf deleted file mode 100644 index be524deb66d..00000000000 --- a/contrib/terraform/hetzner/modules/kubernetes-cluster-flatcar/outputs.tf +++ /dev/null @@ -1,29 +0,0 @@ -output "master_ip_addresses" { - value = { - for name, machine in var.machines : - name => { - "private_ip" = hcloud_server_network.machine[name].ip - "public_ip" = hcloud_server.machine[name].ipv4_address - } - if machine.node_type == "master" - } -} - -output "worker_ip_addresses" { - value = { - for name, machine in var.machines : - name => { - "private_ip" = hcloud_server_network.machine[name].ip - "public_ip" = hcloud_server.machine[name].ipv4_address - } - if machine.node_type == "worker" - } -} - -output "cluster_private_network_cidr" { - value = var.private_subnet_cidr -} - -output "network_id" { - value = hcloud_network.kubernetes.id -} diff --git a/contrib/terraform/hetzner/modules/kubernetes-cluster-flatcar/templates/machine.yaml.tmpl b/contrib/terraform/hetzner/modules/kubernetes-cluster-flatcar/templates/machine.yaml.tmpl deleted file mode 100644 index 95ce1d867ad..00000000000 --- a/contrib/terraform/hetzner/modules/kubernetes-cluster-flatcar/templates/machine.yaml.tmpl +++ /dev/null @@ -1,19 +0,0 @@ -variant: flatcar -version: 1.0.0 - -passwd: - users: - - name: ${user_flatcar} - ssh_authorized_keys: ${ssh_keys} - -storage: - files: - - path: /home/core/works - filesystem: root - mode: 0755 - contents: - inline: | - #!/bin/bash - set -euo pipefail - hostname="$(hostname)" - echo My name is ${name} and the hostname is $${hostname} diff --git a/contrib/terraform/hetzner/modules/kubernetes-cluster-flatcar/variables.tf b/contrib/terraform/hetzner/modules/kubernetes-cluster-flatcar/variables.tf deleted file mode 100644 index 809377946ec..00000000000 --- a/contrib/terraform/hetzner/modules/kubernetes-cluster-flatcar/variables.tf +++ /dev/null @@ -1,60 +0,0 @@ - -variable "zone" { - type = string - default = "fsn1" -} - -variable "prefix" { - default = "k8s" -} - -variable "user_flatcar" { - type = string - default = "core" -} - -variable "machines" { - type = map(object({ - node_type = string - size = string - image = string - })) -} - - - -variable "ssh_public_keys" { - type = list(string) -} - -variable "ssh_private_key_path" { - type = string - default = "~/.ssh/id_rsa" -} - -variable "ssh_whitelist" { - type = list(string) -} - -variable "api_server_whitelist" { - type = list(string) -} - -variable "nodeport_whitelist" { - type = list(string) -} - -variable "ingress_whitelist" { - type = list(string) -} - -variable "private_network_cidr" { - default = "10.0.0.0/16" -} - -variable "private_subnet_cidr" { - default = "10.0.10.0/24" -} -variable "network_zone" { - default = "eu-central" -} diff --git a/contrib/terraform/hetzner/modules/kubernetes-cluster-flatcar/versions.tf b/contrib/terraform/hetzner/modules/kubernetes-cluster-flatcar/versions.tf deleted file mode 100644 index ac98e278469..00000000000 --- a/contrib/terraform/hetzner/modules/kubernetes-cluster-flatcar/versions.tf +++ /dev/null @@ -1,14 +0,0 @@ -terraform { - required_providers { - hcloud = { - source = "hetznercloud/hcloud" - } - ct = { - source = "poseidon/ct" - version = "0.11.0" - } - null = { - source = "hashicorp/null" - } - } -} diff --git a/contrib/terraform/hetzner/modules/kubernetes-cluster/main.tf b/contrib/terraform/hetzner/modules/kubernetes-cluster/main.tf deleted file mode 100644 index 2a0e458815f..00000000000 --- 
a/contrib/terraform/hetzner/modules/kubernetes-cluster/main.tf +++ /dev/null @@ -1,122 +0,0 @@ -resource "hcloud_network" "kubernetes" { - name = "${var.prefix}-network" - ip_range = var.private_network_cidr -} - -resource "hcloud_network_subnet" "kubernetes" { - type = "cloud" - network_id = hcloud_network.kubernetes.id - network_zone = var.network_zone - ip_range = var.private_subnet_cidr -} - -resource "hcloud_server" "master" { - for_each = { - for name, machine in var.machines : - name => machine - if machine.node_type == "master" - } - - name = "${var.prefix}-${each.key}" - image = each.value.image - server_type = each.value.size - location = var.zone - - user_data = templatefile( - "${path.module}/templates/cloud-init.tmpl", - { - ssh_public_keys = var.ssh_public_keys - } - ) - - firewall_ids = [hcloud_firewall.master.id] -} - -resource "hcloud_server_network" "master" { - for_each = hcloud_server.master - - server_id = each.value.id - - subnet_id = hcloud_network_subnet.kubernetes.id -} - -resource "hcloud_server" "worker" { - for_each = { - for name, machine in var.machines : - name => machine - if machine.node_type == "worker" - } - - name = "${var.prefix}-${each.key}" - image = each.value.image - server_type = each.value.size - location = var.zone - - user_data = templatefile( - "${path.module}/templates/cloud-init.tmpl", - { - ssh_public_keys = var.ssh_public_keys - } - ) - - firewall_ids = [hcloud_firewall.worker.id] - -} - -resource "hcloud_server_network" "worker" { - for_each = hcloud_server.worker - - server_id = each.value.id - - subnet_id = hcloud_network_subnet.kubernetes.id -} - -resource "hcloud_firewall" "master" { - name = "${var.prefix}-master-firewall" - - rule { - direction = "in" - protocol = "tcp" - port = "22" - source_ips = var.ssh_whitelist - } - - rule { - direction = "in" - protocol = "tcp" - port = "6443" - source_ips = var.api_server_whitelist - } -} - -resource "hcloud_firewall" "worker" { - name = "${var.prefix}-worker-firewall" - - rule { - direction = "in" - protocol = "tcp" - port = "22" - source_ips = var.ssh_whitelist - } - - rule { - direction = "in" - protocol = "tcp" - port = "80" - source_ips = var.ingress_whitelist - } - - rule { - direction = "in" - protocol = "tcp" - port = "443" - source_ips = var.ingress_whitelist - } - - rule { - direction = "in" - protocol = "tcp" - port = "30000-32767" - source_ips = var.nodeport_whitelist - } -} diff --git a/contrib/terraform/hetzner/modules/kubernetes-cluster/output.tf b/contrib/terraform/hetzner/modules/kubernetes-cluster/output.tf deleted file mode 100644 index 5c31aaa003c..00000000000 --- a/contrib/terraform/hetzner/modules/kubernetes-cluster/output.tf +++ /dev/null @@ -1,27 +0,0 @@ -output "master_ip_addresses" { - value = { - for key, instance in hcloud_server.master : - instance.name => { - "private_ip" = hcloud_server_network.master[key].ip - "public_ip" = hcloud_server.master[key].ipv4_address - } - } -} - -output "worker_ip_addresses" { - value = { - for key, instance in hcloud_server.worker : - instance.name => { - "private_ip" = hcloud_server_network.worker[key].ip - "public_ip" = hcloud_server.worker[key].ipv4_address - } - } -} - -output "cluster_private_network_cidr" { - value = var.private_subnet_cidr -} - -output "network_id" { - value = hcloud_network.kubernetes.id -} diff --git a/contrib/terraform/hetzner/modules/kubernetes-cluster/templates/cloud-init.tmpl b/contrib/terraform/hetzner/modules/kubernetes-cluster/templates/cloud-init.tmpl deleted file mode 100644 index 
02a4e2dd084..00000000000 --- a/contrib/terraform/hetzner/modules/kubernetes-cluster/templates/cloud-init.tmpl +++ /dev/null @@ -1,16 +0,0 @@ -#cloud-config - -users: - - default - - name: ubuntu - shell: /bin/bash - sudo: "ALL=(ALL) NOPASSWD:ALL" - ssh_authorized_keys: - %{ for ssh_public_key in ssh_public_keys ~} - - ${ssh_public_key} - %{ endfor ~} - -ssh_authorized_keys: -%{ for ssh_public_key in ssh_public_keys ~} - - ${ssh_public_key} -%{ endfor ~} diff --git a/contrib/terraform/hetzner/modules/kubernetes-cluster/variables.tf b/contrib/terraform/hetzner/modules/kubernetes-cluster/variables.tf deleted file mode 100644 index 7486e0806a5..00000000000 --- a/contrib/terraform/hetzner/modules/kubernetes-cluster/variables.tf +++ /dev/null @@ -1,44 +0,0 @@ -variable "zone" { - type = string -} - -variable "prefix" {} - -variable "machines" { - type = map(object({ - node_type = string - size = string - image = string - })) -} - -variable "ssh_public_keys" { - type = list(string) -} - -variable "ssh_whitelist" { - type = list(string) -} - -variable "api_server_whitelist" { - type = list(string) -} - -variable "nodeport_whitelist" { - type = list(string) -} - -variable "ingress_whitelist" { - type = list(string) -} - -variable "private_network_cidr" { - default = "10.0.0.0/16" -} - -variable "private_subnet_cidr" { - default = "10.0.10.0/24" -} -variable "network_zone" { - default = "eu-central" -} diff --git a/contrib/terraform/hetzner/modules/kubernetes-cluster/versions.tf b/contrib/terraform/hetzner/modules/kubernetes-cluster/versions.tf deleted file mode 100644 index 78bc5047b07..00000000000 --- a/contrib/terraform/hetzner/modules/kubernetes-cluster/versions.tf +++ /dev/null @@ -1,9 +0,0 @@ -terraform { - required_providers { - hcloud = { - source = "hetznercloud/hcloud" - version = "1.38.2" - } - } - required_version = ">= 0.14" -} diff --git a/contrib/terraform/hetzner/output.tf b/contrib/terraform/hetzner/output.tf deleted file mode 100644 index 0336f72ca80..00000000000 --- a/contrib/terraform/hetzner/output.tf +++ /dev/null @@ -1,7 +0,0 @@ -output "master_ips" { - value = module.kubernetes.master_ip_addresses -} - -output "worker_ips" { - value = module.kubernetes.worker_ip_addresses -} diff --git a/contrib/terraform/hetzner/sample-inventory/cluster.tfvars b/contrib/terraform/hetzner/sample-inventory/cluster.tfvars deleted file mode 100644 index 4e70bf1d938..00000000000 --- a/contrib/terraform/hetzner/sample-inventory/cluster.tfvars +++ /dev/null @@ -1,46 +0,0 @@ -prefix = "default" -zone = "hel1" -network_zone = "eu-central" -inventory_file = "inventory.ini" - -ssh_public_keys = [ - # Put your public SSH key here - "ssh-rsa I-did-not-read-the-docs", - "ssh-rsa I-did-not-read-the-docs 2", -] - -ssh_private_key_path = "~/.ssh/id_rsa" - -machines = { - "master-0" : { - "node_type" : "master", - "size" : "cx21", - "image" : "ubuntu-22.04", - }, - "worker-0" : { - "node_type" : "worker", - "size" : "cx21", - "image" : "ubuntu-22.04", - }, - "worker-1" : { - "node_type" : "worker", - "size" : "cx21", - "image" : "ubuntu-22.04", - } -} - -nodeport_whitelist = [ - "0.0.0.0/0" -] - -ingress_whitelist = [ - "0.0.0.0/0" -] - -ssh_whitelist = [ - "0.0.0.0/0" -] - -api_server_whitelist = [ - "0.0.0.0/0" -] diff --git a/contrib/terraform/hetzner/sample-inventory/group_vars b/contrib/terraform/hetzner/sample-inventory/group_vars deleted file mode 120000 index 37359582379..00000000000 --- a/contrib/terraform/hetzner/sample-inventory/group_vars +++ /dev/null @@ -1 +0,0 @@ 
-../../../../inventory/sample/group_vars \ No newline at end of file diff --git a/contrib/terraform/hetzner/templates/inventory.tpl b/contrib/terraform/hetzner/templates/inventory.tpl deleted file mode 100644 index 08d63693f97..00000000000 --- a/contrib/terraform/hetzner/templates/inventory.tpl +++ /dev/null @@ -1,19 +0,0 @@ -[all] -${connection_strings_master} -${connection_strings_worker} - -[kube_control_plane] -${list_master} - -[etcd] -${list_master} - -[kube_node] -${list_worker} - -[k8s_cluster:children] -kube_control_plane -kube_node - -[k8s_cluster:vars] -network_id=${network_id} diff --git a/contrib/terraform/hetzner/variables.tf b/contrib/terraform/hetzner/variables.tf deleted file mode 100644 index 049ce0d4227..00000000000 --- a/contrib/terraform/hetzner/variables.tf +++ /dev/null @@ -1,56 +0,0 @@ -variable "zone" { - description = "The zone where to run the cluster" -} -variable "network_zone" { - description = "The network zone where the cluster is running" - default = "eu-central" -} - -variable "prefix" { - description = "Prefix for resource names" - default = "default" -} - -variable "machines" { - description = "Cluster machines" - type = map(object({ - node_type = string - size = string - image = string - })) -} - -variable "ssh_public_keys" { - description = "Public SSH keys which are injected into the VMs." - type = list(string) -} - -variable "ssh_private_key_path" { - description = "Private SSH key used to connect to the VMs." - type = string - default = "~/.ssh/id_rsa" -} - -variable "ssh_whitelist" { - description = "List of IP ranges (CIDR) to whitelist for ssh" - type = list(string) -} - -variable "api_server_whitelist" { - description = "List of IP ranges (CIDR) to whitelist for kubernetes api server" - type = list(string) -} - -variable "nodeport_whitelist" { - description = "List of IP ranges (CIDR) to whitelist for kubernetes nodeports" - type = list(string) -} - -variable "ingress_whitelist" { - description = "List of IP ranges (CIDR) to whitelist for HTTP" - type = list(string) -} - -variable "inventory_file" { - description = "Where to store the generated inventory file" -} diff --git a/contrib/terraform/hetzner/versions.tf b/contrib/terraform/hetzner/versions.tf deleted file mode 100644 index e331beb4582..00000000000 --- a/contrib/terraform/hetzner/versions.tf +++ /dev/null @@ -1,12 +0,0 @@ -terraform { - required_providers { - hcloud = { - source = "hetznercloud/hcloud" - version = "1.38.2" - } - null = { - source = "hashicorp/null" - } - } - required_version = ">= 0.14" -} diff --git a/contrib/terraform/nifcloud/.gitignore b/contrib/terraform/nifcloud/.gitignore deleted file mode 100644 index 9adadc30ac2..00000000000 --- a/contrib/terraform/nifcloud/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -*.tfstate* -.terraform.lock.hcl -.terraform - -sample-inventory/inventory.ini diff --git a/contrib/terraform/nifcloud/README.md b/contrib/terraform/nifcloud/README.md deleted file mode 100644 index a6dcf014855..00000000000 --- a/contrib/terraform/nifcloud/README.md +++ /dev/null @@ -1,138 +0,0 @@ -# Kubernetes on NIFCLOUD with Terraform - -Provision a Kubernetes cluster on [NIFCLOUD](https://pfs.nifcloud.com/) using Terraform and Kubespray - -## Overview - -The setup looks like the following - -```text - Kubernetes cluster - +----------------------------+ -+---------------+ | +--------------------+ | -| | | | +--------------------+ | -| API server LB +---------> | | | | -| | | | | Control Plane/etcd | | -+---------------+ | | | node(s) | | - | +-+ | | - |
+--------------------+ | - | ^ | - | | | - | v | - | +--------------------+ | - | | +--------------------+ | - | | | | | - | | | Worker | | - | | | node(s) | | - | +-+ | | - | +--------------------+ | - +----------------------------+ -``` - -## Requirements - -* Terraform 1.3.7 - -## Quickstart - -### Export Variables - -* Your NIFCLOUD credentials: - - ```bash - export NIFCLOUD_ACCESS_KEY_ID= - export NIFCLOUD_SECRET_ACCESS_KEY= - ``` - -* The SSH key used to connect to the instance: - * FYI: [Cloud Help (SSH Key)](https://pfs.nifcloud.com/help/ssh.htm) - - ```bash - export TF_VAR_SSHKEY_NAME= - ``` - -* The IP address to connect to the bastion server: - - ```bash - export TF_VAR_working_instance_ip=$(curl ifconfig.me) - ``` - -### Create The Infrastructure - -* Run terraform: - - ```bash - terraform init - terraform apply -var-file ./sample-inventory/cluster.tfvars - ``` - -### Set Up Kubernetes - -* Generate cluster configuration file: - - ```bash - ./generate-inventory.sh > sample-inventory/inventory.ini - ``` - -* Export Variables: - - ```bash - BASTION_IP=$(terraform output -json | jq -r '.kubernetes_cluster.value.bastion_info | to_entries[].value.public_ip') - API_LB_IP=$(terraform output -json | jq -r '.kubernetes_cluster.value.control_plane_lb') - CP01_IP=$(terraform output -json | jq -r '.kubernetes_cluster.value.control_plane_info | to_entries[0].value.private_ip') - export ANSIBLE_SSH_ARGS="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ProxyCommand=\"ssh root@${BASTION_IP} -W %h:%p\"" - ``` - -* Set up ssh-agent: - - ```bash - eval `ssh-agent` - ssh-add - ``` - -* Run cluster.yml playbook: - - ```bash - cd ./../../../ - ansible-playbook -i contrib/terraform/nifcloud/sample-inventory/inventory.ini cluster.yml - ``` - -### Connecting to Kubernetes - -* [Install kubectl](https://kubernetes.io/docs/tasks/tools/) on the localhost -* Fetch the kubeconfig file: - - ```bash - mkdir -p ~/.kube - scp -o ProxyCommand="ssh root@${BASTION_IP} -W %h:%p" root@${CP01_IP}:/etc/kubernetes/admin.conf ~/.kube/config - ``` - -* Rewrite /etc/hosts (use `tee` so the write happens with root privileges): - - ```bash - echo "${API_LB_IP} lb-apiserver.kubernetes.local" | sudo tee -a /etc/hosts - ``` - -* Run kubectl: - - ```bash - kubectl get node - ``` - -## Variables - -* `region`: Region where to run the cluster -* `az`: Availability zone where to run the cluster -* `private_ip_bn`: Private IP address of bastion server -* `private_network_cidr`: Subnet of private network -* `instances_cp`: Machines to provision as Control Plane nodes. The key of this object will be used as part of the machine's name - * `private_ip`: Private IP address of the machine -* `instances_wk`: Machines to provision as Worker Nodes. The key of this object will be used as part of the machine's name - * `private_ip`: Private IP address of the machine -* `instance_key_name`: The key name of the Key Pair to use for the instance -* `instance_type_bn`: The instance type of bastion server -* `instance_type_wk`: The instance type of worker node -* `instance_type_cp`: The instance type of control plane -* `image_name`: OS image used for the instance -* `working_instance_ip`: The IP address to connect to bastion server -* `accounting_type`: Accounting type. (1: monthly, 2: pay per use) diff --git a/contrib/terraform/nifcloud/generate-inventory.sh b/contrib/terraform/nifcloud/generate-inventory.sh deleted file mode 100755 index 5d90eb5f426..00000000000 --- a/contrib/terraform/nifcloud/generate-inventory.sh +++ /dev/null @@ -1,64 +0,0 @@ -#!/bin/bash - -# -# Generates an inventory file based on the terraform output.
-# After provisioning a cluster, simply run this script from the directory that holds the terraform state -# Default state file is terraform.tfstate -# - -set -e - -TF_OUT=$(terraform output -json) - -CONTROL_PLANES=$(jq -r '.kubernetes_cluster.value.control_plane_info | to_entries[]' <(echo "${TF_OUT}")) -WORKERS=$(jq -r '.kubernetes_cluster.value.worker_info | to_entries[]' <(echo "${TF_OUT}")) -mapfile -t CONTROL_PLANE_NAMES < <(jq -r '.key' <(echo "${CONTROL_PLANES}")) -mapfile -t WORKER_NAMES < <(jq -r '.key' <(echo "${WORKERS}")) - -API_LB=$(jq -r '.kubernetes_cluster.value.control_plane_lb' <(echo "${TF_OUT}")) - -echo "[all]" -# Generate control plane hosts -i=1 -for name in "${CONTROL_PLANE_NAMES[@]}"; do - private_ip=$(jq -r '. | select( .key=='"\"${name}\""' ) | .value.private_ip' <(echo "${CONTROL_PLANES}")) - echo "${name} ansible_user=root ansible_host=${private_ip} access_ip=${private_ip} ip=${private_ip} etcd_member_name=etcd${i}" - i=$(( i + 1 )) -done - -# Generate worker hosts -for name in "${WORKER_NAMES[@]}"; do - private_ip=$(jq -r '. | select( .key=='"\"${name}\""' ) | .value.private_ip' <(echo "${WORKERS}")) - echo "${name} ansible_user=root ansible_host=${private_ip} access_ip=${private_ip} ip=${private_ip}" -done - -echo "" -echo "[all:vars]" -echo "upstream_dns_servers=['8.8.8.8','8.8.4.4']" -echo "loadbalancer_apiserver={'address':'${API_LB}','port':'6443'}" - - -echo "" -echo "[kube_control_plane]" -for name in "${CONTROL_PLANE_NAMES[@]}"; do - echo "${name}" -done - -echo "" -echo "[etcd]" -for name in "${CONTROL_PLANE_NAMES[@]}"; do - echo "${name}" -done - -echo "" -echo "[kube_node]" -for name in "${WORKER_NAMES[@]}"; do - echo "${name}" -done - -echo "" -echo "[k8s_cluster:children]" -echo "kube_control_plane" -echo "kube_node" diff --git a/contrib/terraform/nifcloud/main.tf b/contrib/terraform/nifcloud/main.tf deleted file mode 100644 index d5a070967bc..00000000000 --- a/contrib/terraform/nifcloud/main.tf +++ /dev/null @@ -1,36 +0,0 @@ -provider "nifcloud" { - region = var.region -} - -module "kubernetes_cluster" { - source = "./modules/kubernetes-cluster" - - availability_zone = var.az - prefix = "dev" - - private_network_cidr = var.private_network_cidr - - instance_key_name = var.instance_key_name - instances_cp = var.instances_cp - instances_wk = var.instances_wk - image_name = var.image_name - - instance_type_bn = var.instance_type_bn - instance_type_cp = var.instance_type_cp - instance_type_wk = var.instance_type_wk - - private_ip_bn = var.private_ip_bn - - additional_lb_filter = [var.working_instance_ip] -} - -resource "nifcloud_security_group_rule" "ssh_from_bastion" { - security_group_names = [ - module.kubernetes_cluster.security_group_name.bastion - ] - type = "IN" - from_port = 22 - to_port = 22 - protocol = "TCP" - cidr_ip = var.working_instance_ip -} diff --git a/contrib/terraform/nifcloud/modules/kubernetes-cluster/main.tf b/contrib/terraform/nifcloud/modules/kubernetes-cluster/main.tf deleted file mode 100644 index 0e5fd383da9..00000000000 --- a/contrib/terraform/nifcloud/modules/kubernetes-cluster/main.tf +++ /dev/null @@ -1,301 +0,0 @@ -################################################# -## -## Local variables -## -locals { - # e.g. east-11 is 11 - az_num = reverse(split("-", var.availability_zone))[0] - # e.g.
east-11 is e11 - az_short_name = "${substr(reverse(split("-", var.availability_zone))[1], 0, 1)}${local.az_num}" - - # Port used by the protocol - port_ssh = 22 - port_kubectl = 6443 - port_kubelet = 10250 - - # calico: https://docs.tigera.io/calico/latest/getting-started/kubernetes/requirements#network-requirements - port_bgp = 179 - port_vxlan = 4789 - port_etcd = 2379 -} - -################################################# -## -## General -## - -# data -data "nifcloud_image" "this" { - image_name = var.image_name -} - -# private lan -resource "nifcloud_private_lan" "this" { - private_lan_name = "${var.prefix}lan" - availability_zone = var.availability_zone - cidr_block = var.private_network_cidr - accounting_type = var.accounting_type -} - -################################################# -## -## Bastion -## -resource "nifcloud_security_group" "bn" { - group_name = "${var.prefix}bn" - description = "${var.prefix} bastion" - availability_zone = var.availability_zone -} - -resource "nifcloud_instance" "bn" { - - instance_id = "${local.az_short_name}${var.prefix}bn01" - security_group = nifcloud_security_group.bn.group_name - instance_type = var.instance_type_bn - - user_data = templatefile("${path.module}/templates/userdata.tftpl", { - private_ip_address = var.private_ip_bn - ssh_port = local.port_ssh - hostname = "${local.az_short_name}${var.prefix}bn01" - }) - - availability_zone = var.availability_zone - accounting_type = var.accounting_type - image_id = data.nifcloud_image.this.image_id - key_name = var.instance_key_name - - network_interface { - network_id = "net-COMMON_GLOBAL" - } - network_interface { - network_id = nifcloud_private_lan.this.network_id - ip_address = "static" - } - - # The image_id changes when the OS image type is demoted from standard to public. - lifecycle { - ignore_changes = [ - image_id, - user_data, - ] - } -} - -################################################# -## -## Control Plane -## -resource "nifcloud_security_group" "cp" { - group_name = "${var.prefix}cp" - description = "${var.prefix} control plane" - availability_zone = var.availability_zone -} - -resource "nifcloud_instance" "cp" { - for_each = var.instances_cp - - instance_id = "${local.az_short_name}${var.prefix}${each.key}" - security_group = nifcloud_security_group.cp.group_name - instance_type = var.instance_type_cp - user_data = templatefile("${path.module}/templates/userdata.tftpl", { - private_ip_address = each.value.private_ip - ssh_port = local.port_ssh - hostname = "${local.az_short_name}${var.prefix}${each.key}" - }) - - availability_zone = var.availability_zone - accounting_type = var.accounting_type - image_id = data.nifcloud_image.this.image_id - key_name = var.instance_key_name - - network_interface { - network_id = "net-COMMON_GLOBAL" - } - network_interface { - network_id = nifcloud_private_lan.this.network_id - ip_address = "static" - } - - # The image_id changes when the OS image type is demoted from standard to public. 
- lifecycle { - ignore_changes = [ - image_id, - user_data, - ] - } -} - -resource "nifcloud_load_balancer" "this" { - load_balancer_name = "${local.az_short_name}${var.prefix}cp" - accounting_type = var.accounting_type - balancing_type = 1 // Round-Robin - load_balancer_port = local.port_kubectl - instance_port = local.port_kubectl - instances = [for v in nifcloud_instance.cp : v.instance_id] - filter = concat( - [for k, v in nifcloud_instance.cp : v.public_ip], - [for k, v in nifcloud_instance.wk : v.public_ip], - var.additional_lb_filter, - ) - filter_type = 1 // Allow -} - -################################################# -## -## Worker -## -resource "nifcloud_security_group" "wk" { - group_name = "${var.prefix}wk" - description = "${var.prefix} worker" - availability_zone = var.availability_zone -} - -resource "nifcloud_instance" "wk" { - for_each = var.instances_wk - - instance_id = "${local.az_short_name}${var.prefix}${each.key}" - security_group = nifcloud_security_group.wk.group_name - instance_type = var.instance_type_wk - user_data = templatefile("${path.module}/templates/userdata.tftpl", { - private_ip_address = each.value.private_ip - ssh_port = local.port_ssh - hostname = "${local.az_short_name}${var.prefix}${each.key}" - }) - - availability_zone = var.availability_zone - accounting_type = var.accounting_type - image_id = data.nifcloud_image.this.image_id - key_name = var.instance_key_name - - network_interface { - network_id = "net-COMMON_GLOBAL" - } - network_interface { - network_id = nifcloud_private_lan.this.network_id - ip_address = "static" - } - - # The image_id changes when the OS image type is demoted from standard to public. - lifecycle { - ignore_changes = [ - image_id, - user_data, - ] - } -} - -################################################# -## -## Security Group Rule: Kubernetes -## - -# ssh -resource "nifcloud_security_group_rule" "ssh_from_bastion" { - security_group_names = [ - nifcloud_security_group.wk.group_name, - nifcloud_security_group.cp.group_name, - ] - type = "IN" - from_port = local.port_ssh - to_port = local.port_ssh - protocol = "TCP" - source_security_group_name = nifcloud_security_group.bn.group_name -} - -# kubectl -resource "nifcloud_security_group_rule" "kubectl_from_worker" { - security_group_names = [ - nifcloud_security_group.cp.group_name, - ] - type = "IN" - from_port = local.port_kubectl - to_port = local.port_kubectl - protocol = "TCP" - source_security_group_name = nifcloud_security_group.wk.group_name -} - -# kubelet -resource "nifcloud_security_group_rule" "kubelet_from_worker" { - security_group_names = [ - nifcloud_security_group.cp.group_name, - ] - type = "IN" - from_port = local.port_kubelet - to_port = local.port_kubelet - protocol = "TCP" - source_security_group_name = nifcloud_security_group.wk.group_name -} - -resource "nifcloud_security_group_rule" "kubelet_from_control_plane" { - security_group_names = [ - nifcloud_security_group.wk.group_name, - ] - type = "IN" - from_port = local.port_kubelet - to_port = local.port_kubelet - protocol = "TCP" - source_security_group_name = nifcloud_security_group.cp.group_name -} - -################################################# -## -## Security Group Rule: calico -## - -# vxlan -resource "nifcloud_security_group_rule" "vxlan_from_control_plane" { - security_group_names = [ - nifcloud_security_group.wk.group_name, - ] - type = "IN" - from_port = local.port_vxlan - to_port = local.port_vxlan - protocol = "UDP" - source_security_group_name = nifcloud_security_group.cp.group_name
-} - -resource "nifcloud_security_group_rule" "vxlan_from_worker" { - security_group_names = [ - nifcloud_security_group.cp.group_name, - ] - type = "IN" - from_port = local.port_vxlan - to_port = local.port_vxlan - protocol = "UDP" - source_security_group_name = nifcloud_security_group.wk.group_name -} - -# bgp -resource "nifcloud_security_group_rule" "bgp_from_control_plane" { - security_group_names = [ - nifcloud_security_group.wk.group_name, - ] - type = "IN" - from_port = local.port_bgp - to_port = local.port_bgp - protocol = "TCP" - source_security_group_name = nifcloud_security_group.cp.group_name -} - -resource "nifcloud_security_group_rule" "bgp_from_worker" { - security_group_names = [ - nifcloud_security_group.cp.group_name, - ] - type = "IN" - from_port = local.port_bgp - to_port = local.port_bgp - protocol = "TCP" - source_security_group_name = nifcloud_security_group.wk.group_name -} - -# etcd -resource "nifcloud_security_group_rule" "etcd_from_worker" { - security_group_names = [ - nifcloud_security_group.cp.group_name, - ] - type = "IN" - from_port = local.port_etcd - to_port = local.port_etcd - protocol = "TCP" - source_security_group_name = nifcloud_security_group.wk.group_name -} diff --git a/contrib/terraform/nifcloud/modules/kubernetes-cluster/outputs.tf b/contrib/terraform/nifcloud/modules/kubernetes-cluster/outputs.tf deleted file mode 100644 index a6232f821da..00000000000 --- a/contrib/terraform/nifcloud/modules/kubernetes-cluster/outputs.tf +++ /dev/null @@ -1,48 +0,0 @@ -output "control_plane_lb" { - description = "The DNS name of the LB for the control plane" - value = nifcloud_load_balancer.this.dns_name -} - -output "security_group_name" { - description = "The security groups used in the cluster" - value = { - bastion = nifcloud_security_group.bn.group_name, - control_plane = nifcloud_security_group.cp.group_name, - worker = nifcloud_security_group.wk.group_name, - } -} - -output "private_network_id" { - description = "The private network used in the cluster" - value = nifcloud_private_lan.this.id -} - -output "bastion_info" { - description = "The bastion information in the cluster" - value = { (nifcloud_instance.bn.instance_id) : { - instance_id = nifcloud_instance.bn.instance_id, - unique_id = nifcloud_instance.bn.unique_id, - private_ip = nifcloud_instance.bn.private_ip, - public_ip = nifcloud_instance.bn.public_ip, - } } -} - -output "worker_info" { - description = "The worker information in the cluster" - value = { for v in nifcloud_instance.wk : v.instance_id => { - instance_id = v.instance_id, - unique_id = v.unique_id, - private_ip = v.private_ip, - public_ip = v.public_ip, - } } -} - -output "control_plane_info" { - description = "The control plane information in the cluster" - value = { for v in nifcloud_instance.cp : v.instance_id => { - instance_id = v.instance_id, - unique_id = v.unique_id, - private_ip = v.private_ip, - public_ip = v.public_ip, - } } -} diff --git a/contrib/terraform/nifcloud/modules/kubernetes-cluster/templates/userdata.tftpl b/contrib/terraform/nifcloud/modules/kubernetes-cluster/templates/userdata.tftpl deleted file mode 100644 index 55e626a2a0f..00000000000 --- a/contrib/terraform/nifcloud/modules/kubernetes-cluster/templates/userdata.tftpl +++ /dev/null @@ -1,45 +0,0 @@ -#!/bin/bash - -################################################# -## -## IP Address -## -configure_private_ip_address () { - cat << EOS > /etc/netplan/01-netcfg.yaml -network: - version: 2 - renderer: networkd - ethernets: - ens192: - dhcp4: yes - dhcp6: yes - dhcp-identifier: mac
- ens224: - dhcp4: no - dhcp6: no - addresses: [${private_ip_address}] -EOS - netplan apply -} -configure_private_ip_address - -################################################# -## -## SSH -## -configure_ssh_port () { - sed -i 's/^#*Port [0-9]*/Port ${ssh_port}/' /etc/ssh/sshd_config -} -configure_ssh_port - -################################################# -## -## Hostname -## -hostnamectl set-hostname ${hostname} - -################################################# -## -## Disable swap files generated by systemd-gpt-auto-generator -## -systemctl mask "dev-sda3.swap" diff --git a/contrib/terraform/nifcloud/modules/kubernetes-cluster/terraform.tf b/contrib/terraform/nifcloud/modules/kubernetes-cluster/terraform.tf deleted file mode 100644 index 97ef4847bf2..00000000000 --- a/contrib/terraform/nifcloud/modules/kubernetes-cluster/terraform.tf +++ /dev/null @@ -1,9 +0,0 @@ -terraform { - required_version = ">=1.3.7" - required_providers { - nifcloud = { - source = "nifcloud/nifcloud" - version = ">= 1.8.0, < 2.0.0" - } - } -} diff --git a/contrib/terraform/nifcloud/modules/kubernetes-cluster/variables.tf b/contrib/terraform/nifcloud/modules/kubernetes-cluster/variables.tf deleted file mode 100644 index 65c11fe2029..00000000000 --- a/contrib/terraform/nifcloud/modules/kubernetes-cluster/variables.tf +++ /dev/null @@ -1,81 +0,0 @@ -variable "availability_zone" { - description = "The availability zone" - type = string -} - -variable "prefix" { - description = "The prefix for the entire cluster" - type = string - validation { - condition = length(var.prefix) <= 5 - error_message = "Must be at most 5 characters long." - } -} - -variable "private_network_cidr" { - description = "The subnet of private network" - type = string - validation { - condition = can(cidrnetmask(var.private_network_cidr)) - error_message = "Must be a valid IPv4 CIDR block address." - } -} - -variable "private_ip_bn" { - description = "Private IP of bastion server" - type = string -} - -variable "instances_cp" { - type = map(object({ - private_ip = string - })) -} - -variable "instances_wk" { - type = map(object({ - private_ip = string - })) -} - -variable "instance_key_name" { - description = "The key name of the Key Pair to use for the instance" - type = string -} - -variable "instance_type_bn" { - description = "The instance type of bastion server" - type = string -} - -variable "instance_type_wk" { - description = "The instance type of worker" - type = string -} - -variable "instance_type_cp" { - description = "The instance type of control plane" - type = string -} - -variable "image_name" { - description = "The name of image" - type = string -} - -variable "additional_lb_filter" { - description = "Additional LB filter" - type = list(string) -} - -variable "accounting_type" { - type = string - default = "1" - validation { - condition = anytrue([ - var.accounting_type == "1", // Monthly - var.accounting_type == "2", // Pay per use - ]) - error_message = "Must be 1 or 2."
- } -} diff --git a/contrib/terraform/nifcloud/output.tf b/contrib/terraform/nifcloud/output.tf deleted file mode 100644 index dcdeacba2c8..00000000000 --- a/contrib/terraform/nifcloud/output.tf +++ /dev/null @@ -1,3 +0,0 @@ -output "kubernetes_cluster" { - value = module.kubernetes_cluster -} diff --git a/contrib/terraform/nifcloud/sample-inventory/cluster.tfvars b/contrib/terraform/nifcloud/sample-inventory/cluster.tfvars deleted file mode 100644 index 3410a54a886..00000000000 --- a/contrib/terraform/nifcloud/sample-inventory/cluster.tfvars +++ /dev/null @@ -1,22 +0,0 @@ -region = "jp-west-1" -az = "west-11" - -instance_key_name = "deployerkey" - -instance_type_bn = "e-medium" -instance_type_cp = "e-medium" -instance_type_wk = "e-medium" - -private_network_cidr = "192.168.30.0/24" -instances_cp = { - "cp01" : { private_ip : "192.168.30.11/24" } - "cp02" : { private_ip : "192.168.30.12/24" } - "cp03" : { private_ip : "192.168.30.13/24" } -} -instances_wk = { - "wk01" : { private_ip : "192.168.30.21/24" } - "wk02" : { private_ip : "192.168.30.22/24" } -} -private_ip_bn = "192.168.30.10/24" - -image_name = "Ubuntu Server 22.04 LTS" diff --git a/contrib/terraform/nifcloud/sample-inventory/group_vars b/contrib/terraform/nifcloud/sample-inventory/group_vars deleted file mode 120000 index 37359582379..00000000000 --- a/contrib/terraform/nifcloud/sample-inventory/group_vars +++ /dev/null @@ -1 +0,0 @@ -../../../../inventory/sample/group_vars \ No newline at end of file diff --git a/contrib/terraform/nifcloud/terraform.tf b/contrib/terraform/nifcloud/terraform.tf deleted file mode 100644 index 9a14bc665af..00000000000 --- a/contrib/terraform/nifcloud/terraform.tf +++ /dev/null @@ -1,9 +0,0 @@ -terraform { - required_version = ">=1.3.7" - required_providers { - nifcloud = { - source = "nifcloud/nifcloud" - version = "1.8.0" - } - } -} diff --git a/contrib/terraform/nifcloud/variables.tf b/contrib/terraform/nifcloud/variables.tf deleted file mode 100644 index 558655ffe8a..00000000000 --- a/contrib/terraform/nifcloud/variables.tf +++ /dev/null @@ -1,77 +0,0 @@ -variable "region" { - description = "The region" - type = string -} - -variable "az" { - description = "The availability zone" - type = string -} - -variable "private_ip_bn" { - description = "Private IP of bastion server" - type = string -} - -variable "private_network_cidr" { - description = "The subnet of private network" - type = string - validation { - condition = can(cidrnetmask(var.private_network_cidr)) - error_message = "Must be a valid IPv4 CIDR block address." - } -} - -variable "instances_cp" { - type = map(object({ - private_ip = string - })) -} - -variable "instances_wk" { - type = map(object({ - private_ip = string - })) -} - -variable "instance_key_name" { - description = "The key name of the Key Pair to use for the instance" - type = string -} - -variable "instance_type_bn" { - description = "The instance type of bastion server" - type = string -} - -variable "instance_type_wk" { - description = "The instance type of worker" - type = string -} - -variable "instance_type_cp" { - description = "The instance type of control plane" - type = string -} - -variable "image_name" { - description = "The name of image" - type = string -} - -variable "working_instance_ip" { - description = "The IP address to connect to bastion server." 
- type = string -} - -variable "accounting_type" { - type = string - default = "2" - validation { - condition = anytrue([ - var.accounting_type == "1", // Monthly - var.accounting_type == "2", // Pay per use - ]) - error_message = "Must be 1 or 2." - } -} diff --git a/contrib/terraform/openstack/.gitignore b/contrib/terraform/openstack/.gitignore deleted file mode 100644 index 7e4921aa2c5..00000000000 --- a/contrib/terraform/openstack/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -.terraform -*.tfvars -!sample-inventory/cluster.tfvars -*.tfstate -*.tfstate.backup diff --git a/contrib/terraform/openstack/README.md b/contrib/terraform/openstack/README.md deleted file mode 100644 index 73f8e39c833..00000000000 --- a/contrib/terraform/openstack/README.md +++ /dev/null @@ -1,801 +0,0 @@ -# Kubernetes on OpenStack with Terraform - -Provision a Kubernetes cluster with [Terraform](https://www.terraform.io) on -OpenStack. - -## Status - -This will install a Kubernetes cluster on an OpenStack Cloud. It should work on -most modern installs of OpenStack that support the basic services. - -### Known compatible public clouds - -- [Auro](https://auro.io/) -- [Betacloud](https://www.betacloud.io/) -- [CityCloud](https://www.citycloud.com/) -- [DreamHost](https://www.dreamhost.com/cloud/computing/) -- [ELASTX](https://elastx.se/) -- [EnterCloudSuite](https://www.entercloudsuite.com/) -- [FugaCloud](https://fuga.cloud/) -- [Open Telekom Cloud](https://cloud.telekom.de/) -- [OVH](https://www.ovh.com/) -- [Rackspace](https://www.rackspace.com/) -- [Safespring](https://www.safespring.com) -- [Ultimum](https://ultimum.io/) -- [VexxHost](https://vexxhost.com/) -- [Zetta](https://www.zetta.io/) -- [Cloudify](https://www.cloudify.ro/en) - -## Approach - -The terraform configuration inspects variables found in -[variables.tf](variables.tf) to create resources in your OpenStack cluster. -There is a [python script](../terraform.py) that reads the generated `.tfstate` -file to generate a dynamic inventory that is consumed by the main ansible script -to actually install kubernetes and stand up the cluster. - -### Networking - -The configuration includes creating a private subnet with a router to the -external net. It will allocate floating IPs from a pool and assign them to the -hosts where that makes sense. You have the option of creating bastion hosts -inside the private subnet to access the nodes there. Alternatively, a node with -a floating IP can be used as a jump host to nodes without. - -#### Using an existing router - -It is possible to use an existing router instead of creating one. To use an -existing router, set the router\_id variable to the UUID of the router you wish -to use. - -For example: - -```ini -router_id = "00c542e7-6f46-4535-ae95-984c7f0391a3" -``` - -### Kubernetes Nodes - -You can create many different kubernetes topologies by setting the number of -different classes of hosts. For each class there are options for allocating -floating IP addresses or not. - -- Control plane nodes with etcd -- Control plane nodes without etcd -- Standalone etcd hosts -- Kubernetes worker nodes - -Note that the Ansible script will report an invalid configuration if you wind up -with an even number of etcd instances since that is not a valid configuration. This -restriction includes standalone etcd nodes that are deployed in a cluster along with -control plane nodes with etcd replicas.
As an example, if you have three control plane -nodes with etcd replicas and three standalone etcd nodes, the script will fail since -there are now six total etcd replicas. - -### GlusterFS shared file system - -The Terraform configuration supports provisioning of an optional GlusterFS -shared file system based on a separate set of VMs. To enable this, you need to -specify: - -- the number of Gluster hosts (minimum 2) -- Size of the non-ephemeral volumes to be attached to store the GlusterFS bricks -- Other properties related to provisioning the hosts - -Even if you are using Flatcar Container Linux by Kinvolk for your cluster, you will still -need the GlusterFS VMs to be based on either Debian or RedHat based images. -Flatcar Container Linux by Kinvolk cannot serve GlusterFS, but can connect to it through -binaries available on hyperkube v1.4.3_coreos.0 or higher. - -## Requirements - -- [Install Terraform](https://www.terraform.io/intro/getting-started/install.html) 0.14 or later -- [Install Ansible](http://docs.ansible.com/ansible/latest/intro_installation.html) -- you already have a suitable OS image in Glance -- you already have a floating IP pool created -- you have security groups enabled -- you have a pair of keys generated that can be used to secure the new hosts - -## Module Architecture - -The configuration is divided into four modules: - -- Network -- Loadbalancer -- IPs -- Compute - -The main reason for splitting the configuration up in this way is to easily -accommodate situations where floating IPs are limited by a quota or if you have -any external references to the floating IP (e.g. DNS) that would otherwise have -to be updated. - -You can force your existing IPs by modifying the compute variables in -`kubespray.tf` as follows: - -```ini -k8s_master_fips = ["151.101.129.67"] -k8s_node_fips = ["151.101.129.68"] -``` - -## Terraform - -Terraform will be used to provision all of the OpenStack resources with base software as appropriate. - -### Configuration - -#### Inventory files - -Create an inventory directory for your cluster by copying the existing sample and linking the `hosts` script (used to build the inventory based on Terraform state): - -```ShellSession -cp -LRp contrib/terraform/openstack/sample-inventory inventory/$CLUSTER -cd inventory/$CLUSTER -ln -s ../../contrib/terraform/openstack/hosts -ln -s ../../contrib -``` - -This will be the base for subsequent Terraform commands. - -#### OpenStack access and credentials - -No provider variables are hardcoded inside `variables.tf` because Terraform -supports various authentication methods for OpenStack: the older script and -environment method (using `openrc`) as well as a newer declarative method, and -different OpenStack environments may support Identity API version 2 or 3. - -These are examples and may vary depending on your OpenStack cloud provider, -for an exhaustive list on how to authenticate on OpenStack with Terraform -please read the [OpenStack provider documentation](https://www.terraform.io/docs/providers/openstack/). 
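Whichever method you pick, it can help to first confirm which credentials your current shell would actually hand to Terraform. The snippet below is a minimal sanity-check sketch; it assumes nothing beyond the `OS_*` environment variables and the `clouds.yaml` lookup described in the two methods that follow:

```ShellSession
# Show the OpenStack-related variables visible to this shell.
# If only OS_CLOUD is set, credentials are read from the matching clouds.yaml entry.
env | grep '^OS_' || echo "No OS_* variables set; expecting credentials from clouds.yaml"
```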
- -##### Declarative method (recommended) - -The recommended authentication method is to describe credentials in a YAML file `clouds.yaml` that can be stored in: - -- the current directory -- `~/.config/openstack` -- `/etc/openstack` - -`clouds.yaml`: - -```yaml -clouds: - mycloud: - auth: - auth_url: https://openstack:5000/v3 - username: "username" - project_name: "projectname" - project_id: projectid - user_domain_name: "Default" - password: "password" - region_name: "RegionOne" - interface: "public" - identity_api_version: 3 -``` - -If you have multiple clouds defined in your `clouds.yaml` file, you can choose -the one you want to use with the environment variable `OS_CLOUD`: - -```ShellSession -export OS_CLOUD=mycloud -``` - -##### Openrc method - -When using classic environment variables, Terraform uses default `OS_*` -environment variables. A script suitable for your environment may be available -from Horizon under *Project* -> *Compute* -> *Access & Security* -> *API Access*. - -With identity v2: - -```ShellSession -source openrc - -env | grep OS - -OS_AUTH_URL=https://openstack:5000/v2.0 -OS_PROJECT_ID=projectid -OS_PROJECT_NAME=projectname -OS_USERNAME=username -OS_PASSWORD=password -OS_REGION_NAME=RegionOne -OS_INTERFACE=public -OS_IDENTITY_API_VERSION=2 -``` - -With identity v3: - -```ShellSession -source openrc - -env | grep OS - -OS_AUTH_URL=https://openstack:5000/v3 -OS_PROJECT_ID=projectid -OS_PROJECT_NAME=username -OS_PROJECT_DOMAIN_ID=default -OS_USERNAME=username -OS_PASSWORD=password -OS_REGION_NAME=RegionOne -OS_INTERFACE=public -OS_IDENTITY_API_VERSION=3 -OS_USER_DOMAIN_NAME=Default -``` - -Terraform does not support a mix of DomainName and DomainID; choose one or the other: - -- provider.openstack: You must provide exactly one of DomainID or DomainName to authenticate by Username - -```ShellSession -unset OS_USER_DOMAIN_NAME -export OS_USER_DOMAIN_ID=default -``` - -or - -```ShellSession -unset OS_PROJECT_DOMAIN_ID -export OS_PROJECT_DOMAIN_NAME=Default -``` - -#### Cluster variables - -The construction of the cluster is driven by values found in -[variables.tf](variables.tf). - -For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`. - -|Variable | Description | -|---------|-------------| -|`cluster_name` | All OpenStack resources will use the Terraform variable `cluster_name` (default `example`) in their name to make it easier to track. For example, the first compute resource will be named `example-kubernetes-1`. | -|`az_list` | List of Availability Zones available in your OpenStack cluster. | -|`network_name` | The name to be given to the internal network that will be generated | -|`use_existing_network`| Use an existing network with the name of `network_name`. `false` by default | -|`network_dns_domain` | (Optional) The dns_domain for the internal network that will be generated | -|`dns_nameservers`| An array of DNS name server names to be used by hosts in the internal subnet. | -|`floatingip_pool` | Name of the pool from which floating IPs will be allocated | -|`k8s_master_fips` | A list of floating IPs that you have already pre-allocated; they will be attached to master nodes instead of creating new random floating IPs. | -|`bastion_fips` | A list of floating IPs that you have already pre-allocated; they will be attached to the bastion node instead of creating new random floating IPs.
| -|`external_net` | UUID of the external network that will be routed to | -|`flavor_k8s_master`,`flavor_k8s_node`,`flavor_etcd`, `flavor_bastion`,`flavor_gfs_node` | Flavor depends on your OpenStack installation; you can get available flavor IDs through `openstack flavor list` | -|`image`,`image_gfs`, `image_master` | Name of the image to use in provisioning the compute resources. Should already be loaded into Glance. | -|`image_uuid`,`image_gfs_uuid`, `image_master_uuid` | UUID of the image to use in provisioning the compute resources. Should already be loaded into Glance. | -|`ssh_user`,`ssh_user_gfs` | The username to ssh into the image with. This usually depends on the image you have selected | -|`public_key_path` | Path on your local workstation to the public key file you wish to use in creating the key pairs | -|`number_of_k8s_masters`, `number_of_k8s_masters_no_floating_ip` | Number of nodes that serve as both master and etcd. These can be provisioned with or without floating IP addresses| -|`number_of_k8s_masters_no_etcd`, `number_of_k8s_masters_no_floating_ip_no_etcd` | Number of nodes that serve as just master with no etcd. These can be provisioned with or without floating IP addresses | -|`number_of_etcd` | Number of pure etcd nodes | -|`number_of_k8s_nodes`, `number_of_k8s_nodes_no_floating_ip` | Kubernetes worker nodes. These can be provisioned with or without floating IP addresses. | -|`number_of_bastions` | Number of bastion hosts to create. Scripts assume this is really just zero or one | -|`number_of_gfs_nodes_no_floating_ip` | Number of Gluster servers to provision. | -| `gfs_volume_size_in_gb` | Size of the non-ephemeral volumes to be attached to store the GlusterFS bricks | -|`supplementary_master_groups` | To add ansible groups to the masters, such as `kube_node` for tainting them as nodes, empty by default. | -|`supplementary_node_groups` | To add ansible groups to the nodes, such as `kube_ingress` for running ingress controller pods, empty by default.
| -|`bastion_allowed_remote_ips` | List of CIDR allowed to initiate an SSH connection, `["0.0.0.0/0"]` by default | -|`bastion_allowed_remote_ipv6_ips` | List of IPv6 CIDR allowed to initiate an SSH connection, `["::/0"]` by default | -|`master_allowed_remote_ips` | List of CIDR blocks allowed to initiate an API connection, `["0.0.0.0/0"]` by default | -|`master_allowed_remote_ipv6_ips` | List of IPv6 CIDR blocks allowed to initiate an API connection, `["::/0"]` by default | -|`bastion_allowed_ports` | List of ports to open on bastion node, `[]` by default | -|`bastion_allowed_ports_ipv6` | List of ports to open on bastion node for IPv6 CIDR blocks, `[]` by default | -|`k8s_allowed_remote_ips` | List of CIDR allowed to initiate an SSH connection, empty by default | -|`k8s_allowed_remote_ips_ipv6` | List of IPv6 CIDR allowed to initiate an SSH connection, empty by default | -|`k8s_allowed_egress_ipv6_ips` | List of IPv6 CIDRs allowed for egress traffic, `["::/0"]` by default | -|`worker_allowed_ports` | List of ports to open on worker nodes, `[{ "protocol" = "tcp", "port_range_min" = 30000, "port_range_max" = 32767, "remote_ip_prefix" = "0.0.0.0/0"}]` by default | -|`worker_allowed_ports_ipv6` | List of ports to open on worker nodes for IPv6 CIDR blocks, `[{ "protocol" = "tcp", "port_range_min" = 30000, "port_range_max" = 32767, "remote_ip_prefix" = "::/0"}]` by default | -|`master_allowed_ports` | List of ports to open on master nodes, expected format is `[{ "protocol" = "tcp", "port_range_min" = 443, "port_range_max" = 443, "remote_ip_prefix" = "0.0.0.0/0"}]`, empty by default | -|`master_allowed_ports_ipv6` | List of ports to open on master nodes for IPv6 CIDR blocks, expected format is `[{ "protocol" = "tcp", "port_range_min" = 443, "port_range_max" = 443, "remote_ip_prefix" = "::/0"}]`, empty by default | -|`node_root_volume_size_in_gb` | Size of the root volume for nodes, 0 to use ephemeral storage | -|`master_root_volume_size_in_gb` | Size of the root volume for masters, 0 to use ephemeral storage | -|`master_volume_type` | Volume type of the root volume for control_plane, 'Default' by default | -|`node_volume_type` | Volume type of the root volume for nodes, 'Default' by default | -|`gfs_root_volume_size_in_gb` | Size of the root volume for gluster, 0 to use ephemeral storage | -|`etcd_root_volume_size_in_gb` | Size of the root volume for etcd nodes, 0 to use ephemeral storage | -|`bastion_root_volume_size_in_gb` | Size of the root volume for bastions, 0 to use ephemeral storage | -|`master_server_group_policy` | Enable and use OpenStack Nova server groups for masters with set policy, default: "" (disabled) | -|`node_server_group_policy` | Enable and use OpenStack Nova server groups for nodes with set policy, default: "" (disabled) | -|`etcd_server_group_policy` | Enable and use OpenStack Nova server groups for etcd with set policy, default: "" (disabled) | -|`additional_server_groups` | Extra server groups to create. Set "policy" to the policy for the group, expected format is `{"new-server-group" = {"policy" = "anti-affinity"}}`, default: {} (to not create any extra groups) | -|`use_access_ip` | If 1, nodes with floating IPs will transmit internal cluster traffic via floating IPs; if 0 private IPs will be used instead. Default value is 1. | -|`port_security_enabled` | Allows you to disable port security by setting this to `false`. `true` by default | -|`force_null_port_security` | Set `null` instead of `true` or `false` for `port_security`.
`false` by default | -|`k8s_nodes` | Map containing worker node definitions, see explanation below | -|`k8s_masters` | Map containing master node definitions, see the explanation for k8s_nodes and `sample-inventory/cluster.tfvars` | -|`k8s_master_loadbalancer_enabled` | Enable and use an Octavia load balancer for the K8s master nodes | -|`k8s_master_loadbalancer_listener_port` | Define via which port the K8s API should be exposed. `6443` by default | -|`k8s_master_loadbalancer_server_port` | Define via which port the K8s API is available on the master nodes. `6443` by default | -|`k8s_master_loadbalancer_public_ip` | Specify if an existing floating IP should be used for the load balancer. A new floating IP is assigned by default | - -##### k8s_nodes - -Allows a custom definition of worker nodes giving the operator full control over individual node flavor and availability zone placement. -To enable the use of this mode, set the `number_of_k8s_nodes` and `number_of_k8s_nodes_no_floating_ip` variables to 0. -Then define your desired worker node configuration using the `k8s_nodes` variable. -The `az`, `flavor` and `floating_ip` parameters are mandatory. -The optional parameter `extra_groups` (a comma-delimited string) can be used to define extra inventory group memberships for specific nodes. - -```yaml -k8s_nodes: - node-name: - az: string # Name of the AZ - flavor: string # Flavor ID to use - floating_ip: bool # If floating IPs should be used or not - reserved_floating_ip: string # If floating_ip is true, use the existing floating IP; if reserved_floating_ip is an empty string and floating_ip is true, a new floating IP will be created - extra_groups: string # (optional) Additional groups to add for kubespray, defaults to no groups - image_id: string # (optional) Image ID to use, defaults to var.image_id or var.image - root_volume_size_in_gb: number # (optional) Size of the block storage to use as root disk, defaults to var.node_root_volume_size_in_gb or to use volume from flavor otherwise - volume_type: string # (optional) Volume type to use, defaults to var.node_volume_type - network_id: string # (optional) Use this network_id for the node, defaults to either var.network_id or ID of var.network_name - server_group: string # (optional) Server group to add this node to. If set, this has to be one specified in additional_server_groups, defaults to use the server group specified in node_server_group_policy - cloudinit: # (optional) Options for cloud-init - extra_partitions: # List of extra partitions (other than the root partition) to set up during creation - volume_path: string # Path to the volume to create partition for (e.g. /dev/vda ) - partition_path: string # Path to the partition (e.g. /dev/vda2 ) - mount_path: string # Path to where the partition should be mounted - partition_start: string # Where the partition should start (e.g. 10GB ). Note: if you set the partition_start to 0, there will be no space left for the root partition - partition_end: string # Where the partition should end (e.g. 10GB or -1 for end of volume) - netplan_critical_dhcp_interface: string # Name of interface to set the dhcp flag critical = true, to circumvent [this issue](https://bugs.launchpad.net/ubuntu/+source/systemd/+bug/1776013).
-``` - -For example: - -```ini -k8s_nodes = { - "1" = { - "az" = "sto1" - "flavor" = "83d8b44a-26a0-4f02-a981-079446926445" - "floating_ip" = true - }, - "2" = { - "az" = "sto2" - "flavor" = "83d8b44a-26a0-4f02-a981-079446926445" - "floating_ip" = true - }, - "3" = { - "az" = "sto3" - "flavor" = "83d8b44a-26a0-4f02-a981-079446926445" - "floating_ip" = true - "extra_groups" = "calico_rr" - } -} -``` - -Would result in the same configuration as: - -```ini -number_of_k8s_nodes = 3 -flavor_k8s_node = "83d8b44a-26a0-4f02-a981-079446926445" -az_list = ["sto1", "sto2", "sto3"] -``` - -And: - -```ini -k8s_nodes = { - "ing-1" = { - "az" = "sto1" - "flavor" = "83d8b44a-26a0-4f02-a981-079446926445" - "floating_ip" = true - }, - "ing-2" = { - "az" = "sto2" - "flavor" = "83d8b44a-26a0-4f02-a981-079446926445" - "floating_ip" = true - }, - "ing-3" = { - "az" = "sto3" - "flavor" = "83d8b44a-26a0-4f02-a981-079446926445" - "floating_ip" = true - }, - "big-1" = { - "az" = "sto1" - "flavor" = "3f73fc93-ec61-4808-88df-2580d94c1a9b" - "floating_ip" = false - }, - "big-2" = { - "az" = "sto2" - "flavor" = "3f73fc93-ec61-4808-88df-2580d94c1a9b" - "floating_ip" = false - }, - "big-3" = { - "az" = "sto3" - "flavor" = "3f73fc93-ec61-4808-88df-2580d94c1a9b" - "floating_ip" = false - }, - "small-1" = { - "az" = "sto1" - "flavor" = "7a6a998f-ac7f-4fb8-a534-2175b254f75e" - "floating_ip" = false - }, - "small-2" = { - "az" = "sto2" - "flavor" = "7a6a998f-ac7f-4fb8-a534-2175b254f75e" - "floating_ip" = false - }, - "small-3" = { - "az" = "sto3" - "flavor" = "7a6a998f-ac7f-4fb8-a534-2175b254f75e" - "floating_ip" = false - } -} -``` - -Would result in three nodes in each availability zone, each with its own naming, -flavor and floating IP configuration. - -The "schema": - -```ini -k8s_nodes = { - "key | node name suffix, must be unique" = { - "az" = string - "flavor" = string - "floating_ip" = bool - }, -} -``` - -All values are required. - -#### Terraform state files - -In the cluster's inventory folder, the following files might be created (either by Terraform -or manually); to prevent you from pushing them accidentally, they are listed in a -`.gitignore` file in the `terraform/openstack` directory: - -- `.terraform` -- `.tfvars` -- `.tfstate` -- `.tfstate.backup` - -You can still add them manually if you want to. - -### Initialization - -Before Terraform can operate on your cluster, you need to install the required -plugins. This is accomplished as follows: - -```ShellSession -cd inventory/$CLUSTER -terraform -chdir="../../contrib/terraform/openstack" init -``` - -This should finish fairly quickly, telling you Terraform has successfully initialized and loaded necessary modules. - -### Customizing with cloud-init - -You can apply cloud-init-based customization for the OpenStack instances before provisioning your cluster. -One common template is used for all instances.
-
-#### Terraform state files
-
-In the cluster's inventory folder, the following files might be created (either by Terraform
-or manually). To prevent you from pushing them accidentally, they are listed in a
-`.gitignore` file in the `terraform/openstack` directory:
-
-- `.terraform`
-- `.tfvars`
-- `.tfstate`
-- `.tfstate.backup`
-
-You can still add them manually if you want to.
-
-### Initialization
-
-Before Terraform can operate on your cluster, you need to install the required
-plugins. This is accomplished as follows:
-
-```ShellSession
-cd inventory/$CLUSTER
-terraform -chdir="../../contrib/terraform/openstack" init
-```
-
-This should finish fairly quickly, telling you that Terraform has successfully initialized and loaded the necessary modules.
-
-### Customizing with cloud-init
-
-You can apply cloud-init-based customization to the OpenStack instances before provisioning your cluster.
-One common template is used for all instances. Adjust the file shown below:
-`contrib/terraform/openstack/modules/compute/templates/cloudinit.yaml.tmpl`
-For example, to enable OpenStack noVNC console access and SSH access as `ansible_user=root`:
-
-```yaml
-#cloud-config
-## in some cases novnc console access is required
-## it requires ssh password to be set
-ssh_pwauth: yes
-chpasswd:
-  list: |
-    root:secret
-  expire: False
-
-## in some cases direct root ssh access via ssh key is required
-disable_root: false
-```
-
-### Provisioning cluster
-
-You can apply the Terraform configuration to your cluster with the following command
-issued from your cluster's inventory directory (`inventory/$CLUSTER`):
-
-```ShellSession
-terraform -chdir="../../contrib/terraform/openstack" apply -var-file=cluster.tfvars
-```
-
-If you chose to create a bastion host, this will create
-`contrib/terraform/openstack/k8s_cluster.yml` with an SSH command that lets Ansible
-access your machines by tunneling through the bastion's IP address. If
-you want to handle the SSH tunneling to these machines manually, delete
-or move that file. If you want to use it, just leave it there, as Ansible will
-pick it up automatically.
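-
-For reference, that file is rendered from
-`contrib/terraform/openstack/modules/compute/ansible_bastion_template.txt`, with only `USER` and
-`BASTION_ADDRESS` substituted. A sketch of the resulting entry, using an illustrative user and
-bastion address:
-
-```yaml
-# Rendered by the local-exec provisioner; "ubuntu" and "203.0.113.10" stand in
-# for the real SSH user and bastion floating IP. The template also appends
-# "-i {{ ansible_ssh_private_key_file }}" when that variable is defined.
-ansible_ssh_common_args: "-o ProxyCommand='ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -W %h:%p -q ubuntu@203.0.113.10'"
-```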
-
-### Destroying cluster
-
-You can destroy your new cluster with the following command issued from the cluster's inventory directory:
-
-```ShellSession
-terraform -chdir="../../contrib/terraform/openstack" destroy -var-file=cluster.tfvars
-```
-
-If you've started the Ansible run, it may also be a good idea to do some manual cleanup:
-
-- remove SSH keys from the destroyed cluster from your `~/.ssh/known_hosts` file
-- clean up any temporary cache files: `rm /tmp/$CLUSTER-*`
-
-### Debugging
-
-You can enable debugging output from Terraform by setting
-`OS_DEBUG` to 1 and `TF_LOG` to `DEBUG` before running the Terraform command.
-
-### Terraform output
-
-Terraform can output values that are useful for configuring Neutron/Octavia LBaaS or Cinder persistent volume provisioning as part of your Kubernetes deployment:
-
-- `private_subnet_id`: the subnet where your instances are running; used for `openstack_lbaas_subnet_id`
-- `floating_network_id`: the network where the floating IPs are provisioned; used for `openstack_lbaas_floating_network_id`
-
-## Ansible
-
-### Node access
-
-#### SSH
-
-Ensure your local ssh-agent is running and your SSH key has been added. This
-step is required by the Terraform provisioner:
-
-```ShellSession
-eval $(ssh-agent -s)
-ssh-add ~/.ssh/id_rsa
-```
-
-If you have deployed and destroyed a previous iteration of your cluster, you will need to clear out any stale keys from your SSH "known hosts" file (`~/.ssh/known_hosts`).
-
-#### Metadata variables
-
-The [python script](../terraform.py) that reads the
-generated `.tfstate` file to generate a dynamic inventory recognizes
-some variables within a "metadata" block, defined in a "resource"
-block (example):
-
-```ini
-resource "openstack_compute_instance_v2" "example" {
-  ...
-  metadata {
-    ssh_user = "ubuntu"
-    prefer_ipv6 = true
-    python_bin = "/usr/bin/python3"
-  }
-  ...
-}
-```
-
-As the example shows, these let you define the SSH username for
-Ansible, a Python binary which is needed by Ansible if
-`/usr/bin/python` doesn't exist, and whether the IPv6 address of the
-instance should be preferred over IPv4.
-
-#### Bastion host
-
-Bastion access will be determined by:
-
-- The number of bastion hosts you chose (set by the `number_of_bastions` Terraform variable).
-- The existence of nodes/masters with floating IPs (set by the `number_of_k8s_masters`, `number_of_k8s_nodes`, `number_of_k8s_masters_no_etcd` Terraform variables).
-
-If you have a bastion host, your SSH traffic will be routed through it directly, regardless of whether you have masters/nodes with a floating IP assigned.
-If you don't have a bastion host but at least one of your masters/nodes has a floating IP, then SSH traffic will be tunneled through one of those machines.
-
-So either a bastion host or at least one master/node with a floating IP is required.
-
-#### Test access
-
-Make sure you can connect to the hosts. Note that Flatcar Container Linux by Kinvolk will have a state `FAILED` due to Python not being present. This is okay, because Python will be installed during bootstrapping, as long as the hosts are not `UNREACHABLE`.
-
-```ShellSession
-$ ansible -i inventory/$CLUSTER/hosts -m ping all
-example-k8s_node-1 | SUCCESS => {
-    "changed": false,
-    "ping": "pong"
-}
-example-etcd-1 | SUCCESS => {
-    "changed": false,
-    "ping": "pong"
-}
-example-k8s-master-1 | SUCCESS => {
-    "changed": false,
-    "ping": "pong"
-}
-```
-
-If it fails, try to connect manually via SSH. It could be something as simple as a stale host key.
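-
-If a stale host key is indeed the culprit, removing the offending entry is enough; the stock
-OpenSSH tooling handles this (the address below is a placeholder for the unreachable host's IP):
-
-```ShellSession
-ssh-keygen -R 203.0.113.10
-```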
-
-### Configure cluster variables
-
-Edit `inventory/$CLUSTER/group_vars/all/all.yml`:
-
-- **bin_dir**:
-
-```yml
-# Directory where the binaries will be installed
-# Default:
-# bin_dir: /usr/local/bin
-# For Flatcar Container Linux by Kinvolk:
-bin_dir: /opt/bin
-```
-
-- and **cloud_provider**:
-
-```yml
-cloud_provider: openstack
-```
-
-Edit `inventory/$CLUSTER/group_vars/k8s_cluster/k8s_cluster.yml`:
-
-- Set variable **kube_network_plugin** to your desired networking plugin.
-  - **flannel** works out-of-the-box
-  - **calico** requires [configuring OpenStack Neutron ports](/docs/cloud_controllers/openstack.md) to allow service and pod subnets
-
-```yml
-# Choose network plugin (calico or flannel)
-# Can also be set to 'cloud', which lets the cloud provider set up appropriate routing
-kube_network_plugin: flannel
-```
-
-- Set variable **resolvconf_mode**
-
-```yml
-# Can be docker_dns, host_resolvconf or none
-# Default:
-# resolvconf_mode: docker_dns
-# For Flatcar Container Linux by Kinvolk:
-resolvconf_mode: host_resolvconf
-```
-
-- Set the maximum number of attached Cinder volumes per host (default 256)
-
-```yml
-node_volume_attach_limit: 26
-```
-
-### Deploy Kubernetes
-
-```ShellSession
-ansible-playbook --become -i inventory/$CLUSTER/hosts cluster.yml
-```
-
-This will take some time, as there are many tasks to run.
-
-## Kubernetes
-
-### Set up kubectl
-
-1. [Install kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) on your workstation
-2. Add a route to the internal IP of a master node (if needed):
-
-```ShellSession
-sudo route add [master-internal-ip] gw [router-ip]
-```
-
-or
-
-```ShellSession
-sudo route add -net [internal-subnet]/24 gw [router-ip]
-```
-
-3. List Kubernetes certificates & keys:
-
-```ShellSession
-ssh [os-user]@[master-ip] sudo ls /etc/kubernetes/ssl/
-```
-
-4. Get `admin`'s certificates and keys:
-
-```ShellSession
-ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/admin-kube-master-1-key.pem > admin-key.pem
-ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/admin-kube-master-1.pem > admin.pem
-ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/ca.pem > ca.pem
-```
-
-5. Configure kubectl:
-
-```ShellSession
-$ kubectl config set-cluster default-cluster --server=https://[master-internal-ip]:6443 \
-    --certificate-authority=ca.pem
-
-$ kubectl config set-credentials default-admin \
-    --certificate-authority=ca.pem \
-    --client-key=admin-key.pem \
-    --client-certificate=admin.pem
-
-$ kubectl config set-context default-system --cluster=default-cluster --user=default-admin
-$ kubectl config use-context default-system
-```
-
-6. Check it:
-
-```ShellSession
-kubectl version
-```
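-
-As an additional sanity check (not strictly required), listing the registered nodes confirms
-that both the API endpoint and the admin credentials work end to end; every master and worker
-should eventually report `Ready`:
-
-```ShellSession
-kubectl get nodes
-```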
-
-## GlusterFS
-
-GlusterFS is not deployed by the standard `cluster.yml` playbook; see the
-[GlusterFS playbook documentation](../../network-storage/glusterfs/README.md)
-for instructions.
-
-Basically, you will install GlusterFS as follows:
-
-```ShellSession
-ansible-playbook --become -i inventory/$CLUSTER/hosts ./contrib/network-storage/glusterfs/glusterfs.yml
-```
-
-## What's next
-
-Try out your new Kubernetes cluster with the [Hello Kubernetes service](https://kubernetes.io/docs/tasks/access-application-cluster/service-access-application-cluster/).
-
-## Appendix
-
-### Migration from `number_of_k8s_nodes*` to `k8s_nodes`
-
-If you currently have a cluster defined using the `number_of_k8s_nodes*` variables and wish
-to migrate to the `k8s_nodes` style, you can do it like so (note that the new resources are the
-`for_each`-based `k8s_nodes`, so the destination addresses use string keys):
-
-```ShellSession
-$ terraform state list
-module.compute.data.openstack_images_image_v2.gfs_image
-module.compute.data.openstack_images_image_v2.vm_image
-module.compute.openstack_compute_floatingip_associate_v2.k8s_master[0]
-module.compute.openstack_compute_floatingip_associate_v2.k8s_node[0]
-module.compute.openstack_compute_floatingip_associate_v2.k8s_node[1]
-module.compute.openstack_compute_floatingip_associate_v2.k8s_node[2]
-module.compute.openstack_compute_instance_v2.k8s_master[0]
-module.compute.openstack_compute_instance_v2.k8s_node[0]
-module.compute.openstack_compute_instance_v2.k8s_node[1]
-module.compute.openstack_compute_instance_v2.k8s_node[2]
-module.compute.openstack_compute_keypair_v2.k8s
-module.compute.openstack_compute_servergroup_v2.k8s_etcd[0]
-module.compute.openstack_compute_servergroup_v2.k8s_master[0]
-module.compute.openstack_compute_servergroup_v2.k8s_node[0]
-module.compute.openstack_networking_secgroup_rule_v2.bastion[0]
-module.compute.openstack_networking_secgroup_rule_v2.egress[0]
-module.compute.openstack_networking_secgroup_rule_v2.k8s
-module.compute.openstack_networking_secgroup_rule_v2.k8s_allowed_remote_ips[0]
-module.compute.openstack_networking_secgroup_rule_v2.k8s_allowed_remote_ips[1]
-module.compute.openstack_networking_secgroup_rule_v2.k8s_allowed_remote_ips[2]
-module.compute.openstack_networking_secgroup_rule_v2.k8s_master[0]
-module.compute.openstack_networking_secgroup_rule_v2.worker[0]
-module.compute.openstack_networking_secgroup_rule_v2.worker[1]
-module.compute.openstack_networking_secgroup_rule_v2.worker[2]
-module.compute.openstack_networking_secgroup_rule_v2.worker[3]
-module.compute.openstack_networking_secgroup_rule_v2.worker[4]
-module.compute.openstack_networking_secgroup_v2.bastion[0]
-module.compute.openstack_networking_secgroup_v2.k8s
-module.compute.openstack_networking_secgroup_v2.k8s_master
-module.compute.openstack_networking_secgroup_v2.worker
-module.ips.null_resource.dummy_dependency
-module.ips.openstack_networking_floatingip_v2.k8s_master[0]
-module.ips.openstack_networking_floatingip_v2.k8s_node[0]
-module.ips.openstack_networking_floatingip_v2.k8s_node[1]
-module.ips.openstack_networking_floatingip_v2.k8s_node[2]
-module.network.openstack_networking_network_v2.k8s[0]
-module.network.openstack_networking_router_interface_v2.k8s[0]
-module.network.openstack_networking_router_v2.k8s[0]
-module.network.openstack_networking_subnet_v2.k8s[0]
-$ terraform state mv 'module.compute.openstack_compute_floatingip_associate_v2.k8s_node[0]' 'module.compute.openstack_compute_floatingip_associate_v2.k8s_nodes["1"]'
-Move "module.compute.openstack_compute_floatingip_associate_v2.k8s_node[0]" to "module.compute.openstack_compute_floatingip_associate_v2.k8s_nodes[\"1\"]"
-Successfully moved 1 object(s).
-$ terraform state mv 'module.compute.openstack_compute_floatingip_associate_v2.k8s_node[1]' 'module.compute.openstack_compute_floatingip_associate_v2.k8s_nodes["2"]'
-Move "module.compute.openstack_compute_floatingip_associate_v2.k8s_node[1]" to "module.compute.openstack_compute_floatingip_associate_v2.k8s_nodes[\"2\"]"
-Successfully moved 1 object(s).
-$ terraform state mv 'module.compute.openstack_compute_floatingip_associate_v2.k8s_node[2]' 'module.compute.openstack_compute_floatingip_associate_v2.k8s_nodes["3"]'
-Move "module.compute.openstack_compute_floatingip_associate_v2.k8s_node[2]" to "module.compute.openstack_compute_floatingip_associate_v2.k8s_nodes[\"3\"]"
-Successfully moved 1 object(s).
-$ terraform state mv 'module.compute.openstack_compute_instance_v2.k8s_node[0]' 'module.compute.openstack_compute_instance_v2.k8s_nodes["1"]'
-Move "module.compute.openstack_compute_instance_v2.k8s_node[0]" to "module.compute.openstack_compute_instance_v2.k8s_nodes[\"1\"]"
-Successfully moved 1 object(s).
-$ terraform state mv 'module.compute.openstack_compute_instance_v2.k8s_node[1]' 'module.compute.openstack_compute_instance_v2.k8s_nodes["2"]'
-Move "module.compute.openstack_compute_instance_v2.k8s_node[1]" to "module.compute.openstack_compute_instance_v2.k8s_nodes[\"2\"]"
-Successfully moved 1 object(s).
-$ terraform state mv 'module.compute.openstack_compute_instance_v2.k8s_node[2]' 'module.compute.openstack_compute_instance_v2.k8s_nodes["3"]'
-Move "module.compute.openstack_compute_instance_v2.k8s_node[2]" to "module.compute.openstack_compute_instance_v2.k8s_nodes[\"3\"]"
-Successfully moved 1 object(s).
-$ terraform state mv 'module.ips.openstack_networking_floatingip_v2.k8s_node[0]' 'module.ips.openstack_networking_floatingip_v2.k8s_nodes["1"]'
-Move "module.ips.openstack_networking_floatingip_v2.k8s_node[0]" to "module.ips.openstack_networking_floatingip_v2.k8s_nodes[\"1\"]"
-Successfully moved 1 object(s).
-$ terraform state mv 'module.ips.openstack_networking_floatingip_v2.k8s_node[1]' 'module.ips.openstack_networking_floatingip_v2.k8s_nodes["2"]'
-Move "module.ips.openstack_networking_floatingip_v2.k8s_node[1]" to "module.ips.openstack_networking_floatingip_v2.k8s_nodes[\"2\"]"
-Successfully moved 1 object(s).
-$ terraform state mv 'module.ips.openstack_networking_floatingip_v2.k8s_node[2]' 'module.ips.openstack_networking_floatingip_v2.k8s_nodes["3"]'
-Move "module.ips.openstack_networking_floatingip_v2.k8s_node[2]" to "module.ips.openstack_networking_floatingip_v2.k8s_nodes[\"3\"]"
-Successfully moved 1 object(s).
-```
-
-Of course, for nodes without floating IPs those steps can be omitted.
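-
-With many nodes, the repetitive `terraform state mv` calls can be scripted. A minimal sketch,
-assuming three count-based nodes that should become keys "1" through "3" (adjust the loop bounds
-and key mapping to your own cluster):
-
-```ShellSession
-for i in 0 1 2; do
-  key=$((i + 1))
-  terraform state mv \
-    "module.compute.openstack_compute_floatingip_associate_v2.k8s_node[$i]" \
-    "module.compute.openstack_compute_floatingip_associate_v2.k8s_nodes[\"$key\"]"
-  terraform state mv \
-    "module.compute.openstack_compute_instance_v2.k8s_node[$i]" \
-    "module.compute.openstack_compute_instance_v2.k8s_nodes[\"$key\"]"
-  terraform state mv \
-    "module.ips.openstack_networking_floatingip_v2.k8s_node[$i]" \
-    "module.ips.openstack_networking_floatingip_v2.k8s_nodes[\"$key\"]"
-done
-```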
diff --git a/contrib/terraform/openstack/hosts b/contrib/terraform/openstack/hosts deleted file mode 120000 index 804b6fa6069..00000000000 --- a/contrib/terraform/openstack/hosts +++ /dev/null @@ -1 +0,0 @@ -../terraform.py \ No newline at end of file diff --git a/contrib/terraform/openstack/kubespray.tf b/contrib/terraform/openstack/kubespray.tf deleted file mode 100644 index 556fa54340f..00000000000 --- a/contrib/terraform/openstack/kubespray.tf +++ /dev/null @@ -1,155 +0,0 @@ -module "network" { - source = "./modules/network" - - external_net = var.external_net - network_name = var.network_name - subnet_cidr = var.subnet_cidr - cluster_name = var.cluster_name - dns_nameservers = var.dns_nameservers - network_dns_domain = var.network_dns_domain - use_neutron = var.use_neutron - port_security_enabled = var.port_security_enabled - router_id = var.router_id -} - -module "ips" { - source = "./modules/ips" - - number_of_k8s_masters = var.number_of_k8s_masters - number_of_k8s_masters_no_etcd = var.number_of_k8s_masters_no_etcd - number_of_k8s_nodes = var.number_of_k8s_nodes - floatingip_pool = var.floatingip_pool - number_of_bastions = var.number_of_bastions - external_net = var.external_net - network_name = var.network_name - router_id = module.network.router_id - k8s_nodes = var.k8s_nodes - k8s_masters = var.k8s_masters - k8s_master_fips = var.k8s_master_fips - bastion_fips = var.bastion_fips - router_internal_port_id = module.network.router_internal_port_id -} - -module "compute" { - source = "./modules/compute" - - cluster_name = var.cluster_name - az_list = var.az_list - az_list_node = var.az_list_node - number_of_k8s_masters = var.number_of_k8s_masters - number_of_k8s_masters_no_etcd = var.number_of_k8s_masters_no_etcd - number_of_etcd = var.number_of_etcd - number_of_k8s_masters_no_floating_ip = var.number_of_k8s_masters_no_floating_ip - number_of_k8s_masters_no_floating_ip_no_etcd = var.number_of_k8s_masters_no_floating_ip_no_etcd - number_of_k8s_nodes = var.number_of_k8s_nodes - number_of_bastions = var.number_of_bastions - number_of_k8s_nodes_no_floating_ip = var.number_of_k8s_nodes_no_floating_ip - number_of_gfs_nodes_no_floating_ip = var.number_of_gfs_nodes_no_floating_ip - k8s_masters = var.k8s_masters - k8s_nodes = var.k8s_nodes - bastion_root_volume_size_in_gb = var.bastion_root_volume_size_in_gb - etcd_root_volume_size_in_gb = var.etcd_root_volume_size_in_gb - master_root_volume_size_in_gb = var.master_root_volume_size_in_gb - node_root_volume_size_in_gb = var.node_root_volume_size_in_gb - gfs_root_volume_size_in_gb = var.gfs_root_volume_size_in_gb - gfs_volume_size_in_gb = var.gfs_volume_size_in_gb - master_volume_type = var.master_volume_type - node_volume_type = var.node_volume_type - public_key_path = var.public_key_path - image = var.image - image_uuid = var.image_uuid - image_gfs = var.image_gfs - image_master = var.image_master - image_master_uuid = var.image_master_uuid - image_gfs_uuid = var.image_gfs_uuid - ssh_user = var.ssh_user - ssh_user_gfs = var.ssh_user_gfs - flavor_k8s_master = var.flavor_k8s_master - flavor_k8s_node = var.flavor_k8s_node - flavor_etcd = var.flavor_etcd - flavor_gfs_node = var.flavor_gfs_node - network_name = var.network_name - flavor_bastion = var.flavor_bastion - k8s_master_fips = module.ips.k8s_master_fips - k8s_master_no_etcd_fips = module.ips.k8s_master_no_etcd_fips - k8s_masters_fips = module.ips.k8s_masters_fips - k8s_node_fips = module.ips.k8s_node_fips - k8s_nodes_fips = module.ips.k8s_nodes_fips - bastion_fips = 
module.ips.bastion_fips - bastion_allowed_remote_ips = var.bastion_allowed_remote_ips - bastion_allowed_remote_ipv6_ips = var.bastion_allowed_remote_ipv6_ips - master_allowed_remote_ips = var.master_allowed_remote_ips - master_allowed_remote_ipv6_ips = var.master_allowed_remote_ipv6_ips - k8s_allowed_remote_ips = var.k8s_allowed_remote_ips - k8s_allowed_remote_ips_ipv6 = var.k8s_allowed_remote_ips_ipv6 - k8s_allowed_egress_ips = var.k8s_allowed_egress_ips - k8s_allowed_egress_ipv6_ips = var.k8s_allowed_egress_ipv6_ips - supplementary_master_groups = var.supplementary_master_groups - supplementary_node_groups = var.supplementary_node_groups - master_allowed_ports = var.master_allowed_ports - master_allowed_ports_ipv6 = var.master_allowed_ports_ipv6 - worker_allowed_ports = var.worker_allowed_ports - worker_allowed_ports_ipv6 = var.worker_allowed_ports_ipv6 - bastion_allowed_ports = var.bastion_allowed_ports - bastion_allowed_ports_ipv6 = var.bastion_allowed_ports_ipv6 - use_access_ip = var.use_access_ip - master_server_group_policy = var.master_server_group_policy - node_server_group_policy = var.node_server_group_policy - etcd_server_group_policy = var.etcd_server_group_policy - extra_sec_groups = var.extra_sec_groups - extra_sec_groups_name = var.extra_sec_groups_name - group_vars_path = var.group_vars_path - port_security_enabled = var.port_security_enabled - force_null_port_security = var.force_null_port_security - network_router_id = module.network.router_id - network_id = module.network.network_id - use_existing_network = var.use_existing_network - private_subnet_id = module.network.subnet_id - additional_server_groups = var.additional_server_groups - - depends_on = [ - module.network.subnet_id - ] -} - -module "loadbalancer" { - source = "./modules/loadbalancer" - - cluster_name = var.cluster_name - subnet_id = module.network.subnet_id - floatingip_pool = var.floatingip_pool - k8s_master_ips = module.compute.k8s_master_ips - k8s_master_loadbalancer_enabled = var.k8s_master_loadbalancer_enabled - k8s_master_loadbalancer_listener_port = var.k8s_master_loadbalancer_listener_port - k8s_master_loadbalancer_server_port = var.k8s_master_loadbalancer_server_port - k8s_master_loadbalancer_public_ip = var.k8s_master_loadbalancer_public_ip - - depends_on = [ - module.compute.k8s_master - ] -} - - -output "private_subnet_id" { - value = module.network.subnet_id -} - -output "floating_network_id" { - value = var.external_net -} - -output "router_id" { - value = module.network.router_id -} - -output "k8s_master_fips" { - value = var.number_of_k8s_masters + var.number_of_k8s_masters_no_etcd > 0 ? concat(module.ips.k8s_master_fips, module.ips.k8s_master_no_etcd_fips) : [for key, value in module.ips.k8s_masters_fips : value.address] -} - -output "k8s_node_fips" { - value = var.number_of_k8s_nodes > 0 ? 
module.ips.k8s_node_fips : [for key, value in module.ips.k8s_nodes_fips : value.address] -} - -output "bastion_fips" { - value = module.ips.bastion_fips -} diff --git a/contrib/terraform/openstack/modules/compute/ansible_bastion_template.txt b/contrib/terraform/openstack/modules/compute/ansible_bastion_template.txt deleted file mode 100644 index a304b2c9d5d..00000000000 --- a/contrib/terraform/openstack/modules/compute/ansible_bastion_template.txt +++ /dev/null @@ -1 +0,0 @@ -ansible_ssh_common_args: "-o ProxyCommand='ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -W %h:%p -q USER@BASTION_ADDRESS {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %}'" diff --git a/contrib/terraform/openstack/modules/compute/main.tf b/contrib/terraform/openstack/modules/compute/main.tf deleted file mode 100644 index 2256ea2b4e6..00000000000 --- a/contrib/terraform/openstack/modules/compute/main.tf +++ /dev/null @@ -1,1092 +0,0 @@ -data "openstack_images_image_v2" "vm_image" { - count = var.image_uuid == "" ? 1 : 0 - most_recent = true - name = var.image -} - -data "openstack_images_image_v2" "gfs_image" { - count = var.image_gfs_uuid == "" ? var.image_uuid == "" ? 1 : 0 : 0 - most_recent = true - name = var.image_gfs == "" ? var.image : var.image_gfs -} - -data "openstack_images_image_v2" "image_master" { - count = var.image_master_uuid == "" ? var.image_uuid == "" ? 1 : 0 : 0 - name = var.image_master == "" ? var.image : var.image_master -} - -data "cloudinit_config" "cloudinit" { - part { - content_type = "text/cloud-config" - content = templatefile("${path.module}/templates/cloudinit.yaml.tmpl", { - extra_partitions = [], - netplan_critical_dhcp_interface = "" - }) - } -} - -data "openstack_networking_network_v2" "k8s_network" { - count = var.use_existing_network ? 
1 : 0 - name = var.network_name -} - -resource "openstack_compute_keypair_v2" "k8s" { - name = "kubernetes-${var.cluster_name}" - public_key = chomp(file(var.public_key_path)) -} - -resource "openstack_networking_secgroup_v2" "k8s_master" { - name = "${var.cluster_name}-k8s-master" - description = "${var.cluster_name} - Kubernetes Master" - delete_default_rules = true -} - -resource "openstack_networking_secgroup_v2" "k8s_master_extra" { - count = "%{if var.extra_sec_groups}1%{else}0%{endif}" - name = "${var.cluster_name}-k8s-master-${var.extra_sec_groups_name}" - description = "${var.cluster_name} - Kubernetes Master nodes - rules not managed by terraform" - delete_default_rules = true -} - -resource "openstack_networking_secgroup_rule_v2" "k8s_master" { - count = length(var.master_allowed_remote_ips) - direction = "ingress" - ethertype = "IPv4" - protocol = "tcp" - port_range_min = "6443" - port_range_max = "6443" - remote_ip_prefix = var.master_allowed_remote_ips[count.index] - security_group_id = openstack_networking_secgroup_v2.k8s_master.id -} - -resource "openstack_networking_secgroup_rule_v2" "k8s_master_ports" { - count = length(var.master_allowed_ports) - direction = "ingress" - ethertype = "IPv4" - protocol = lookup(var.master_allowed_ports[count.index], "protocol", "tcp") - port_range_min = lookup(var.master_allowed_ports[count.index], "port_range_min") - port_range_max = lookup(var.master_allowed_ports[count.index], "port_range_max") - remote_ip_prefix = lookup(var.master_allowed_ports[count.index], "remote_ip_prefix", "0.0.0.0/0") - security_group_id = openstack_networking_secgroup_v2.k8s_master.id -} - -resource "openstack_networking_secgroup_rule_v2" "k8s_master_ipv6_ingress" { - count = length(var.master_allowed_remote_ipv6_ips) - direction = "ingress" - ethertype = "IPv6" - protocol = "tcp" - port_range_min = "6443" - port_range_max = "6443" - remote_ip_prefix = var.master_allowed_remote_ipv6_ips[count.index] - security_group_id = openstack_networking_secgroup_v2.k8s_master.id -} - -resource "openstack_networking_secgroup_rule_v2" "k8s_master_ports_ipv6_ingress" { - count = length(var.master_allowed_ports_ipv6) - direction = "ingress" - ethertype = "IPv6" - protocol = lookup(var.master_allowed_ports_ipv6[count.index], "protocol", "tcp") - port_range_min = lookup(var.master_allowed_ports_ipv6[count.index], "port_range_min") - port_range_max = lookup(var.master_allowed_ports_ipv6[count.index], "port_range_max") - remote_ip_prefix = lookup(var.master_allowed_ports_ipv6[count.index], "remote_ip_prefix", "::/0") - security_group_id = openstack_networking_secgroup_v2.k8s_master.id -} - -resource "openstack_networking_secgroup_rule_v2" "master_egress_ipv6" { - count = length(var.k8s_allowed_egress_ipv6_ips) - direction = "egress" - ethertype = "IPv6" - remote_ip_prefix = var.k8s_allowed_egress_ipv6_ips[count.index] - security_group_id = openstack_networking_secgroup_v2.k8s_master.id -} - -resource "openstack_networking_secgroup_v2" "bastion" { - name = "${var.cluster_name}-bastion" - count = var.number_of_bastions != "" ? 1 : 0 - description = "${var.cluster_name} - Bastion Server" - delete_default_rules = true -} - -resource "openstack_networking_secgroup_rule_v2" "bastion" { - count = var.number_of_bastions != "" ? 
length(var.bastion_allowed_remote_ips) : 0 - direction = "ingress" - ethertype = "IPv4" - protocol = "tcp" - port_range_min = "22" - port_range_max = "22" - remote_ip_prefix = var.bastion_allowed_remote_ips[count.index] - security_group_id = openstack_networking_secgroup_v2.bastion[0].id -} - -resource "openstack_networking_secgroup_rule_v2" "k8s_bastion_ports" { - count = length(var.bastion_allowed_ports) - direction = "ingress" - ethertype = "IPv4" - protocol = lookup(var.bastion_allowed_ports[count.index], "protocol", "tcp") - port_range_min = lookup(var.bastion_allowed_ports[count.index], "port_range_min") - port_range_max = lookup(var.bastion_allowed_ports[count.index], "port_range_max") - remote_ip_prefix = lookup(var.bastion_allowed_ports[count.index], "remote_ip_prefix", "0.0.0.0/0") - security_group_id = openstack_networking_secgroup_v2.bastion[0].id -} - -resource "openstack_networking_secgroup_rule_v2" "bastion_ipv6_ingress" { - count = var.number_of_bastions != "" ? length(var.bastion_allowed_remote_ipv6_ips) : 0 - direction = "ingress" - ethertype = "IPv6" - protocol = "tcp" - port_range_min = "22" - port_range_max = "22" - remote_ip_prefix = var.bastion_allowed_remote_ipv6_ips[count.index] - security_group_id = openstack_networking_secgroup_v2.bastion[0].id -} - -resource "openstack_networking_secgroup_rule_v2" "k8s_bastion_ports_ipv6_ingress" { - count = length(var.bastion_allowed_ports_ipv6) - direction = "ingress" - ethertype = "IPv6" - protocol = lookup(var.bastion_allowed_ports_ipv6[count.index], "protocol", "tcp") - port_range_min = lookup(var.bastion_allowed_ports_ipv6[count.index], "port_range_min") - port_range_max = lookup(var.bastion_allowed_ports_ipv6[count.index], "port_range_max") - remote_ip_prefix = lookup(var.bastion_allowed_ports_ipv6[count.index], "remote_ip_prefix", "::/0") - security_group_id = openstack_networking_secgroup_v2.bastion[0].id -} - -resource "openstack_networking_secgroup_v2" "k8s" { - name = "${var.cluster_name}-k8s" - description = "${var.cluster_name} - Kubernetes" - delete_default_rules = true -} - -resource "openstack_networking_secgroup_rule_v2" "k8s" { - direction = "ingress" - ethertype = "IPv4" - remote_group_id = openstack_networking_secgroup_v2.k8s.id - security_group_id = openstack_networking_secgroup_v2.k8s.id -} - -resource "openstack_networking_secgroup_rule_v2" "k8s_ipv6" { - direction = "ingress" - ethertype = "IPv6" - remote_group_id = openstack_networking_secgroup_v2.k8s.id - security_group_id = openstack_networking_secgroup_v2.k8s.id -} - -resource "openstack_networking_secgroup_rule_v2" "k8s_allowed_remote_ips" { - count = length(var.k8s_allowed_remote_ips) - direction = "ingress" - ethertype = "IPv4" - protocol = "tcp" - port_range_min = "22" - port_range_max = "22" - remote_ip_prefix = var.k8s_allowed_remote_ips[count.index] - security_group_id = openstack_networking_secgroup_v2.k8s.id -} - -resource "openstack_networking_secgroup_rule_v2" "k8s_allowed_remote_ips_ipv6" { - count = length(var.k8s_allowed_remote_ips_ipv6) - direction = "ingress" - ethertype = "IPv6" - protocol = "tcp" - port_range_min = "22" - port_range_max = "22" - remote_ip_prefix = var.k8s_allowed_remote_ips_ipv6[count.index] - security_group_id = openstack_networking_secgroup_v2.k8s.id -} - -resource "openstack_networking_secgroup_rule_v2" "egress" { - count = length(var.k8s_allowed_egress_ips) - direction = "egress" - ethertype = "IPv4" - remote_ip_prefix = var.k8s_allowed_egress_ips[count.index] - security_group_id = 
openstack_networking_secgroup_v2.k8s.id -} - -resource "openstack_networking_secgroup_rule_v2" "egress_ipv6" { - count = length(var.k8s_allowed_egress_ipv6_ips) - direction = "egress" - ethertype = "IPv6" - remote_ip_prefix = var.k8s_allowed_egress_ipv6_ips[count.index] - security_group_id = openstack_networking_secgroup_v2.k8s.id -} - -resource "openstack_networking_secgroup_v2" "worker" { - name = "${var.cluster_name}-k8s-worker" - description = "${var.cluster_name} - Kubernetes worker nodes" - delete_default_rules = true -} - -resource "openstack_networking_secgroup_v2" "worker_extra" { - count = "%{if var.extra_sec_groups}1%{else}0%{endif}" - name = "${var.cluster_name}-k8s-worker-${var.extra_sec_groups_name}" - description = "${var.cluster_name} - Kubernetes worker nodes - rules not managed by terraform" - delete_default_rules = true -} - -resource "openstack_networking_secgroup_rule_v2" "worker" { - count = length(var.worker_allowed_ports) - direction = "ingress" - ethertype = "IPv4" - protocol = lookup(var.worker_allowed_ports[count.index], "protocol", "tcp") - port_range_min = lookup(var.worker_allowed_ports[count.index], "port_range_min") - port_range_max = lookup(var.worker_allowed_ports[count.index], "port_range_max") - remote_ip_prefix = lookup(var.worker_allowed_ports[count.index], "remote_ip_prefix", "0.0.0.0/0") - security_group_id = openstack_networking_secgroup_v2.worker.id -} - -resource "openstack_networking_secgroup_rule_v2" "worker_ipv6_ingress" { - count = length(var.worker_allowed_ports_ipv6) - direction = "ingress" - ethertype = "IPv6" - protocol = lookup(var.worker_allowed_ports_ipv6[count.index], "protocol", "tcp") - port_range_min = lookup(var.worker_allowed_ports_ipv6[count.index], "port_range_min") - port_range_max = lookup(var.worker_allowed_ports_ipv6[count.index], "port_range_max") - remote_ip_prefix = lookup(var.worker_allowed_ports_ipv6[count.index], "remote_ip_prefix", "::/0") - security_group_id = openstack_networking_secgroup_v2.worker.id -} - -resource "openstack_compute_servergroup_v2" "k8s_master" { - count = var.master_server_group_policy != "" ? 1 : 0 - name = "k8s-master-srvgrp" - policies = [var.master_server_group_policy] -} - -resource "openstack_compute_servergroup_v2" "k8s_node" { - count = var.node_server_group_policy != "" ? 1 : 0 - name = "k8s-node-srvgrp" - policies = [var.node_server_group_policy] -} - -resource "openstack_compute_servergroup_v2" "k8s_etcd" { - count = var.etcd_server_group_policy != "" ? 1 : 0 - name = "k8s-etcd-srvgrp" - policies = [var.etcd_server_group_policy] -} - -resource "openstack_compute_servergroup_v2" "k8s_node_additional" { - for_each = var.additional_server_groups - name = "k8s-${each.key}-srvgrp" - policies = [each.value.policy] -} - -locals { -# master groups - master_sec_groups = compact([ - openstack_networking_secgroup_v2.k8s_master.id, - openstack_networking_secgroup_v2.k8s.id, - var.extra_sec_groups ?openstack_networking_secgroup_v2.k8s_master_extra[0].id : "", - ]) -# worker groups - worker_sec_groups = compact([ - openstack_networking_secgroup_v2.k8s.id, - openstack_networking_secgroup_v2.worker.id, - var.extra_sec_groups ? 
openstack_networking_secgroup_v2.worker_extra[0].id : "", - ]) -# bastion groups - bastion_sec_groups = compact(concat([ - openstack_networking_secgroup_v2.k8s.id, - openstack_networking_secgroup_v2.bastion[0].id, - ])) -# etcd groups - etcd_sec_groups = compact([openstack_networking_secgroup_v2.k8s.id]) -# glusterfs groups - gfs_sec_groups = compact([openstack_networking_secgroup_v2.k8s.id]) - -# Image uuid - image_to_use_node = var.image_uuid != "" ? var.image_uuid : data.openstack_images_image_v2.vm_image[0].id -# Image_gfs uuid - image_to_use_gfs = var.image_gfs_uuid != "" ? var.image_gfs_uuid : var.image_uuid != "" ? var.image_uuid : data.openstack_images_image_v2.gfs_image[0].id -# image_master uuidimage_gfs_uuid - image_to_use_master = var.image_master_uuid != "" ? var.image_master_uuid : var.image_uuid != "" ? var.image_uuid : data.openstack_images_image_v2.image_master[0].id - - k8s_nodes_settings = { - for name, node in var.k8s_nodes : - name => { - "use_local_disk" = (node.root_volume_size_in_gb != null ? node.root_volume_size_in_gb : var.node_root_volume_size_in_gb) == 0, - "image_id" = node.image_id != null ? node.image_id : local.image_to_use_node, - "volume_size" = node.root_volume_size_in_gb != null ? node.root_volume_size_in_gb : var.node_root_volume_size_in_gb, - "volume_type" = node.volume_type != null ? node.volume_type : var.node_volume_type, - "network_id" = node.network_id != null ? node.network_id : (var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id) - "server_group" = node.server_group != null ? [openstack_compute_servergroup_v2.k8s_node_additional[node.server_group].id] : (var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0].id] : []) - } - } - - k8s_masters_settings = { - for name, node in var.k8s_masters : - name => { - "use_local_disk" = (node.root_volume_size_in_gb != null ? node.root_volume_size_in_gb : var.master_root_volume_size_in_gb) == 0, - "image_id" = node.image_id != null ? node.image_id : local.image_to_use_master, - "volume_size" = node.root_volume_size_in_gb != null ? node.root_volume_size_in_gb : var.master_root_volume_size_in_gb, - "volume_type" = node.volume_type != null ? node.volume_type : var.master_volume_type, - "network_id" = node.network_id != null ? node.network_id : (var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id) - } - } -} - -resource "openstack_networking_port_v2" "bastion_port" { - count = var.number_of_bastions - name = "${var.cluster_name}-bastion-${count.index + 1}" - network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id - admin_state_up = "true" - port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled - security_group_ids = var.port_security_enabled ? local.bastion_sec_groups : null - no_security_groups = var.port_security_enabled ? null : false - dynamic "fixed_ip" { - for_each = var.private_subnet_id == "" ? [] : [true] - content { - subnet_id = var.private_subnet_id - } - } - - depends_on = [ - var.network_router_id - ] -} - -resource "openstack_compute_instance_v2" "bastion" { - name = "${var.cluster_name}-bastion-${count.index + 1}" - count = var.number_of_bastions - image_id = var.bastion_root_volume_size_in_gb == 0 ? 
local.image_to_use_node : null - flavor_id = var.flavor_bastion - key_pair = openstack_compute_keypair_v2.k8s.name - user_data = data.cloudinit_config.cloudinit.rendered - - dynamic "block_device" { - for_each = var.bastion_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : [] - content { - uuid = local.image_to_use_node - source_type = "image" - volume_size = var.bastion_root_volume_size_in_gb - boot_index = 0 - destination_type = "volume" - delete_on_termination = true - } - } - - network { - port = element(openstack_networking_port_v2.bastion_port.*.id, count.index) - } - - metadata = { - ssh_user = var.ssh_user - kubespray_groups = "bastion" - depends_on = var.network_router_id - use_access_ip = var.use_access_ip - } - - provisioner "local-exec" { - command = "sed -e s/USER/${var.ssh_user}/ -e s/BASTION_ADDRESS/${var.bastion_fips[0]}/ ${path.module}/ansible_bastion_template.txt > ${var.group_vars_path}/no_floating.yml" - } -} - -resource "openstack_networking_port_v2" "k8s_master_port" { - count = var.number_of_k8s_masters - name = "${var.cluster_name}-k8s-master-${count.index + 1}" - network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id - admin_state_up = "true" - port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled - security_group_ids = var.port_security_enabled ? local.master_sec_groups : null - no_security_groups = var.port_security_enabled ? null : false - dynamic "fixed_ip" { - for_each = var.private_subnet_id == "" ? [] : [true] - content { - subnet_id = var.private_subnet_id - } - } - - lifecycle { - ignore_changes = [ allowed_address_pairs ] - } - - depends_on = [ - var.network_router_id - ] -} - -resource "openstack_compute_instance_v2" "k8s_master" { - name = "${var.cluster_name}-k8s-master-${count.index + 1}" - count = var.number_of_k8s_masters - availability_zone = element(var.az_list, count.index) - image_id = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null - flavor_id = var.flavor_k8s_master - key_pair = openstack_compute_keypair_v2.k8s.name - user_data = data.cloudinit_config.cloudinit.rendered - - - dynamic "block_device" { - for_each = var.master_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : [] - content { - uuid = local.image_to_use_master - source_type = "image" - volume_size = var.master_root_volume_size_in_gb - volume_type = var.master_volume_type - boot_index = 0 - destination_type = "volume" - delete_on_termination = true - } - } - - network { - port = element(openstack_networking_port_v2.k8s_master_port.*.id, count.index) - } - - dynamic "scheduler_hints" { - for_each = var.master_server_group_policy != "" ? 
[openstack_compute_servergroup_v2.k8s_master[0]] : [] - content { - group = openstack_compute_servergroup_v2.k8s_master[0].id - } - } - - metadata = { - ssh_user = var.ssh_user - kubespray_groups = "etcd,kube_control_plane,${var.supplementary_master_groups},k8s_cluster" - depends_on = var.network_router_id - use_access_ip = var.use_access_ip - } - - provisioner "local-exec" { - command = "sed -e s/USER/${var.ssh_user}/ -e s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_master_fips), 0)}/ ${path.module}/ansible_bastion_template.txt > ${var.group_vars_path}/no_floating.yml" - } -} - -resource "openstack_networking_port_v2" "k8s_masters_port" { - for_each = var.number_of_k8s_masters == 0 && var.number_of_k8s_masters_no_etcd == 0 && var.number_of_k8s_masters_no_floating_ip == 0 && var.number_of_k8s_masters_no_floating_ip_no_etcd == 0 ? var.k8s_masters : {} - name = "${var.cluster_name}-k8s-${each.key}" - network_id = local.k8s_masters_settings[each.key].network_id - admin_state_up = "true" - port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled - security_group_ids = var.port_security_enabled ? local.master_sec_groups : null - no_security_groups = var.port_security_enabled ? null : false - dynamic "fixed_ip" { - for_each = var.private_subnet_id == "" ? [] : [true] - content { - subnet_id = var.private_subnet_id - } - } - - lifecycle { - ignore_changes = [ allowed_address_pairs ] - } - - depends_on = [ - var.network_router_id - ] -} - -resource "openstack_compute_instance_v2" "k8s_masters" { - for_each = var.number_of_k8s_masters == 0 && var.number_of_k8s_masters_no_etcd == 0 && var.number_of_k8s_masters_no_floating_ip == 0 && var.number_of_k8s_masters_no_floating_ip_no_etcd == 0 ? var.k8s_masters : {} - name = "${var.cluster_name}-k8s-${each.key}" - availability_zone = each.value.az - image_id = local.k8s_masters_settings[each.key].use_local_disk ? local.k8s_masters_settings[each.key].image_id : null - flavor_id = each.value.flavor - key_pair = openstack_compute_keypair_v2.k8s.name - - dynamic "block_device" { - for_each = !local.k8s_masters_settings[each.key].use_local_disk ? [local.k8s_masters_settings[each.key].image_id] : [] - content { - uuid = block_device.value - source_type = "image" - volume_size = local.k8s_masters_settings[each.key].volume_size - volume_type = local.k8s_masters_settings[each.key].volume_type - boot_index = 0 - destination_type = "volume" - delete_on_termination = true - } - } - - network { - port = openstack_networking_port_v2.k8s_masters_port[each.key].id - } - - dynamic "scheduler_hints" { - for_each = var.master_server_group_policy != "" ? 
[openstack_compute_servergroup_v2.k8s_master[0]] : [] - content { - group = openstack_compute_servergroup_v2.k8s_master[0].id - } - } - - metadata = { - ssh_user = var.ssh_user - kubespray_groups = "%{if each.value.etcd == true}etcd,%{endif}kube_control_plane,${var.supplementary_master_groups},k8s_cluster%{if each.value.floating_ip == false},no_floating%{endif}" - depends_on = var.network_router_id - use_access_ip = var.use_access_ip - } - - provisioner "local-exec" { - command = "%{if each.value.floating_ip}sed s/USER/${var.ssh_user}/ ${path.module}/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, [for key, value in var.k8s_masters_fips : value.address]), 0)}/ > ${var.group_vars_path}/no_floating.yml%{else}true%{endif}" - } -} - -resource "openstack_networking_port_v2" "k8s_master_no_etcd_port" { - count = var.number_of_k8s_masters_no_etcd - name = "${var.cluster_name}-k8s-master-ne-${count.index + 1}" - network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id - admin_state_up = "true" - port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled - security_group_ids = var.port_security_enabled ? local.master_sec_groups : null - no_security_groups = var.port_security_enabled ? null : false - dynamic "fixed_ip" { - for_each = var.private_subnet_id == "" ? [] : [true] - content { - subnet_id = var.private_subnet_id - } - } - - lifecycle { - ignore_changes = [ allowed_address_pairs ] - } - - depends_on = [ - var.network_router_id - ] -} - -resource "openstack_compute_instance_v2" "k8s_master_no_etcd" { - name = "${var.cluster_name}-k8s-master-ne-${count.index + 1}" - count = var.number_of_k8s_masters_no_etcd - availability_zone = element(var.az_list, count.index) - image_id = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null - flavor_id = var.flavor_k8s_master - key_pair = openstack_compute_keypair_v2.k8s.name - user_data = data.cloudinit_config.cloudinit.rendered - - - dynamic "block_device" { - for_each = var.master_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : [] - content { - uuid = local.image_to_use_master - source_type = "image" - volume_size = var.master_root_volume_size_in_gb - volume_type = var.master_volume_type - boot_index = 0 - destination_type = "volume" - delete_on_termination = true - } - } - - network { - port = element(openstack_networking_port_v2.k8s_master_no_etcd_port.*.id, count.index) - } - - dynamic "scheduler_hints" { - for_each = var.master_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_master[0]] : [] - content { - group = openstack_compute_servergroup_v2.k8s_master[0].id - } - } - - metadata = { - ssh_user = var.ssh_user - kubespray_groups = "kube_control_plane,${var.supplementary_master_groups},k8s_cluster" - depends_on = var.network_router_id - use_access_ip = var.use_access_ip - } - - provisioner "local-exec" { - command = "sed -e s/USER/${var.ssh_user}/ -e s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_master_fips), 0)}/ ${path.module}/ansible_bastion_template.txt > ${var.group_vars_path}/no_floating.yml" - } -} - -resource "openstack_networking_port_v2" "etcd_port" { - count = var.number_of_etcd - name = "${var.cluster_name}-etcd-${count.index + 1}" - network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id - admin_state_up = "true" - port_security_enabled = var.force_null_port_security ? 
null : var.port_security_enabled - security_group_ids = var.port_security_enabled ? local.etcd_sec_groups : null - no_security_groups = var.port_security_enabled ? null : false - dynamic "fixed_ip" { - for_each = var.private_subnet_id == "" ? [] : [true] - content { - subnet_id = var.private_subnet_id - } - } - - depends_on = [ - var.network_router_id - ] -} - -resource "openstack_compute_instance_v2" "etcd" { - name = "${var.cluster_name}-etcd-${count.index + 1}" - count = var.number_of_etcd - availability_zone = element(var.az_list, count.index) - image_id = var.etcd_root_volume_size_in_gb == 0 ? local.image_to_use_master : null - flavor_id = var.flavor_etcd - key_pair = openstack_compute_keypair_v2.k8s.name - user_data = data.cloudinit_config.cloudinit.rendered - - dynamic "block_device" { - for_each = var.etcd_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : [] - content { - uuid = local.image_to_use_master - source_type = "image" - volume_size = var.etcd_root_volume_size_in_gb - boot_index = 0 - destination_type = "volume" - delete_on_termination = true - } - } - - network { - port = element(openstack_networking_port_v2.etcd_port.*.id, count.index) - } - - dynamic "scheduler_hints" { - for_each = var.etcd_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_etcd[0]] : [] - content { - group = openstack_compute_servergroup_v2.k8s_etcd[0].id - } - } - - metadata = { - ssh_user = var.ssh_user - kubespray_groups = "etcd,no_floating" - depends_on = var.network_router_id - use_access_ip = var.use_access_ip - } -} - -resource "openstack_networking_port_v2" "k8s_master_no_floating_ip_port" { - count = var.number_of_k8s_masters_no_floating_ip - name = "${var.cluster_name}-k8s-master-nf-${count.index + 1}" - network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id - admin_state_up = "true" - port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled - security_group_ids = var.port_security_enabled ? local.master_sec_groups : null - no_security_groups = var.port_security_enabled ? null : false - dynamic "fixed_ip" { - for_each = var.private_subnet_id == "" ? [] : [true] - content { - subnet_id = var.private_subnet_id - } - } - - lifecycle { - ignore_changes = [ allowed_address_pairs ] - } - - depends_on = [ - var.network_router_id - ] -} - -resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" { - name = "${var.cluster_name}-k8s-master-nf-${count.index + 1}" - count = var.number_of_k8s_masters_no_floating_ip - availability_zone = element(var.az_list, count.index) - image_id = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null - flavor_id = var.flavor_k8s_master - key_pair = openstack_compute_keypair_v2.k8s.name - - dynamic "block_device" { - for_each = var.master_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : [] - content { - uuid = local.image_to_use_master - source_type = "image" - volume_size = var.master_root_volume_size_in_gb - volume_type = var.master_volume_type - boot_index = 0 - destination_type = "volume" - delete_on_termination = true - } - } - - network { - port = element(openstack_networking_port_v2.k8s_master_no_floating_ip_port.*.id, count.index) - } - - dynamic "scheduler_hints" { - for_each = var.master_server_group_policy != "" ? 
[openstack_compute_servergroup_v2.k8s_master[0]] : [] - content { - group = openstack_compute_servergroup_v2.k8s_master[0].id - } - } - - metadata = { - ssh_user = var.ssh_user - kubespray_groups = "etcd,kube_control_plane,${var.supplementary_master_groups},k8s_cluster,no_floating" - depends_on = var.network_router_id - use_access_ip = var.use_access_ip - } -} - -resource "openstack_networking_port_v2" "k8s_master_no_floating_ip_no_etcd_port" { - count = var.number_of_k8s_masters_no_floating_ip_no_etcd - name = "${var.cluster_name}-k8s-master-ne-nf-${count.index + 1}" - network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id - admin_state_up = "true" - port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled - security_group_ids = var.port_security_enabled ? local.master_sec_groups : null - no_security_groups = var.port_security_enabled ? null : false - dynamic "fixed_ip" { - for_each = var.private_subnet_id == "" ? [] : [true] - content { - subnet_id = var.private_subnet_id - } - } - - lifecycle { - ignore_changes = [ allowed_address_pairs ] - } - - depends_on = [ - var.network_router_id - ] -} - -resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" { - name = "${var.cluster_name}-k8s-master-ne-nf-${count.index + 1}" - count = var.number_of_k8s_masters_no_floating_ip_no_etcd - availability_zone = element(var.az_list, count.index) - image_id = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null - flavor_id = var.flavor_k8s_master - key_pair = openstack_compute_keypair_v2.k8s.name - user_data = data.cloudinit_config.cloudinit.rendered - - dynamic "block_device" { - for_each = var.master_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : [] - content { - uuid = local.image_to_use_master - source_type = "image" - volume_size = var.master_root_volume_size_in_gb - volume_type = var.master_volume_type - boot_index = 0 - destination_type = "volume" - delete_on_termination = true - } - } - - network { - port = element(openstack_networking_port_v2.k8s_master_no_floating_ip_no_etcd_port.*.id, count.index) - } - - dynamic "scheduler_hints" { - for_each = var.master_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_master[0]] : [] - content { - group = openstack_compute_servergroup_v2.k8s_master[0].id - } - } - - metadata = { - ssh_user = var.ssh_user - kubespray_groups = "kube_control_plane,${var.supplementary_master_groups},k8s_cluster,no_floating" - depends_on = var.network_router_id - use_access_ip = var.use_access_ip - } -} - -resource "openstack_networking_port_v2" "k8s_node_port" { - count = var.number_of_k8s_nodes - name = "${var.cluster_name}-k8s-node-${count.index + 1}" - network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id - admin_state_up = "true" - port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled - security_group_ids = var.port_security_enabled ? local.worker_sec_groups : null - no_security_groups = var.port_security_enabled ? null : false - dynamic "fixed_ip" { - for_each = var.private_subnet_id == "" ? 
[] : [true] - content { - subnet_id = var.private_subnet_id - } - } - - lifecycle { - ignore_changes = [ allowed_address_pairs ] - } - - depends_on = [ - var.network_router_id - ] -} - -resource "openstack_compute_instance_v2" "k8s_node" { - name = "${var.cluster_name}-k8s-node-${count.index + 1}" - count = var.number_of_k8s_nodes - availability_zone = element(var.az_list_node, count.index) - image_id = var.node_root_volume_size_in_gb == 0 ? local.image_to_use_node : null - flavor_id = var.flavor_k8s_node - key_pair = openstack_compute_keypair_v2.k8s.name - user_data = data.cloudinit_config.cloudinit.rendered - - dynamic "block_device" { - for_each = var.node_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : [] - content { - uuid = local.image_to_use_node - source_type = "image" - volume_size = var.node_root_volume_size_in_gb - volume_type = var.node_volume_type - boot_index = 0 - destination_type = "volume" - delete_on_termination = true - } - } - - network { - port = element(openstack_networking_port_v2.k8s_node_port.*.id, count.index) - } - - - dynamic "scheduler_hints" { - for_each = var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0]] : [] - content { - group = openstack_compute_servergroup_v2.k8s_node[0].id - } - } - - metadata = { - ssh_user = var.ssh_user - kubespray_groups = "kube_node,k8s_cluster,${var.supplementary_node_groups}" - depends_on = var.network_router_id - use_access_ip = var.use_access_ip - } - - provisioner "local-exec" { - command = "sed -e s/USER/${var.ssh_user}/ -e s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_node_fips), 0)}/ ${path.module}/ansible_bastion_template.txt > ${var.group_vars_path}/no_floating.yml" - } -} - -resource "openstack_networking_port_v2" "k8s_node_no_floating_ip_port" { - count = var.number_of_k8s_nodes_no_floating_ip - name = "${var.cluster_name}-k8s-node-nf-${count.index + 1}" - network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id - admin_state_up = "true" - port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled - security_group_ids = var.port_security_enabled ? local.worker_sec_groups : null - no_security_groups = var.port_security_enabled ? null : false - dynamic "fixed_ip" { - for_each = var.private_subnet_id == "" ? [] : [true] - content { - subnet_id = var.private_subnet_id - } - } - - lifecycle { - ignore_changes = [ allowed_address_pairs ] - } - - depends_on = [ - var.network_router_id - ] -} - -resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" { - name = "${var.cluster_name}-k8s-node-nf-${count.index + 1}" - count = var.number_of_k8s_nodes_no_floating_ip - availability_zone = element(var.az_list_node, count.index) - image_id = var.node_root_volume_size_in_gb == 0 ? local.image_to_use_node : null - flavor_id = var.flavor_k8s_node - key_pair = openstack_compute_keypair_v2.k8s.name - user_data = data.cloudinit_config.cloudinit.rendered - - dynamic "block_device" { - for_each = var.node_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : [] - content { - uuid = local.image_to_use_node - source_type = "image" - volume_size = var.node_root_volume_size_in_gb - volume_type = var.node_volume_type - boot_index = 0 - destination_type = "volume" - delete_on_termination = true - } - } - - network { - port = element(openstack_networking_port_v2.k8s_node_no_floating_ip_port.*.id, count.index) - } - - dynamic "scheduler_hints" { - for_each = var.node_server_group_policy != "" ? 
[openstack_compute_servergroup_v2.k8s_node[0].id] : [] - content { - group = scheduler_hints.value - } - } - - metadata = { - ssh_user = var.ssh_user - kubespray_groups = "kube_node,k8s_cluster,no_floating,${var.supplementary_node_groups}" - depends_on = var.network_router_id - use_access_ip = var.use_access_ip - } -} - -resource "openstack_networking_port_v2" "k8s_nodes_port" { - for_each = var.number_of_k8s_nodes == 0 && var.number_of_k8s_nodes_no_floating_ip == 0 ? var.k8s_nodes : {} - name = "${var.cluster_name}-k8s-node-${each.key}" - network_id = local.k8s_nodes_settings[each.key].network_id - admin_state_up = "true" - port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled - security_group_ids = var.port_security_enabled ? local.worker_sec_groups : null - no_security_groups = var.port_security_enabled ? null : false - dynamic "fixed_ip" { - for_each = var.private_subnet_id == "" ? [] : [true] - content { - subnet_id = var.private_subnet_id - } - } - - lifecycle { - ignore_changes = [ allowed_address_pairs ] - } - - depends_on = [ - var.network_router_id - ] -} - -resource "openstack_compute_instance_v2" "k8s_nodes" { - for_each = var.number_of_k8s_nodes == 0 && var.number_of_k8s_nodes_no_floating_ip == 0 ? var.k8s_nodes : {} - name = "${var.cluster_name}-k8s-node-${each.key}" - availability_zone = each.value.az - image_id = local.k8s_nodes_settings[each.key].use_local_disk ? local.k8s_nodes_settings[each.key].image_id : null - flavor_id = each.value.flavor - key_pair = openstack_compute_keypair_v2.k8s.name - user_data = each.value.cloudinit != null ? templatefile("${path.module}/templates/cloudinit.yaml.tmpl", { - extra_partitions = each.value.cloudinit.extra_partitions, - netplan_critical_dhcp_interface = each.value.cloudinit.netplan_critical_dhcp_interface, - }) : data.cloudinit_config.cloudinit.rendered - - dynamic "block_device" { - for_each = !local.k8s_nodes_settings[each.key].use_local_disk ? [local.k8s_nodes_settings[each.key].image_id] : [] - content { - uuid = block_device.value - source_type = "image" - volume_size = local.k8s_nodes_settings[each.key].volume_size - volume_type = local.k8s_nodes_settings[each.key].volume_type - boot_index = 0 - destination_type = "volume" - delete_on_termination = true - } - } - - network { - port = openstack_networking_port_v2.k8s_nodes_port[each.key].id - } - - dynamic "scheduler_hints" { - for_each = local.k8s_nodes_settings[each.key].server_group - content { - group = scheduler_hints.value - } - } - - metadata = { - ssh_user = var.ssh_user - kubespray_groups = "kube_node,k8s_cluster,%{if !each.value.floating_ip}no_floating,%{endif}${var.supplementary_node_groups}${each.value.extra_groups != null ? ",${each.value.extra_groups}" : ""}" - depends_on = var.network_router_id - use_access_ip = var.use_access_ip - } - - provisioner "local-exec" { - command = "%{if each.value.floating_ip}sed -e s/USER/${var.ssh_user}/ -e s/BASTION_ADDRESS/${element(concat(var.bastion_fips, [for key, value in var.k8s_nodes_fips : value.address]), 0)}/ ${path.module}/ansible_bastion_template.txt > ${var.group_vars_path}/no_floating.yml%{else}true%{endif}" - } -} - -resource "openstack_networking_port_v2" "glusterfs_node_no_floating_ip_port" { - count = var.number_of_gfs_nodes_no_floating_ip - name = "${var.cluster_name}-gfs-node-nf-${count.index + 1}" - network_id = var.use_existing_network ? 
data.openstack_networking_network_v2.k8s_network[0].id : var.network_id - admin_state_up = "true" - port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled - security_group_ids = var.port_security_enabled ? local.gfs_sec_groups : null - no_security_groups = var.port_security_enabled ? null : false - dynamic "fixed_ip" { - for_each = var.private_subnet_id == "" ? [] : [true] - content { - subnet_id = var.private_subnet_id - } - } - - depends_on = [ - var.network_router_id - ] -} - -resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" { - name = "${var.cluster_name}-gfs-node-nf-${count.index + 1}" - count = var.number_of_gfs_nodes_no_floating_ip - availability_zone = element(var.az_list, count.index) - image_name = var.gfs_root_volume_size_in_gb == 0 ? local.image_to_use_gfs : null - flavor_id = var.flavor_gfs_node - key_pair = openstack_compute_keypair_v2.k8s.name - - dynamic "block_device" { - for_each = var.gfs_root_volume_size_in_gb > 0 ? [local.image_to_use_gfs] : [] - content { - uuid = local.image_to_use_gfs - source_type = "image" - volume_size = var.gfs_root_volume_size_in_gb - boot_index = 0 - destination_type = "volume" - delete_on_termination = true - } - } - - network { - port = element(openstack_networking_port_v2.glusterfs_node_no_floating_ip_port.*.id, count.index) - } - - dynamic "scheduler_hints" { - for_each = var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0]] : [] - content { - group = openstack_compute_servergroup_v2.k8s_node[0].id - } - } - - metadata = { - ssh_user = var.ssh_user_gfs - kubespray_groups = "gfs-cluster,network-storage,no_floating" - depends_on = var.network_router_id - use_access_ip = var.use_access_ip - } -} - -resource "openstack_networking_floatingip_associate_v2" "bastion" { - count = var.number_of_bastions - floating_ip = var.bastion_fips[count.index] - port_id = element(openstack_networking_port_v2.bastion_port.*.id, count.index) -} - - -resource "openstack_networking_floatingip_associate_v2" "k8s_master" { - count = var.number_of_k8s_masters - floating_ip = var.k8s_master_fips[count.index] - port_id = element(openstack_networking_port_v2.k8s_master_port.*.id, count.index) -} - -resource "openstack_networking_floatingip_associate_v2" "k8s_masters" { - for_each = var.number_of_k8s_masters == 0 && var.number_of_k8s_masters_no_etcd == 0 && var.number_of_k8s_masters_no_floating_ip == 0 && var.number_of_k8s_masters_no_floating_ip_no_etcd == 0 ? { for key, value in var.k8s_masters : key => value if value.floating_ip } : {} - floating_ip = var.k8s_masters_fips[each.key].address - port_id = openstack_networking_port_v2.k8s_masters_port[each.key].id -} - -resource "openstack_networking_floatingip_associate_v2" "k8s_master_no_etcd" { - count = var.master_root_volume_size_in_gb == 0 ? var.number_of_k8s_masters_no_etcd : 0 - floating_ip = var.k8s_master_no_etcd_fips[count.index] - port_id = element(openstack_networking_port_v2.k8s_master_no_etcd_port.*.id, count.index) -} - -resource "openstack_networking_floatingip_associate_v2" "k8s_node" { - count = var.node_root_volume_size_in_gb == 0 ? var.number_of_k8s_nodes : 0 - floating_ip = var.k8s_node_fips[count.index] - port_id = element(openstack_networking_port_v2.k8s_node_port.*.id, count.index) -} - -resource "openstack_networking_floatingip_associate_v2" "k8s_nodes" { - for_each = var.number_of_k8s_nodes == 0 && var.number_of_k8s_nodes_no_floating_ip == 0 ? 
{ for key, value in var.k8s_nodes : key => value if value.floating_ip } : {} - floating_ip = var.k8s_nodes_fips[each.key].address - port_id = openstack_networking_port_v2.k8s_nodes_port[each.key].id -} - -resource "openstack_blockstorage_volume_v2" "glusterfs_volume" { - name = "${var.cluster_name}-glusterfs_volume-${count.index + 1}" - count = var.gfs_root_volume_size_in_gb == 0 ? var.number_of_gfs_nodes_no_floating_ip : 0 - description = "Non-ephemeral volume for GlusterFS" - size = var.gfs_volume_size_in_gb -} - -resource "openstack_compute_volume_attach_v2" "glusterfs_volume" { - count = var.gfs_root_volume_size_in_gb == 0 ? var.number_of_gfs_nodes_no_floating_ip : 0 - instance_id = element(openstack_compute_instance_v2.glusterfs_node_no_floating_ip.*.id, count.index) - volume_id = element(openstack_blockstorage_volume_v2.glusterfs_volume.*.id, count.index) -} diff --git a/contrib/terraform/openstack/modules/compute/outputs.tf b/contrib/terraform/openstack/modules/compute/outputs.tf deleted file mode 100644 index 741e9f035df..00000000000 --- a/contrib/terraform/openstack/modules/compute/outputs.tf +++ /dev/null @@ -1,3 +0,0 @@ -output "k8s_master_ips" { - value = concat(openstack_compute_instance_v2.k8s_master_no_floating_ip.*, openstack_compute_instance_v2.k8s_master_no_floating_ip_no_etcd.*) -} diff --git a/contrib/terraform/openstack/modules/compute/templates/cloudinit.yaml.tmpl b/contrib/terraform/openstack/modules/compute/templates/cloudinit.yaml.tmpl deleted file mode 100644 index fd05cc44e50..00000000000 --- a/contrib/terraform/openstack/modules/compute/templates/cloudinit.yaml.tmpl +++ /dev/null @@ -1,54 +0,0 @@ -%{~ if length(extra_partitions) > 0 || netplan_critical_dhcp_interface != "" } -#cloud-config -bootcmd: -%{~ for idx, partition in extra_partitions } -- [ cloud-init-per, once, move-second-header, sgdisk, --move-second-header, ${partition.volume_path} ] -- [ cloud-init-per, once, create-part-${idx}, parted, --script, ${partition.volume_path}, 'mkpart extended ext4 ${partition.partition_start} ${partition.partition_end}' ] -- [ cloud-init-per, once, create-fs-part-${idx}, mkfs.ext4, ${partition.partition_path} ] -%{~ endfor } - -runcmd: -%{~ if netplan_critical_dhcp_interface != "" } - - netplan apply -%{~ endif } -%{~ for idx, partition in extra_partitions } - - mkdir -p ${partition.mount_path} - - chown nobody:nogroup ${partition.mount_path} - - mount ${partition.partition_path} ${partition.mount_path} -%{~ endfor ~} - -%{~ if netplan_critical_dhcp_interface != "" } -write_files: - - path: /etc/netplan/90-critical-dhcp.yaml - content: | - network: - version: 2 - ethernets: - ${ netplan_critical_dhcp_interface }: - dhcp4: true - critical: true -%{~ endif } - -mounts: -%{~ for idx, partition in extra_partitions } - - [ ${partition.partition_path}, ${partition.mount_path} ] -%{~ endfor } -%{~ else ~} -# yamllint disable rule:comments -#cloud-config -## in some cases novnc console access is required -## it requires ssh password to be set -#ssh_pwauth: yes -#chpasswd: -# list: | -# root:secret -# expire: False - -## in some cases direct root ssh access via ssh key is required -#disable_root: false - -## in some cases additional CA certs are required -#ca-certs: -# trusted: | -# -----BEGIN CERTIFICATE----- -%{~ endif } diff --git a/contrib/terraform/openstack/modules/compute/variables.tf b/contrib/terraform/openstack/modules/compute/variables.tf deleted file mode 100644 index ed478de3c2f..00000000000 --- a/contrib/terraform/openstack/modules/compute/variables.tf +++ 
/dev/null @@ -1,269 +0,0 @@ -variable "cluster_name" {} - -variable "az_list" { - type = list(string) -} - -variable "az_list_node" { - type = list(string) -} - -variable "number_of_k8s_masters" {} - -variable "number_of_k8s_masters_no_etcd" {} - -variable "number_of_etcd" {} - -variable "number_of_k8s_masters_no_floating_ip" {} - -variable "number_of_k8s_masters_no_floating_ip_no_etcd" {} - -variable "number_of_k8s_nodes" {} - -variable "number_of_k8s_nodes_no_floating_ip" {} - -variable "number_of_bastions" {} - -variable "number_of_gfs_nodes_no_floating_ip" {} - -variable "bastion_root_volume_size_in_gb" {} - -variable "etcd_root_volume_size_in_gb" {} - -variable "master_root_volume_size_in_gb" {} - -variable "node_root_volume_size_in_gb" {} - -variable "gfs_root_volume_size_in_gb" {} - -variable "gfs_volume_size_in_gb" {} - -variable "master_volume_type" {} - -variable "node_volume_type" {} - -variable "public_key_path" {} - -variable "image" {} - -variable "image_gfs" {} - -variable "ssh_user" {} - -variable "ssh_user_gfs" {} - -variable "flavor_k8s_master" {} - -variable "flavor_k8s_node" {} - -variable "flavor_etcd" {} - -variable "flavor_gfs_node" {} - -variable "network_name" {} - -variable "flavor_bastion" {} - -variable "network_id" { - default = "" -} - -variable "use_existing_network" { - type = bool -} - -variable "network_router_id" { - default = "" -} - -variable "k8s_master_fips" { - type = list -} - -variable "k8s_master_no_etcd_fips" { - type = list -} - -variable "k8s_node_fips" { - type = list -} - -variable "k8s_masters_fips" { - type = map(object({ - address = string - })) -} - -variable "k8s_nodes_fips" { - type = map(object({ - address = string - })) -} - -variable "bastion_fips" { - type = list -} - -variable "bastion_allowed_remote_ips" { - type = list -} - -variable "bastion_allowed_remote_ipv6_ips" { - type = list -} - -variable "master_allowed_remote_ips" { - type = list -} - -variable "master_allowed_remote_ipv6_ips" { - type = list -} - -variable "k8s_allowed_remote_ips" { - type = list -} - -variable "k8s_allowed_remote_ips_ipv6" { - type = list -} - -variable "k8s_allowed_egress_ips" { - type = list -} - -variable "k8s_allowed_egress_ipv6_ips" { - type = list -} - -variable "k8s_masters" { - type = map(object({ - az = string - flavor = string - etcd = bool - floating_ip = bool - reserved_floating_ip = optional(string) - image_id = optional(string) - root_volume_size_in_gb = optional(number) - volume_type = optional(string) - network_id = optional(string) - })) -} - -variable "k8s_nodes" { - type = map(object({ - az = string - flavor = string - floating_ip = bool - reserved_floating_ip = optional(string) - extra_groups = optional(string) - image_id = optional(string) - root_volume_size_in_gb = optional(number) - volume_type = optional(string) - network_id = optional(string) - additional_server_groups = optional(list(string)) - server_group = optional(string) - cloudinit = optional(object({ - extra_partitions = optional(list(object({ - volume_path = string - partition_path = string - partition_start = string - partition_end = string - mount_path = string - })), []) - netplan_critical_dhcp_interface = optional(string, "") - })) - })) -} - -variable "additional_server_groups" { - type = map(object({ - policy = string - })) -} - -variable "supplementary_master_groups" { - default = "" -} - -variable "supplementary_node_groups" { - default = "" -} - -variable "master_allowed_ports" { - type = list -} - -variable "master_allowed_ports_ipv6" { - type = list -} - 
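-# Illustrative sketch (hypothetical values, not from the original file): a
-# `k8s_nodes` entry matching the object type above, as it could be set in
-# cluster.tfvars, including the optional cloudinit block that partitions,
-# formats and mounts an extra volume:
-#
-# k8s_nodes = {
-#   "node-1" = {
-#     az          = "nova"
-#     flavor      = "<flavor-id>"
-#     floating_ip = true
-#     cloudinit = {
-#       extra_partitions = [{
-#         volume_path     = "/dev/vdb"
-#         partition_path  = "/dev/vdb1"
-#         partition_start = "0%"
-#         partition_end   = "100%"
-#         mount_path      = "/mnt/data"
-#       }]
-#       netplan_critical_dhcp_interface = ""
-#     }
-#   }
-# }
-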
-variable "worker_allowed_ports" { - type = list -} - -variable "worker_allowed_ports_ipv6" { - type = list -} - -variable "bastion_allowed_ports" { - type = list -} - -variable "bastion_allowed_ports_ipv6" { - type = list -} - -variable "use_access_ip" {} - -variable "master_server_group_policy" { - type = string -} - -variable "node_server_group_policy" { - type = string -} - -variable "etcd_server_group_policy" { - type = string -} - -variable "extra_sec_groups" { - type = bool -} - -variable "extra_sec_groups_name" { - type = string -} - -variable "image_uuid" { - type = string -} - -variable "image_gfs_uuid" { - type = string -} - -variable "image_master" { - type = string -} - -variable "image_master_uuid" { - type = string -} - -variable "group_vars_path" { - type = string -} - -variable "port_security_enabled" { - type = bool -} - -variable "force_null_port_security" { - type = bool -} - -variable "private_subnet_id" { - type = string -} diff --git a/contrib/terraform/openstack/modules/compute/versions.tf b/contrib/terraform/openstack/modules/compute/versions.tf deleted file mode 100644 index bfcf77a5c0d..00000000000 --- a/contrib/terraform/openstack/modules/compute/versions.tf +++ /dev/null @@ -1,8 +0,0 @@ -terraform { - required_providers { - openstack = { - source = "terraform-provider-openstack/openstack" - } - } - required_version = ">= 1.3.0" -} diff --git a/contrib/terraform/openstack/modules/ips/main.tf b/contrib/terraform/openstack/modules/ips/main.tf deleted file mode 100644 index 68a4af3ecdc..00000000000 --- a/contrib/terraform/openstack/modules/ips/main.tf +++ /dev/null @@ -1,46 +0,0 @@ -resource "null_resource" "dummy_dependency" { - triggers = { - dependency_id = var.router_id - } - depends_on = [ - var.router_internal_port_id - ] -} - -# If user specifies pre-existing IPs to use in k8s_master_fips, do not create new ones. -resource "openstack_networking_floatingip_v2" "k8s_master" { - count = length(var.k8s_master_fips) > 0 ? 0 : var.number_of_k8s_masters - pool = var.floatingip_pool - depends_on = [null_resource.dummy_dependency] -} - -resource "openstack_networking_floatingip_v2" "k8s_masters" { - for_each = var.number_of_k8s_masters == 0 && var.number_of_k8s_masters_no_etcd == 0 ? { for key, value in var.k8s_masters : key => value if value.floating_ip && (lookup(value, "reserved_floating_ip", "") == "") } : tomap({}) - pool = var.floatingip_pool - depends_on = [null_resource.dummy_dependency] -} - -# If user specifies pre-existing IPs to use in k8s_master_fips, do not create new ones. -resource "openstack_networking_floatingip_v2" "k8s_master_no_etcd" { - count = length(var.k8s_master_fips) > 0 ? 0 : var.number_of_k8s_masters_no_etcd - pool = var.floatingip_pool - depends_on = [null_resource.dummy_dependency] -} - -resource "openstack_networking_floatingip_v2" "k8s_node" { - count = var.number_of_k8s_nodes - pool = var.floatingip_pool - depends_on = [null_resource.dummy_dependency] -} - -resource "openstack_networking_floatingip_v2" "bastion" { - count = length(var.bastion_fips) > 0 ? 0 : var.number_of_bastions - pool = var.floatingip_pool - depends_on = [null_resource.dummy_dependency] -} - -resource "openstack_networking_floatingip_v2" "k8s_nodes" { - for_each = var.number_of_k8s_nodes == 0 ? 
{ for key, value in var.k8s_nodes : key => value if value.floating_ip && (lookup(value, "reserved_floating_ip", "") == "") } : tomap({}) - pool = var.floatingip_pool - depends_on = [null_resource.dummy_dependency] -} diff --git a/contrib/terraform/openstack/modules/ips/outputs.tf b/contrib/terraform/openstack/modules/ips/outputs.tf deleted file mode 100644 index 670481109af..00000000000 --- a/contrib/terraform/openstack/modules/ips/outputs.tf +++ /dev/null @@ -1,48 +0,0 @@ -locals { - k8s_masters_reserved_fips = { - for key, value in var.k8s_masters : key => { - address = value.reserved_floating_ip - } if value.floating_ip && (lookup(value, "reserved_floating_ip", "") != "") - } - k8s_masters_create_fips = { - for key, value in openstack_networking_floatingip_v2.k8s_masters : key => { - address = value.address - } - } - k8s_nodes_reserved_fips = { - for key, value in var.k8s_nodes : key => { - address = value.reserved_floating_ip - } if value.floating_ip && (lookup(value, "reserved_floating_ip", "") != "") - } - k8s_nodes_create_fips = { - for key, value in openstack_networking_floatingip_v2.k8s_nodes : key => { - address = value.address - } - } -} - -# If k8s_master_fips is already defined as input, keep the same value since new FIPs have not been created. -output "k8s_master_fips" { - value = length(var.k8s_master_fips) > 0 ? var.k8s_master_fips : openstack_networking_floatingip_v2.k8s_master[*].address -} - -output "k8s_masters_fips" { - value = merge(local.k8s_masters_create_fips, local.k8s_masters_reserved_fips) -} - -# If k8s_master_fips is already defined as input, keep the same value since new FIPs have not been created. -output "k8s_master_no_etcd_fips" { - value = length(var.k8s_master_fips) > 0 ? var.k8s_master_fips : openstack_networking_floatingip_v2.k8s_master_no_etcd[*].address -} - -output "k8s_node_fips" { - value = openstack_networking_floatingip_v2.k8s_node[*].address -} - -output "k8s_nodes_fips" { - value = merge(local.k8s_nodes_create_fips, local.k8s_nodes_reserved_fips) -} - -output "bastion_fips" { - value = length(var.bastion_fips) > 0 ? 
var.bastion_fips : openstack_networking_floatingip_v2.bastion[*].address -} diff --git a/contrib/terraform/openstack/modules/ips/variables.tf b/contrib/terraform/openstack/modules/ips/variables.tf deleted file mode 100644 index b52888b847f..00000000000 --- a/contrib/terraform/openstack/modules/ips/variables.tf +++ /dev/null @@ -1,27 +0,0 @@ -variable "number_of_k8s_masters" {} - -variable "number_of_k8s_masters_no_etcd" {} - -variable "number_of_k8s_nodes" {} - -variable "floatingip_pool" {} - -variable "number_of_bastions" {} - -variable "external_net" {} - -variable "network_name" {} - -variable "router_id" { - default = "" -} - -variable "k8s_masters" {} - -variable "k8s_nodes" {} - -variable "k8s_master_fips" {} - -variable "bastion_fips" {} - -variable "router_internal_port_id" {} diff --git a/contrib/terraform/openstack/modules/ips/versions.tf b/contrib/terraform/openstack/modules/ips/versions.tf deleted file mode 100644 index b7bf5a9cde3..00000000000 --- a/contrib/terraform/openstack/modules/ips/versions.tf +++ /dev/null @@ -1,11 +0,0 @@ -terraform { - required_providers { - null = { - source = "hashicorp/null" - } - openstack = { - source = "terraform-provider-openstack/openstack" - } - } - required_version = ">= 0.12.26" -} diff --git a/contrib/terraform/openstack/modules/loadbalancer/main.tf b/contrib/terraform/openstack/modules/loadbalancer/main.tf deleted file mode 100644 index 12fa225ea6e..00000000000 --- a/contrib/terraform/openstack/modules/loadbalancer/main.tf +++ /dev/null @@ -1,54 +0,0 @@ -resource "openstack_lb_loadbalancer_v2" "k8s_lb" { - count = var.k8s_master_loadbalancer_enabled ? 1 : 0 - name = "${var.cluster_name}-api-loadbalancer" - vip_subnet_id = var.subnet_id -} - -resource "openstack_lb_listener_v2" "api_listener"{ - count = var.k8s_master_loadbalancer_enabled ? 1 : 0 - name = "api-listener" - protocol = "TCP" - protocol_port = var.k8s_master_loadbalancer_listener_port - loadbalancer_id = openstack_lb_loadbalancer_v2.k8s_lb[0].id - depends_on = [ openstack_lb_loadbalancer_v2.k8s_lb ] -} - -resource "openstack_lb_pool_v2" "api_pool" { - count = var.k8s_master_loadbalancer_enabled ? 1 : 0 - name = "api-pool" - protocol = "TCP" - lb_method = "ROUND_ROBIN" - listener_id = openstack_lb_listener_v2.api_listener[0].id - depends_on = [ openstack_lb_listener_v2.api_listener ] -} - -resource "openstack_lb_member_v2" "lb_member" { - count = var.k8s_master_loadbalancer_enabled ? length(var.k8s_master_ips) : 0 - name = var.k8s_master_ips[count.index].name - pool_id = openstack_lb_pool_v2.api_pool[0].id - address = var.k8s_master_ips[count.index].access_ip_v4 - protocol_port = var.k8s_master_loadbalancer_server_port - depends_on = [ openstack_lb_pool_v2.api_pool ] -} - -resource "openstack_lb_monitor_v2" "monitor" { - count = var.k8s_master_loadbalancer_enabled ? 1 : 0 - name = "Api Monitor" - pool_id = openstack_lb_pool_v2.api_pool[0].id - type = "TCP" - delay = 10 - timeout = 5 - max_retries = 5 -} - -resource "openstack_networking_floatingip_v2" "floatip_1" { - count = var.k8s_master_loadbalancer_enabled && var.k8s_master_loadbalancer_public_ip == "" ? 1 : 0 - pool = var.floatingip_pool -} - -resource "openstack_networking_floatingip_associate_v2" "public_ip" { - count = var.k8s_master_loadbalancer_enabled ? 1 : 0 - floating_ip = var.k8s_master_loadbalancer_public_ip != "" ? 
var.k8s_master_loadbalancer_public_ip : openstack_networking_floatingip_v2.floatip_1[0].address - port_id = openstack_lb_loadbalancer_v2.k8s_lb[0].vip_port_id - depends_on = [ openstack_lb_loadbalancer_v2.k8s_lb ] -} diff --git a/contrib/terraform/openstack/modules/loadbalancer/variables.tf b/contrib/terraform/openstack/modules/loadbalancer/variables.tf deleted file mode 100644 index 40b1b588cef..00000000000 --- a/contrib/terraform/openstack/modules/loadbalancer/variables.tf +++ /dev/null @@ -1,15 +0,0 @@ -variable "cluster_name" {} - -variable "subnet_id" {} - -variable "floatingip_pool" {} - -variable "k8s_master_ips" {} - -variable "k8s_master_loadbalancer_enabled" {} - -variable "k8s_master_loadbalancer_listener_port" {} - -variable "k8s_master_loadbalancer_server_port" {} - -variable "k8s_master_loadbalancer_public_ip" {} diff --git a/contrib/terraform/openstack/modules/loadbalancer/versions.tf b/contrib/terraform/openstack/modules/loadbalancer/versions.tf deleted file mode 100644 index 6c942790da8..00000000000 --- a/contrib/terraform/openstack/modules/loadbalancer/versions.tf +++ /dev/null @@ -1,8 +0,0 @@ -terraform { - required_providers { - openstack = { - source = "terraform-provider-openstack/openstack" - } - } - required_version = ">= 0.12.26" -} diff --git a/contrib/terraform/openstack/modules/network/main.tf b/contrib/terraform/openstack/modules/network/main.tf deleted file mode 100644 index a6324d7edab..00000000000 --- a/contrib/terraform/openstack/modules/network/main.tf +++ /dev/null @@ -1,34 +0,0 @@ -resource "openstack_networking_router_v2" "k8s" { - name = "${var.cluster_name}-router" - count = var.use_neutron == 1 && var.router_id == null ? 1 : 0 - admin_state_up = "true" - external_network_id = var.external_net -} - -data "openstack_networking_router_v2" "k8s" { - router_id = var.router_id - count = var.use_neutron == 1 && var.router_id != null ? 1 : 0 -} - -resource "openstack_networking_network_v2" "k8s" { - name = var.network_name - count = var.use_neutron - dns_domain = var.network_dns_domain != null ? var.network_dns_domain : null - admin_state_up = "true" - port_security_enabled = var.port_security_enabled -} - -resource "openstack_networking_subnet_v2" "k8s" { - name = "${var.cluster_name}-internal-network" - count = var.use_neutron - network_id = openstack_networking_network_v2.k8s[count.index].id - cidr = var.subnet_cidr - ip_version = 4 - dns_nameservers = var.dns_nameservers -} - -resource "openstack_networking_router_interface_v2" "k8s" { - count = var.use_neutron - router_id = "%{if openstack_networking_router_v2.k8s != []}${openstack_networking_router_v2.k8s[count.index].id}%{else}${var.router_id}%{endif}" - subnet_id = openstack_networking_subnet_v2.k8s[count.index].id -} diff --git a/contrib/terraform/openstack/modules/network/outputs.tf b/contrib/terraform/openstack/modules/network/outputs.tf deleted file mode 100644 index 0e8a5004f33..00000000000 --- a/contrib/terraform/openstack/modules/network/outputs.tf +++ /dev/null @@ -1,15 +0,0 @@ -output "router_id" { - value = "%{if var.use_neutron == 1} ${var.router_id == null ? 
element(concat(openstack_networking_router_v2.k8s.*.id, [""]), 0) : var.router_id} %{else} %{endif}" -} - -output "network_id" { - value = element(concat(openstack_networking_network_v2.k8s.*.id, [""]),0) -} - -output "router_internal_port_id" { - value = element(concat(openstack_networking_router_interface_v2.k8s.*.id, [""]), 0) -} - -output "subnet_id" { - value = element(concat(openstack_networking_subnet_v2.k8s.*.id, [""]), 0) -} diff --git a/contrib/terraform/openstack/modules/network/variables.tf b/contrib/terraform/openstack/modules/network/variables.tf deleted file mode 100644 index 6cd7ff72e5b..00000000000 --- a/contrib/terraform/openstack/modules/network/variables.tf +++ /dev/null @@ -1,21 +0,0 @@ -variable "external_net" {} - -variable "network_name" {} - -variable "network_dns_domain" {} - -variable "cluster_name" {} - -variable "dns_nameservers" { - type = list -} - -variable "port_security_enabled" { - type = bool -} - -variable "subnet_cidr" {} - -variable "use_neutron" {} - -variable "router_id" {} diff --git a/contrib/terraform/openstack/modules/network/versions.tf b/contrib/terraform/openstack/modules/network/versions.tf deleted file mode 100644 index 6c942790da8..00000000000 --- a/contrib/terraform/openstack/modules/network/versions.tf +++ /dev/null @@ -1,8 +0,0 @@ -terraform { - required_providers { - openstack = { - source = "terraform-provider-openstack/openstack" - } - } - required_version = ">= 0.12.26" -} diff --git a/contrib/terraform/openstack/sample-inventory/cluster.tfvars b/contrib/terraform/openstack/sample-inventory/cluster.tfvars deleted file mode 100644 index 8ab7c6d38e4..00000000000 --- a/contrib/terraform/openstack/sample-inventory/cluster.tfvars +++ /dev/null @@ -1,89 +0,0 @@ -# your Kubernetes cluster name here -cluster_name = "i-didnt-read-the-docs" - -# list of availability zones available in your OpenStack cluster -#az_list = ["nova"] - -# SSH key to use for access to nodes -public_key_path = "~/.ssh/id_rsa.pub" - -# image to use for bastion, masters, standalone etcd instances, and nodes -image = "" - -# user on the node (ex. core on Container Linux, ubuntu on Ubuntu, etc.) -ssh_user = "" - -# 0|1 bastion nodes -number_of_bastions = 0 - -#flavor_bastion = "" - -# standalone etcds -number_of_etcd = 0 - -# masters -number_of_k8s_masters = 1 - -number_of_k8s_masters_no_etcd = 0 - -number_of_k8s_masters_no_floating_ip = 0 - -number_of_k8s_masters_no_floating_ip_no_etcd = 0 - -flavor_k8s_master = "" - -k8s_masters = { - # "master-1" = { - # "az" = "nova" - # "flavor" = "" - # "floating_ip" = true - # "etcd" = true - # }, - # "master-2" = { - # "az" = "nova" - # "flavor" = "" - # "floating_ip" = false - # "etcd" = true - # }, - # "master-3" = { - # "az" = "nova" - # "flavor" = "" - # "floating_ip" = true - # "etcd" = true - # }, -} - - -# nodes -number_of_k8s_nodes = 2 - -number_of_k8s_nodes_no_floating_ip = 4 - -#flavor_k8s_node = "" - -# GlusterFS -# either 0 or more than one -#number_of_gfs_nodes_no_floating_ip = 0 -#gfs_volume_size_in_gb = 150 -# Container Linux does not support GlusterFS -#image_gfs = "" -# May be different from other nodes -#ssh_user_gfs = "ubuntu" -#flavor_gfs_node = "" - -# networking -network_name = "" - -# Use a existing network with the name of network_name. Set to false to create a network with name of network_name. -# use_existing_network = true - -external_net = "" - -subnet_cidr = "" - -floatingip_pool = "" - -bastion_allowed_remote_ips = ["0.0.0.0/0"] - -# Force port security to be null. 
Some cloud providers do not allow to set port security. -# force_null_port_security = false diff --git a/contrib/terraform/openstack/sample-inventory/group_vars b/contrib/terraform/openstack/sample-inventory/group_vars deleted file mode 120000 index 37359582379..00000000000 --- a/contrib/terraform/openstack/sample-inventory/group_vars +++ /dev/null @@ -1 +0,0 @@ -../../../../inventory/sample/group_vars \ No newline at end of file diff --git a/contrib/terraform/openstack/variables.tf b/contrib/terraform/openstack/variables.tf deleted file mode 100644 index 90416df50d0..00000000000 --- a/contrib/terraform/openstack/variables.tf +++ /dev/null @@ -1,411 +0,0 @@ -variable "cluster_name" { - default = "example" -} - -variable "az_list" { - description = "List of Availability Zones to use for masters in your OpenStack cluster" - type = list(string) - default = ["nova"] -} - -variable "az_list_node" { - description = "List of Availability Zones to use for nodes in your OpenStack cluster" - type = list(string) - default = ["nova"] -} - -variable "number_of_bastions" { - default = 1 -} - -variable "number_of_k8s_masters" { - default = 2 -} - -variable "number_of_k8s_masters_no_etcd" { - default = 2 -} - -variable "number_of_etcd" { - default = 2 -} - -variable "number_of_k8s_masters_no_floating_ip" { - default = 2 -} - -variable "number_of_k8s_masters_no_floating_ip_no_etcd" { - default = 2 -} - -variable "number_of_k8s_nodes" { - default = 1 -} - -variable "number_of_k8s_nodes_no_floating_ip" { - default = 1 -} - -variable "number_of_gfs_nodes_no_floating_ip" { - default = 0 -} - -variable "bastion_root_volume_size_in_gb" { - default = 0 -} - -variable "etcd_root_volume_size_in_gb" { - default = 0 -} - -variable "master_root_volume_size_in_gb" { - default = 0 -} - -variable "node_root_volume_size_in_gb" { - default = 0 -} - -variable "gfs_root_volume_size_in_gb" { - default = 0 -} - -variable "gfs_volume_size_in_gb" { - default = 75 -} - -variable "master_volume_type" { - default = "Default" -} - -variable "node_volume_type" { - default = "Default" -} - -variable "public_key_path" { - description = "The path of the ssh pub key" - default = "~/.ssh/id_rsa.pub" -} - -variable "image" { - description = "the image to use" - default = "" -} - -variable "image_gfs" { - description = "Glance image to use for GlusterFS" - default = "" -} - -variable "ssh_user" { - description = "used to fill out tags for ansible inventory" - default = "ubuntu" -} - -variable "ssh_user_gfs" { - description = "used to fill out tags for ansible inventory" - default = "ubuntu" -} - -variable "flavor_bastion" { - description = "Use 'openstack flavor list' command to see what your OpenStack instance uses for IDs" - default = 3 -} - -variable "flavor_k8s_master" { - description = "Use 'openstack flavor list' command to see what your OpenStack instance uses for IDs" - default = 3 -} - -variable "flavor_k8s_node" { - description = "Use 'openstack flavor list' command to see what your OpenStack instance uses for IDs" - default = 3 -} - -variable "flavor_etcd" { - description = "Use 'openstack flavor list' command to see what your OpenStack instance uses for IDs" - default = 3 -} - -variable "flavor_gfs_node" { - description = "Use 'openstack flavor list' command to see what your OpenStack instance uses for IDs" - default = 3 -} - -variable "network_name" { - description = "name of the internal network to use" - default = "internal" -} - -variable "use_existing_network" { - description = "Use an existing network" - type = bool - 
default = "false" -} - -variable "network_dns_domain" { - description = "dns_domain for the internal network" - type = string - default = null -} - -variable "use_neutron" { - description = "Use neutron" - default = 1 -} - -variable "port_security_enabled" { - description = "Enable port security on the internal network" - type = bool - default = "true" -} - -variable "force_null_port_security" { - description = "Force port security to be null. Some providers does not allow setting port security" - type = bool - default = "false" -} - -variable "subnet_cidr" { - description = "Subnet CIDR block." - type = string - default = "10.0.0.0/24" -} - -variable "dns_nameservers" { - description = "An array of DNS name server names used by hosts in this subnet." - type = list(string) - default = [] -} - -variable "k8s_master_fips" { - description = "specific pre-existing floating IPs to use for master nodes" - type = list(string) - default = [] -} - -variable "bastion_fips" { - description = "specific pre-existing floating IPs to use for bastion node" - type = list(string) - default = [] -} - -variable "floatingip_pool" { - description = "name of the floating ip pool to use" - default = "external" -} - -variable "wait_for_floatingip" { - description = "Terraform will poll the instance until the floating IP has been associated." - default = "false" -} - -variable "external_net" { - description = "uuid of the external/public network" -} - -variable "supplementary_master_groups" { - description = "supplementary kubespray ansible groups for masters, such kube_node" - default = "" -} - -variable "supplementary_node_groups" { - description = "supplementary kubespray ansible groups for worker nodes, such as kube_ingress" - default = "" -} - -variable "bastion_allowed_remote_ips" { - description = "An array of CIDRs allowed to SSH to hosts" - type = list(string) - default = ["0.0.0.0/0"] -} - -variable "bastion_allowed_remote_ipv6_ips" { - description = "An array of IPv6 CIDRs allowed to SSH to hosts" - type = list(string) - default = ["::/0"] -} - -variable "master_allowed_remote_ips" { - description = "An array of CIDRs allowed to access API of masters" - type = list(string) - default = ["0.0.0.0/0"] -} - -variable "master_allowed_remote_ipv6_ips" { - description = "An array of IPv6 CIDRs allowed to access API of masters" - type = list(string) - default = ["::/0"] -} - -variable "k8s_allowed_remote_ips" { - description = "An array of CIDRs allowed to SSH to hosts" - type = list(string) - default = [] -} - -variable "k8s_allowed_remote_ips_ipv6" { - description = "An array of IPv6 CIDRs allowed to SSH to hosts" - type = list(string) - default = [] -} - -variable "k8s_allowed_egress_ips" { - description = "An array of CIDRs allowed for egress traffic" - type = list(string) - default = ["0.0.0.0/0"] -} - -variable "k8s_allowed_egress_ipv6_ips" { - description = "An array of CIDRs allowed for egress IPv6 traffic" - type = list(string) - default = ["::/0"] -} - -variable "master_allowed_ports" { - type = list(any) - - default = [] -} - -variable "master_allowed_ports_ipv6" { - type = list(any) - - default = [] -} - -variable "worker_allowed_ports" { - type = list(any) - - default = [ - { - "protocol" = "tcp" - "port_range_min" = 30000 - "port_range_max" = 32767 - "remote_ip_prefix" = "0.0.0.0/0" - }, - ] -} - -variable "worker_allowed_ports_ipv6" { - type = list(any) - - default = [ - { - "protocol" = "tcp" - "port_range_min" = 30000 - "port_range_max" = 32767 - "remote_ip_prefix" = "::/0" - }, - ] -} - 
-variable "bastion_allowed_ports" { - type = list(any) - - default = [] -} - -variable "bastion_allowed_ports_ipv6" { - type = list(any) - - default = [] -} - -variable "use_access_ip" { - default = 1 -} - -variable "master_server_group_policy" { - description = "desired server group policy, e.g. anti-affinity" - default = "" -} - -variable "node_server_group_policy" { - description = "desired server group policy, e.g. anti-affinity" - default = "" -} - -variable "etcd_server_group_policy" { - description = "desired server group policy, e.g. anti-affinity" - default = "" -} - -variable "router_id" { - description = "uuid of an externally defined router to use" - default = null -} - -variable "router_internal_port_id" { - description = "uuid of the port connection our router to our network" - default = null -} - -variable "k8s_masters" { - default = {} -} - -variable "k8s_nodes" { - default = {} -} - -variable "additional_server_groups" { - default = {} - type = map(object({ - policy = string - })) -} - -variable "extra_sec_groups" { - default = false -} - -variable "extra_sec_groups_name" { - default = "custom" -} - -variable "image_uuid" { - description = "uuid of image inside openstack to use" - default = "" -} - -variable "image_gfs_uuid" { - description = "uuid of image to be used on gluster fs nodes. If empty defaults to image_uuid" - default = "" -} - -variable "image_master" { - description = "uuid of image inside openstack to use" - default = "" -} - -variable "image_master_uuid" { - description = "uuid of image to be used on master nodes. If empty defaults to image_uuid" - default = "" -} - -variable "group_vars_path" { - description = "path to the inventory group vars directory" - type = string - default = "./group_vars" -} - -variable "k8s_master_loadbalancer_enabled" { - type = bool - default = "false" -} - -variable "k8s_master_loadbalancer_listener_port" { - type = string - default = "6443" -} - -variable "k8s_master_loadbalancer_server_port" { - type = string - default = 6443 -} - -variable "k8s_master_loadbalancer_public_ip" { - type = string - default = "" -} diff --git a/contrib/terraform/openstack/versions.tf b/contrib/terraform/openstack/versions.tf deleted file mode 100644 index 6e4c1045bcf..00000000000 --- a/contrib/terraform/openstack/versions.tf +++ /dev/null @@ -1,9 +0,0 @@ -terraform { - required_providers { - openstack = { - source = "terraform-provider-openstack/openstack" - version = "~> 1.17" - } - } - required_version = ">= 1.3.0" -} diff --git a/contrib/terraform/terraform.py b/contrib/terraform/terraform.py deleted file mode 100755 index 9f6132711ed..00000000000 --- a/contrib/terraform/terraform.py +++ /dev/null @@ -1,475 +0,0 @@ -#!/usr/bin/env python3 -# -# Copyright 2015 Cisco Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# original: https://github.com/CiscoCloud/terraform.py - -"""\ -Dynamic inventory for Terraform - finds all `.tfstate` files below the working -directory and generates an inventory based on them. 
-""" -import argparse -from collections import defaultdict -import random -from functools import wraps -import json -import os -import re - -VERSION = '0.4.0pre' - - -def tfstates(root=None): - root = root or os.getcwd() - for dirpath, _, filenames in os.walk(root): - for name in filenames: - if os.path.splitext(name)[-1] == '.tfstate': - yield os.path.join(dirpath, name) - -def convert_to_v3_structure(attributes, prefix=''): - """ Convert the attributes from v4 to v3 - Receives a dict and return a dictionary """ - result = {} - if isinstance(attributes, str): - # In the case when we receive a string (e.g. values for security_groups) - return {'{}{}'.format(prefix, random.randint(1,10**10)): attributes} - for key, value in attributes.items(): - if isinstance(value, list): - if len(value): - result['{}{}.#'.format(prefix, key, hash)] = len(value) - for i, v in enumerate(value): - result.update(convert_to_v3_structure(v, '{}{}.{}.'.format(prefix, key, i))) - elif isinstance(value, dict): - result['{}{}.%'.format(prefix, key)] = len(value) - for k, v in value.items(): - result['{}{}.{}'.format(prefix, key, k)] = v - else: - result['{}{}'.format(prefix, key)] = value - return result - -def iterresources(filenames): - for filename in filenames: - with open(filename, 'r') as json_file: - state = json.load(json_file) - tf_version = state['version'] - if tf_version == 3: - for module in state['modules']: - name = module['path'][-1] - for key, resource in module['resources'].items(): - yield name, key, resource - elif tf_version == 4: - # In version 4 the structure changes so we need to iterate - # each instance inside the resource branch. - for resource in state['resources']: - name = resource['provider'].split('.')[-1] - for instance in resource['instances']: - key = "{}.{}".format(resource['type'], resource['name']) - if 'index_key' in instance: - key = "{}.{}".format(key, instance['index_key']) - data = {} - data['type'] = resource['type'] - data['provider'] = resource['provider'] - data['depends_on'] = instance.get('depends_on', []) - data['primary'] = {'attributes': convert_to_v3_structure(instance['attributes'])} - if 'id' in instance['attributes']: - data['primary']['id'] = instance['attributes']['id'] - data['primary']['meta'] = instance['attributes'].get('meta',{}) - yield name, key, data - else: - raise KeyError('tfstate version %d not supported' % tf_version) - - -## READ RESOURCES -PARSERS = {} - - -def _clean_dc(dcname): - # Consul DCs are strictly alphanumeric with underscores and hyphens - - # ensure that the consul_dc attribute meets these requirements. 
- return re.sub(r'[^\w_\-]', '-', dcname) - - -def iterhosts(resources): - '''yield host tuples of (name, attributes, groups)''' - for module_name, key, resource in resources: - resource_type, name = key.split('.', 1) - try: - parser = PARSERS[resource_type] - except KeyError: - continue - - yield parser(resource, module_name) - - -def iterips(resources): - '''yield ip tuples of (port_id, ip)''' - for module_name, key, resource in resources: - resource_type, name = key.split('.', 1) - if resource_type == 'openstack_networking_floatingip_associate_v2': - yield openstack_floating_ips(resource) - - -def parses(prefix): - def inner(func): - PARSERS[prefix] = func - return func - - return inner - - -def calculate_mantl_vars(func): - """calculate Mantl vars""" - - @wraps(func) - def inner(*args, **kwargs): - name, attrs, groups = func(*args, **kwargs) - - # attrs - if attrs.get('role', '') == 'control': - attrs['consul_is_server'] = True - else: - attrs['consul_is_server'] = False - - # groups - if attrs.get('publicly_routable', False): - groups.append('publicly_routable') - - return name, attrs, groups - - return inner - - -def _parse_prefix(source, prefix, sep='.'): - for compkey, value in list(source.items()): - try: - curprefix, rest = compkey.split(sep, 1) - except ValueError: - continue - - if curprefix != prefix or rest == '#': - continue - - yield rest, value - - -def parse_attr_list(source, prefix, sep='.'): - attrs = defaultdict(dict) - for compkey, value in _parse_prefix(source, prefix, sep): - idx, key = compkey.split(sep, 1) - attrs[idx][key] = value - - return list(attrs.values()) - - -def parse_dict(source, prefix, sep='.'): - return dict(_parse_prefix(source, prefix, sep)) - - -def parse_list(source, prefix, sep='.'): - return [value for _, value in _parse_prefix(source, prefix, sep)] - - -def parse_bool(string_form): - if type(string_form) is bool: - return string_form - - token = string_form.lower()[0] - - if token == 't': - return True - elif token == 'f': - return False - else: - raise ValueError('could not convert %r to a bool' % string_form) - -def sanitize_groups(groups): - _groups = [] - chars_to_replace = ['+', '-', '=', '.', '/', ' '] - for i in groups: - _i = i - for char in chars_to_replace: - _i = _i.replace(char, '_') - _groups.append(_i) - groups.clear() - groups.extend(_groups) - -@parses('equinix_metal_device') -def equinix_metal_device(resource, tfvars=None): - raw_attrs = resource['primary']['attributes'] - name = raw_attrs['hostname'] - groups = [] - - attrs = { - 'id': raw_attrs['id'], - 'facilities': parse_list(raw_attrs, 'facilities'), - 'hostname': raw_attrs['hostname'], - 'operating_system': raw_attrs['operating_system'], - 'locked': parse_bool(raw_attrs['locked']), - 'tags': parse_list(raw_attrs, 'tags'), - 'plan': raw_attrs['plan'], - 'project_id': raw_attrs['project_id'], - 'state': raw_attrs['state'], - # ansible - 'ansible_host': raw_attrs['network.0.address'], - 'ansible_ssh_user': 'root', # Use root by default in metal - # generic - 'ipv4_address': raw_attrs['network.0.address'], - 'public_ipv4': raw_attrs['network.0.address'], - 'ipv6_address': raw_attrs['network.1.address'], - 'public_ipv6': raw_attrs['network.1.address'], - 'private_ipv4': raw_attrs['network.2.address'], - 'provider': 'equinix', - } - - if raw_attrs['operating_system'] == 'flatcar_stable': - # For Flatcar set the ssh_user to core - attrs.update({'ansible_ssh_user': 'core'}) - - # add groups based on attrs - groups.append('equinix_metal_operating_system_%s' % 
attrs['operating_system'])
-    groups.append('equinix_metal_locked_%s' % attrs['locked'])
-    groups.append('equinix_metal_state_%s' % attrs['state'])
-    groups.append('equinix_metal_plan_%s' % attrs['plan'])
-
-    # groups specific to kubespray
-    groups = groups + attrs['tags']
-    sanitize_groups(groups)
-
-    return name, attrs, groups
-
-
-def openstack_floating_ips(resource):
-    # return the (port_id, floating_ip) pair that iterips() collects into a dict
-    raw_attrs = resource['primary']['attributes']
-    return raw_attrs['port_id'], raw_attrs['floating_ip']
-
-@parses('openstack_compute_instance_v2')
-@calculate_mantl_vars
-def openstack_host(resource, module_name):
-    raw_attrs = resource['primary']['attributes']
-    name = raw_attrs['name']
-    groups = []
-
-    attrs = {
-        'access_ip_v4': raw_attrs['access_ip_v4'],
-        'access_ip_v6': raw_attrs['access_ip_v6'],
-        'access_ip': raw_attrs['access_ip_v4'],
-        'access_ip6': raw_attrs['access_ip_v6'],
-        'ip': raw_attrs['network.0.fixed_ip_v4'],
-        'flavor': parse_dict(raw_attrs, 'flavor', sep='_'),
-        'id': raw_attrs['id'],
-        'image': parse_dict(raw_attrs, 'image', sep='_'),
-        'key_pair': raw_attrs['key_pair'],
-        'metadata': parse_dict(raw_attrs, 'metadata'),
-        'network': parse_attr_list(raw_attrs, 'network'),
-        'region': raw_attrs.get('region', ''),
-        'security_groups': parse_list(raw_attrs, 'security_groups'),
-        # workaround for an OpenStack bug where hosts have a different domain
-        # after they're restarted
-        'host_domain': 'novalocal',
-        'use_host_domain': True,
-        # generic
-        'public_ipv4': raw_attrs['access_ip_v4'],
-        'private_ipv4': raw_attrs['access_ip_v4'],
-        'port_id': raw_attrs['network.0.port'],
-        'provider': 'openstack',
-    }
-
-    if 'floating_ip' in raw_attrs:
-        attrs['private_ipv4'] = raw_attrs['network.0.fixed_ip_v4']
-
-    if 'metadata.use_access_ip' in raw_attrs and raw_attrs['metadata.use_access_ip'] == "0":
-        attrs.pop('access_ip')
-
-    try:
-        if 'metadata.prefer_ipv6' in raw_attrs and raw_attrs['metadata.prefer_ipv6'] == "1":
-            attrs.update({
-                'ansible_host': re.sub(r"[\[\]]", "", raw_attrs['access_ip_v6']),
-                'publicly_routable': True,
-            })
-        else:
-            attrs.update({
-                'ansible_host': raw_attrs['access_ip_v4'],
-                'publicly_routable': True,
-            })
-    except (KeyError, ValueError):
-        attrs.update({'ansible_host': '', 'publicly_routable': False})
-
-    # Handling of floating IPs has changed: https://github.com/terraform-providers/terraform-provider-openstack/blob/master/CHANGELOG.md#010-june-21-2017
-
-    # attrs specific to Ansible
-    if 'metadata.ssh_user' in raw_attrs:
-        attrs['ansible_user'] = raw_attrs['metadata.ssh_user']
-    if 'metadata.ssh_port' in raw_attrs:
-        attrs['ansible_port'] = raw_attrs['metadata.ssh_port']
-
-    if 'volume.#' in list(raw_attrs.keys()) and int(raw_attrs['volume.#']) > 0:
-        device_index = 1
-        for key, value in list(raw_attrs.items()):
-            match = re.search("^volume.*.device$", key)
-            if match:
-                attrs['disk_volume_device_'+str(device_index)] = value
-                device_index += 1
-
-
-    # attrs specific to Mantl
-    attrs.update({
-        'role': attrs['metadata'].get('role', 'none')
-    })
-
-    # add groups based on attrs
-    groups.append('os_image=' + str(attrs['image']['id']))
-    groups.append('os_flavor=' + str(attrs['flavor']['name']))
-    groups.extend('os_metadata_%s=%s' % item
-                  for item in list(attrs['metadata'].items()))
-    groups.append('os_region=' + str(attrs['region']))
-
-    # groups specific to kubespray
-    for group in
attrs['metadata'].get('kubespray_groups', "").split(","): - groups.append(group) - - sanitize_groups(groups) - - return name, attrs, groups - - -def iter_host_ips(hosts, ips): - '''Update hosts that have an entry in the floating IP list''' - for host in hosts: - port_id = host[1]['port_id'] - - if port_id in ips: - ip = ips[port_id] - - host[1].update({ - 'access_ip_v4': ip, - 'access_ip': ip, - 'public_ipv4': ip, - 'ansible_host': ip, - }) - - if 'use_access_ip' in host[1]['metadata'] and host[1]['metadata']['use_access_ip'] == "0" and 'access_ip' in host[1]: - host[1].pop('access_ip') - - yield host - - -## QUERY TYPES -def query_host(hosts, target): - for name, attrs, _ in hosts: - if name == target: - return attrs - - return {} - - -def query_list(hosts): - groups = defaultdict(dict) - meta = {} - - for name, attrs, hostgroups in hosts: - for group in set(hostgroups): - # Ansible 2.6.2 stopped supporting empty group names: https://github.com/ansible/ansible/pull/42584/commits/d4cd474b42ed23d8f8aabb2a7f84699673852eaf - # Empty group name defaults to "all" in Ansible < 2.6.2 so we alter empty group names to "all" - if not group: group = "all" - - groups[group].setdefault('hosts', []) - groups[group]['hosts'].append(name) - - meta[name] = attrs - - groups['_meta'] = {'hostvars': meta} - return groups - - -def query_hostfile(hosts): - out = ['## begin hosts generated by terraform.py ##'] - out.extend( - '{}\t{}'.format(attrs['ansible_host'].ljust(16), name) - for name, attrs, _ in hosts - ) - - out.append('## end hosts generated by terraform.py ##') - return '\n'.join(out) - - -def main(): - parser = argparse.ArgumentParser( - __file__, __doc__, - formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) - modes = parser.add_mutually_exclusive_group(required=True) - modes.add_argument('--list', - action='store_true', - help='list all variables') - modes.add_argument('--host', help='list variables for a single host') - modes.add_argument('--version', - action='store_true', - help='print version and exit') - modes.add_argument('--hostfile', - action='store_true', - help='print hosts as a /etc/hosts snippet') - parser.add_argument('--pretty', - action='store_true', - help='pretty-print output JSON') - parser.add_argument('--nometa', - action='store_true', - help='with --list, exclude hostvars') - default_root = os.environ.get('TERRAFORM_STATE_ROOT', - os.path.abspath(os.path.join(os.path.dirname(__file__), - '..', '..', ))) - parser.add_argument('--root', - default=default_root, - help='custom root to search for `.tfstate`s in') - - args = parser.parse_args() - - if args.version: - print('%s %s' % (__file__, VERSION)) - parser.exit() - - hosts = iterhosts(iterresources(tfstates(args.root))) - - # Perform a second pass on the file to pick up floating_ip entries to update the ip address of referenced hosts - ips = dict(iterips(iterresources(tfstates(args.root)))) - - if ips: - hosts = iter_host_ips(hosts, ips) - - if args.list: - output = query_list(hosts) - if args.nometa: - del output['_meta'] - print(json.dumps(output, indent=4 if args.pretty else None)) - elif args.host: - output = query_host(hosts, args.host) - print(json.dumps(output, indent=4 if args.pretty else None)) - elif args.hostfile: - output = query_hostfile(hosts) - print(output) - - parser.exit() - - -if __name__ == '__main__': - main() diff --git a/contrib/terraform/upcloud/README.md b/contrib/terraform/upcloud/README.md deleted file mode 100644 index 4657de991c6..00000000000 --- a/contrib/terraform/upcloud/README.md +++ 
/dev/null @@ -1,173 +0,0 @@
-# Kubernetes on UpCloud with Terraform
-
-Provision a Kubernetes cluster on [UpCloud](https://upcloud.com/) using Terraform and Kubespray
-
-## Requirements
-
-* Terraform 0.13.0 or newer
-
-## Quickstart
-
-NOTE: Assumes you are at the root of the kubespray repo.
-
-For authentication you can use the following environment variables:
-
-```bash
-export TF_VAR_UPCLOUD_USERNAME=username
-export TF_VAR_UPCLOUD_PASSWORD=password
-```
-
-To allow API access to your UpCloud account, enable API connections by visiting the [Account page](https://hub.upcloud.com/account) in your UpCloud Hub.
-
-Copy the cluster configuration file.
-
-```bash
-CLUSTER=my-upcloud-cluster
-cp -r inventory/sample inventory/$CLUSTER
-cp contrib/terraform/upcloud/cluster-settings.tfvars inventory/$CLUSTER/
-export ANSIBLE_CONFIG=ansible.cfg
-cd inventory/$CLUSTER
-```
-
-Edit `cluster-settings.tfvars` to match your requirements.
-
-Run Terraform to create the infrastructure.
-
-```bash
-terraform init ../../contrib/terraform/upcloud
-terraform apply --var-file cluster-settings.tfvars \
-    -state=tfstate-$CLUSTER.tfstate \
-    ../../contrib/terraform/upcloud/
-```
-
-You should now have an inventory file named `inventory.ini` that you can use with kubespray to set up a cluster.
-
-It is a good idea to check that you have basic SSH connectivity to the nodes. You can do that by running:
-
-```bash
-ansible -i inventory.ini -m ping all
-```
-
-You can then set up Kubernetes with kubespray using the generated inventory:
-
-```bash
-ansible-playbook -i inventory.ini ../../cluster.yml -b -v
-```
-
-## Teardown
-
-You can tear down your infrastructure using the following Terraform command:
-
-```bash
-terraform destroy --var-file cluster-settings.tfvars \
-    -state=tfstate-$CLUSTER.tfstate \
-    ../../contrib/terraform/upcloud/
-```
-
-## Variables
-
-* `prefix`: Prefix to add to all resources; if set to "", no prefix is added
-* `template_name`: The name or UUID of a base image
-* `username`: A user to access the nodes; defaults to "ubuntu"
-* `private_network_cidr`: CIDR to use for the private network, defaults to "172.16.0.0/24"
-* `dns_servers`: DNS servers that will be used by the nodes. Until [this is solved](https://github.com/UpCloudLtd/terraform-provider-upcloud/issues/562) this is done using user_data to reconfigure systemd-resolved. Defaults to `[]`
-* `use_public_ips`: If a NIC connected to the Public network should be attached to all nodes by default. Can be overridden by `force_public_ip` if this is set to `false`. Defaults to `true`
-* `ssh_public_keys`: List of public SSH keys to install on all machines
-* `zone`: The zone in which to run the cluster
-* `machines`: Machines to provision. The key of this object will be used as the name of the machine
-  * `node_type`: The role of this node *(master|worker)*
-  * `plan`: Preconfigured cpu/mem plan to use (disables `cpu` and `mem` attributes below)
-  * `cpu`: Number of CPU cores
-  * `mem`: Memory size in MB
-  * `disk_size`: The size of the storage in GB
-  * `force_public_ip`: If `use_public_ips` is set to `false`, this forces a public NIC onto the machine anyway when set to `true`. Useful if you're migrating from public nodes to only private. Defaults to `false`
-  * `dns_servers`: This works the same way as the global `dns_servers` but only applies to a single node. If set to `[]` while the global `dns_servers` is set to something else, then it will not add the user_data and thus will not be recreated.
Useful if you're migrating from public nodes to only private. Defaults to `null` - * `additional_disks`: Additional disks to attach to the node. - * `size`: The size of the additional disk in GB - * `tier`: The tier of disk to use (`maxiops` is the only one you can choose atm) -* `firewall_enabled`: Enable firewall rules -* `firewall_default_deny_in`: Set the firewall to deny inbound traffic by default. Automatically adds UpCloud DNS server and NTP port allowlisting. -* `firewall_default_deny_out`: Set the firewall to deny outbound traffic by default. -* `master_allowed_remote_ips`: List of IP ranges that should be allowed to access API of masters - * `start_address`: Start of address range to allow - * `end_address`: End of address range to allow -* `k8s_allowed_remote_ips`: List of IP ranges that should be allowed SSH access to all nodes - * `start_address`: Start of address range to allow - * `end_address`: End of address range to allow -* `master_allowed_ports`: List of port ranges that should be allowed to access the masters - * `protocol`: Protocol *(tcp|udp|icmp)* - * `port_range_min`: Start of port range to allow - * `port_range_max`: End of port range to allow - * `start_address`: Start of address range to allow - * `end_address`: End of address range to allow -* `worker_allowed_ports`: List of port ranges that should be allowed to access the workers - * `protocol`: Protocol *(tcp|udp|icmp)* - * `port_range_min`: Start of port range to allow - * `port_range_max`: End of port range to allow - * `start_address`: Start of address range to allow - * `end_address`: End of address range to allow -* `loadbalancer_enabled`: Enable managed load balancer -* `loadbalancer_plan`: Plan to use for load balancer *(development|production-small)* -* `loadbalancer_legacy_network`: If the loadbalancer should use the deprecated network field instead of networks blocks. You probably want to have this set to false (default value) -* `loadbalancers`: Ports to load balance and which machines to forward to. Key of this object will be used as the name of the load balancer frontends/backends - * `port`: Port to load balance. - * `target_port`: Port to the backend servers. - * `backend_servers`: List of servers that traffic to the port should be forwarded to. - * `proxy_protocol`: If the loadbalancer should set up the backend using proxy protocol. 
-* `router_enable`: If a router should be connected to the private network or not
-* `gateways`: Gateways that should be connected to the router, requires `router_enable` to be set to `true`
-  * `features`: List of features for the gateway
-  * `plan`: Plan to use for the gateway
-  * `connections`: The connections and tunnels to create for the gateway
-    * `type`: The type of connection
-    * `local_routes`: Map of local routes for the connection
-      * `type`: Type of route
-      * `static_network`: Destination prefix of the route; needs to be a valid IPv4 prefix
-    * `remote_routes`: Map of remote routes for the connection
-      * `type`: Type of route
-      * `static_network`: Destination prefix of the route; needs to be a valid IPv4 prefix
-    * `tunnels`: The tunnels to create for this connection
-      * `remote_address`: The remote address for the tunnel
-      * `ipsec_properties`: Set properties of IPsec; if not set, defaults will be used
-        * `child_rekey_time`: IKE child SA rekey time in seconds
-        * `dpd_delay`: Delay before sending Dead Peer Detection packets if no traffic is detected, in seconds
-        * `dpd_timeout`: Timeout period for DPD reply before considering the peer to be dead, in seconds
-        * `ike_lifetime`: Maximum IKE SA lifetime in seconds
-        * `rekey_time`: IKE SA rekey time in seconds
-        * `phase1_algorithms`: List of Phase 1: Proposal algorithms
-        * `phase1_dh_group_numbers`: List of Phase 1 Diffie-Hellman group numbers
-        * `phase1_integrity_algorithms`: List of Phase 1 integrity algorithms
-        * `phase2_algorithms`: List of Phase 2: Security Association algorithms
-        * `phase2_dh_group_numbers`: List of Phase 2 Diffie-Hellman group numbers
-        * `phase2_integrity_algorithms`: List of Phase 2 integrity algorithms
-* `gateway_vpn_psks`: Separate variable for providing PSKs for connection tunnels. The environment variable can be exported in the following format: `export TF_VAR_gateway_vpn_psks='{"${gateway-name}-${connection-name}-tunnel":{psk:"..."}}'`
-* `static_routes`: Static routes to apply to the router, requires `router_enable` to be set to `true`
-* `network_peerings`: Other UpCloud private networks to peer with, requires `router_enable` to be set to `true`
-* `server_groups`: Group servers together
-  * `servers`: The servers that should be included in the group.
-  * `anti_affinity_policy`: Defines if a server group is an anti-affinity group. Setting this to "strict" or "yes" will result in all servers in the group being placed on separate compute hosts. The value can be "strict", "yes" or "no". "strict" is a strict policy: servers in the same server group are never placed on the same host. "yes" is a best-effort policy: servers are placed on different hosts where possible, but this is not guaranteed.
-
-## Migration
-
-When `null_resource.inventories` and `data.template_file.inventory` were changed to `local_file.inventory`, the old resources need to be removed from existing state files.
-The error messages you'll see if you encounter this are:
-
-```text
-Error: failed to read schema for null_resource.inventories in registry.terraform.io/hashicorp/null: failed to instantiate provider "registry.terraform.io/hashicorp/null" to obtain schema: unavailable provider "registry.terraform.io/hashicorp/null"
-Error: failed to read schema for data.template_file.inventory in registry.terraform.io/hashicorp/template: failed to instantiate provider "registry.terraform.io/hashicorp/template" to obtain schema: unavailable provider "registry.terraform.io/hashicorp/template"
-```
-
-This can be fixed with the following commands:
-
-```bash
-terraform state rm -state=terraform.tfstate null_resource.inventories
-terraform state rm -state=terraform.tfstate data.template_file.inventory
-```
-
-### Public to Private only migration
-
-Since there's no way to remove the public NIC on a machine without recreating its private NIC, it's not possible to change a cluster in place to use only private IPs.
-The way to migrate is to first set `use_public_ips` to `false` and `dns_servers` to some DNS servers, then update all existing servers to have `force_public_ip` set to `true` and `dns_servers` set to `[]`.
-After that you can create new nodes without `force_public_ip` and `dns_servers` set.
-Add the new nodes to the cluster and, when all of them are added, remove the old nodes.
diff --git a/contrib/terraform/upcloud/cluster-settings.tfvars b/contrib/terraform/upcloud/cluster-settings.tfvars
deleted file mode 100644
index 7c592462816..00000000000
--- a/contrib/terraform/upcloud/cluster-settings.tfvars
+++ /dev/null
@@ -1,198 +0,0 @@
-# See: https://developers.upcloud.com/1.3/5-zones/
-zone          = "fi-hel1"
-private_cloud = false
-
-# Only used if private_cloud = true, public zone equivalent
-# For example use finnish public zone for finnish private zone
-public_zone = "fi-hel2"
-
-username = "ubuntu"
-
-# Prefix to use for all resources to separate them from other resources
-prefix = "kubespray"
-
-inventory_file = "inventory.ini"
-
-# Set the operating system using UUID or exact name
-template_name = "Ubuntu Server 20.04 LTS (Focal Fossa)"
-
-ssh_public_keys = [
-  # Put your public SSH key here
-  "ssh-rsa public key 1",
-  "ssh-rsa public key 2",
-]
-
-# check list of available plan https://developers.upcloud.com/1.3/7-plans/
-machines = {
-  "control-plane-0" : {
-    "node_type" : "master",
-    # plan to use instead of custom cpu/mem
-    "plan" : null,
-    #number of cpu cores
-    "cpu" : "2",
-    #memory size in MB
-    "mem" : "4096"
-    # The size of the storage in GB
-    "disk_size" : 250
-    "additional_disks" : {}
-  },
-  "worker-0" : {
-    "node_type" : "worker",
-    # plan to use instead of custom cpu/mem
-    "plan" : null,
-    #number of cpu cores
-    "cpu" : "2",
-    #memory size in MB
-    "mem" : "4096"
-    # The size of the storage in GB
-    "disk_size" : 250
-    "additional_disks" : {
-      # "some-disk-name-1": {
-      #   "size": 100,
-      #   "tier": "maxiops",
-      # },
-      # "some-disk-name-2": {
-      #   "size": 100,
-      #   "tier": "maxiops",
-      # }
-    }
-  },
-  "worker-1" : {
-    "node_type" : "worker",
-    # plan to use instead of custom cpu/mem
-    "plan" : null,
-    #number of cpu cores
-    "cpu" : "2",
-    #memory size in MB
-    "mem" : "4096"
-    # The size of the storage in GB
-    "disk_size" : 250
-    "additional_disks" : {
-      # "some-disk-name-1": {
-      #   "size": 100,
-      #   "tier": "maxiops",
-      # },
-      # "some-disk-name-2": {
-      #   "size": 100,
-      #   "tier": "maxiops",
-      # }
-    }
-  },
-  "worker-2" : {
-    "node_type" : "worker",
-    # plan to use instead of custom cpu/mem
"plan" : null, - #number of cpu cores - "cpu" : "2", - #memory size in MB - "mem" : "4096" - # The size of the storage in GB - "disk_size" : 250 - "additional_disks" : { - # "some-disk-name-1": { - # "size": 100, - # "tier": "maxiops", - # }, - # "some-disk-name-2": { - # "size": 100, - # "tier": "maxiops", - # } - } - } -} - -firewall_enabled = false -firewall_default_deny_in = false -firewall_default_deny_out = false - -master_allowed_remote_ips = [ - { - "start_address" : "0.0.0.0" - "end_address" : "255.255.255.255" - } -] - -k8s_allowed_remote_ips = [ - { - "start_address" : "0.0.0.0" - "end_address" : "255.255.255.255" - } -] - -master_allowed_ports = [] -worker_allowed_ports = [] - -loadbalancer_enabled = false -loadbalancer_plan = "development" -loadbalancers = { - # "http" : { - # "proxy_protocol" : false - # "port" : 80, - # "target_port" : 80, - # "backend_servers" : [ - # "worker-0", - # "worker-1", - # "worker-2" - # ] - # } -} - -server_groups = { - # "control-plane" = { - # servers = [ - # "control-plane-0" - # ] - # anti_affinity_policy = "strict" - # }, - # "workers" = { - # servers = [ - # "worker-0", - # "worker-1", - # "worker-2" - # ] - # anti_affinity_policy = "yes" - # } -} - -router_enable = false -gateways = { - # "gateway" : { - # features: [ "vpn" ] - # plan = "production" - # connections = { - # "connection" = { - # name = "connection" - # type = "ipsec" - # remote_routes = { - # "them" = { - # type = "static" - # static_network = "1.2.3.4/24" - # } - # } - # local_routes = { - # "me" = { - # type = "static" - # static_network = "4.3.2.1/24" - # } - # } - # tunnels = { - # "tunnel1" = { - # remote_address = "1.2.3.4" - # } - # } - # } - # } - # } -} -# gateway_vpn_psks = {} # Should be loaded as an environment variable -static_routes = { - # "route": { - # route: "1.2.3.4/24" - # nexthop: "4.3.2.1" - # } -} -network_peerings = { - # "peering": { - # remote_network: "uuid" - # } -} diff --git a/contrib/terraform/upcloud/main.tf b/contrib/terraform/upcloud/main.tf deleted file mode 100644 index 9ea73b7bbf2..00000000000 --- a/contrib/terraform/upcloud/main.tf +++ /dev/null @@ -1,65 +0,0 @@ - -terraform { - required_version = ">= 0.13.0" -} -provider "upcloud" { - # Your UpCloud credentials are read from environment variables: - username = var.UPCLOUD_USERNAME - password = var.UPCLOUD_PASSWORD -} - -module "kubernetes" { - source = "./modules/kubernetes-cluster" - - prefix = var.prefix - zone = var.zone - private_cloud = var.private_cloud - public_zone = var.public_zone - - template_name = var.template_name - username = var.username - - private_network_cidr = var.private_network_cidr - dns_servers = var.dns_servers - use_public_ips = var.use_public_ips - - machines = var.machines - - ssh_public_keys = var.ssh_public_keys - - firewall_enabled = var.firewall_enabled - firewall_default_deny_in = var.firewall_default_deny_in - firewall_default_deny_out = var.firewall_default_deny_out - master_allowed_remote_ips = var.master_allowed_remote_ips - k8s_allowed_remote_ips = var.k8s_allowed_remote_ips - bastion_allowed_remote_ips = var.bastion_allowed_remote_ips - master_allowed_ports = var.master_allowed_ports - worker_allowed_ports = var.worker_allowed_ports - - loadbalancer_enabled = var.loadbalancer_enabled - loadbalancer_plan = var.loadbalancer_plan - loadbalancer_legacy_network = var.loadbalancer_legacy_network - loadbalancers = var.loadbalancers - - router_enable = var.router_enable - gateways = var.gateways - gateway_vpn_psks = var.gateway_vpn_psks - static_routes = 
var.static_routes
-  network_peerings = var.network_peerings
-
-  server_groups = var.server_groups
-}
-
-#
-# Generate ansible inventory
-#
-
-resource "local_file" "inventory" {
-  content = templatefile("${path.module}/templates/inventory.tpl", {
-    master_ip  = module.kubernetes.master_ip
-    worker_ip  = module.kubernetes.worker_ip
-    bastion_ip = module.kubernetes.bastion_ip
-    username   = var.username
-  })
-  filename = var.inventory_file
-}
diff --git a/contrib/terraform/upcloud/modules/kubernetes-cluster/main.tf b/contrib/terraform/upcloud/modules/kubernetes-cluster/main.tf
deleted file mode 100644
index 37ab2357385..00000000000
--- a/contrib/terraform/upcloud/modules/kubernetes-cluster/main.tf
+++ /dev/null
@@ -1,904 +0,0 @@
-locals {
-  # Create a list of all disks to create
-  disks = flatten([
-    for node_name, machine in var.machines : [
-      for disk_name, disk in machine.additional_disks : {
-        disk      = disk
-        disk_name = disk_name
-        node_name = node_name
-      }
-    ]
-  ])
-
-  lb_backend_servers = flatten([
-    for lb_name, loadbalancer in var.loadbalancers : [
-      for backend_server in loadbalancer.backend_servers : {
-        port        = loadbalancer.target_port
-        lb_name     = lb_name
-        server_name = backend_server
-      }
-    ]
-  ])
-
-  gateway_connections = flatten([
-    for gateway_name, gateway in var.gateways : [
-      for connection_name, connection in gateway.connections : {
-        "gateway_id"      = upcloud_gateway.gateway[gateway_name].id
-        "gateway_name"    = gateway_name
-        "connection_name" = connection_name
-        "type"            = connection.type
-        "local_routes"    = connection.local_routes
-        "remote_routes"   = connection.remote_routes
-      }
-    ]
-  ])
-
-  gateway_connection_tunnels = flatten([
-    for gateway_name, gateway in var.gateways : [
-      for connection_name, connection in gateway.connections : [
-        for tunnel_name, tunnel in connection.tunnels : {
-          "gateway_id"         = upcloud_gateway.gateway[gateway_name].id
-          "gateway_name"       = gateway_name
-          "connection_id"      = upcloud_gateway_connection.gateway_connection["${gateway_name}-${connection_name}"].id
-          "connection_name"    = connection_name
-          "tunnel_name"        = tunnel_name
-          "local_address_name" = tolist(upcloud_gateway.gateway[gateway_name].address).0.name
-          "remote_address"     = tunnel.remote_address
-          "ipsec_properties"   = tunnel.ipsec_properties
-        }
-      ]
-    ]
-  ])
-
-  # If prefix is set, all resources will be prefixed with "${var.prefix}-"
-  # Else don't prefix with anything
-  resource-prefix = "%{if var.prefix != ""}${var.prefix}-%{endif}"
-
-  master_ip = {
-    for instance in upcloud_server.master :
-    instance.hostname => {
-      for nic in instance.network_interface :
-      nic.type => nic.ip_address
-      if nic.ip_address != null
-    }
-  }
-  worker_ip = {
-    for instance in upcloud_server.worker :
-    instance.hostname => {
-      for nic in instance.network_interface :
-      nic.type => nic.ip_address
-      if nic.ip_address != null
-    }
-  }
-
-  bastion_ip = {
-    for instance in upcloud_server.bastion :
-    instance.hostname => {
-      for nic in instance.network_interface :
-      nic.type => nic.ip_address
-      if nic.ip_address != null
-    }
-  }
-
-  node_user_data = {
-    for name, machine in var.machines :
-    name => <<EOF
-%{ if ( length(machine.dns_servers != null ? machine.dns_servers : []) > 0 ) || ( length(var.dns_servers) > 0 && machine.dns_servers == null ) ~}
-#!/bin/bash
-echo -e "[Resolve]\nDNS=${ join(" ", length(machine.dns_servers != null ? machine.dns_servers : []) > 0 ?
machine.dns_servers : var.dns_servers) }" > /etc/systemd/resolved.conf - -systemctl restart systemd-resolved -%{ endif ~} -EOF - } -} - -resource "upcloud_network" "private" { - name = "${local.resource-prefix}k8s-network" - zone = var.zone - - ip_network { - address = var.private_network_cidr - dhcp_default_route = var.router_enable - # TODO: When support for dhcp_dns for private networks are in, remove the user_data and enable it here. - # See more here https://github.com/UpCloudLtd/terraform-provider-upcloud/issues/562 - # dhcp_dns = length(var.private_network_dns) > 0 ? var.private_network_dns : null - dhcp = true - family = "IPv4" - } - - router = var.router_enable ? upcloud_router.router[0].id : null -} - -resource "upcloud_storage" "additional_disks" { - for_each = { - for disk in local.disks : "${disk.node_name}_${disk.disk_name}" => disk.disk - } - - size = each.value.size - tier = each.value.tier - title = "${local.resource-prefix}${each.key}" - zone = var.zone -} - -resource "upcloud_server" "master" { - for_each = { - for name, machine in var.machines : - name => machine - if machine.node_type == "master" - } - - hostname = "${local.resource-prefix}${each.key}" - plan = each.value.plan - cpu = each.value.cpu - mem = each.value.mem - zone = var.zone - server_group = each.value.server_group == null ? null : upcloud_server_group.server_groups[each.value.server_group].id - - template { - storage = var.template_name - size = each.value.disk_size - } - - dynamic "network_interface" { - for_each = each.value.force_public_ip || var.use_public_ips ? [1] : [] - - content { - type = "public" - } - } - - # Private network interface - network_interface { - type = "private" - network = upcloud_network.private.id - } - - # Ignore volumes created by csi-driver - lifecycle { - ignore_changes = [storage_devices] - } - - firewall = var.firewall_enabled - - dynamic "storage_devices" { - for_each = { - for disk_key_name, disk in upcloud_storage.additional_disks : - disk_key_name => disk - # Only add the disk if it matches the node name in the start of its name - if length(regexall("^${each.key}_.+", disk_key_name)) > 0 - } - - content { - storage = storage_devices.value.id - } - } - - # Include at least one public SSH key - login { - user = var.username - keys = var.ssh_public_keys - create_password = false - } - - metadata = local.node_user_data[each.key] != "" ? true : null - user_data = local.node_user_data[each.key] != "" ? local.node_user_data[each.key] : null -} - -resource "upcloud_server" "worker" { - for_each = { - for name, machine in var.machines : - name => machine - if machine.node_type == "worker" - } - - hostname = "${local.resource-prefix}${each.key}" - plan = each.value.plan - cpu = each.value.cpu - mem = each.value.mem - zone = var.zone - server_group = each.value.server_group == null ? null : upcloud_server_group.server_groups[each.value.server_group].id - - - template { - storage = var.template_name - size = each.value.disk_size - } - - dynamic "network_interface" { - for_each = each.value.force_public_ip || var.use_public_ips ? 
[1] : []
-
-    content {
-      type = "public"
-    }
-  }
-
-  # Private network interface
-  network_interface {
-    type    = "private"
-    network = upcloud_network.private.id
-  }
-
-  # Ignore volumes created by csi-driver
-  lifecycle {
-    ignore_changes = [storage_devices]
-  }
-
-  firewall = var.firewall_enabled
-
-  dynamic "storage_devices" {
-    for_each = {
-      for disk_key_name, disk in upcloud_storage.additional_disks :
-      disk_key_name => disk
-      # Only add the disk if it matches the node name in the start of its name
-      if length(regexall("^${each.key}_.+", disk_key_name)) > 0
-    }
-
-    content {
-      storage = storage_devices.value.id
-    }
-  }
-
-  # Include at least one public SSH key
-  login {
-    user            = var.username
-    keys            = var.ssh_public_keys
-    create_password = false
-  }
-
-  metadata  = local.node_user_data[each.key] != "" ? true : null
-  user_data = local.node_user_data[each.key] != "" ? local.node_user_data[each.key] : null
-}
-
-resource "upcloud_server" "bastion" {
-  for_each = {
-    for name, machine in var.machines :
-    name => machine
-    if machine.node_type == "bastion"
-  }
-
-  hostname     = "${local.resource-prefix}${each.key}"
-  plan         = each.value.plan
-  cpu          = each.value.cpu
-  mem          = each.value.mem
-  zone         = var.zone
-  server_group = each.value.server_group == null ? null : upcloud_server_group.server_groups[each.value.server_group].id
-
-
-  template {
-    storage = var.template_name
-    size    = each.value.disk_size
-  }
-
-  # Private network interface
-  network_interface {
-    type    = "private"
-    network = upcloud_network.private.id
-  }
-
-  # Public network interface
-  network_interface {
-    type = "public"
-  }
-
-  firewall = var.firewall_enabled
-
-  dynamic "storage_devices" {
-    for_each = {
-      for disk_key_name, disk in upcloud_storage.additional_disks :
-      disk_key_name => disk
-      # Only add the disk if it matches the node name in the start of its name
-      if length(regexall("^${each.key}_.+", disk_key_name)) > 0
-    }
-
-    content {
-      storage = storage_devices.value.id
-    }
-  }
-
-  # Include at least one public SSH key
-  login {
-    user            = var.username
-    keys            = var.ssh_public_keys
-    create_password = false
-  }
-}
-
-resource "upcloud_firewall_rules" "master" {
-  for_each  = upcloud_server.master
-  server_id = each.value.id
-
-  dynamic "firewall_rule" {
-    for_each = var.master_allowed_remote_ips
-
-    content {
-      action                 = "accept"
-      comment                = "Allow master API access from this network"
-      destination_port_end   = "6443"
-      destination_port_start = "6443"
-      direction              = "in"
-      family                 = "IPv4"
-      protocol               = "tcp"
-      source_address_end     = firewall_rule.value.end_address
-      source_address_start   = firewall_rule.value.start_address
-    }
-  }
-
-  dynamic "firewall_rule" {
-    for_each = length(var.master_allowed_remote_ips) > 0 ? [1] : []
-
-    content {
-      action                 = "drop"
-      comment                = "Deny master API access from other networks"
-      destination_port_end   = "6443"
-      destination_port_start = "6443"
-      direction              = "in"
-      family                 = "IPv4"
-      protocol               = "tcp"
-      source_address_end     = "255.255.255.255"
-      source_address_start   = "0.0.0.0"
-    }
-  }
-
-  dynamic "firewall_rule" {
-    for_each = var.k8s_allowed_remote_ips
-
-    content {
-      action                 = "accept"
-      comment                = "Allow SSH from this network"
-      destination_port_end   = "22"
-      destination_port_start = "22"
-      direction              = "in"
-      family                 = "IPv4"
-      protocol               = "tcp"
-      source_address_end     = firewall_rule.value.end_address
-      source_address_start   = firewall_rule.value.start_address
-    }
-  }
-
-  dynamic "firewall_rule" {
-    for_each = length(var.k8s_allowed_remote_ips) > 0 ?
[1] : [] - - content { - action = "drop" - comment = "Deny SSH from other networks" - destination_port_end = "22" - destination_port_start = "22" - direction = "in" - family = "IPv4" - protocol = "tcp" - source_address_end = "255.255.255.255" - source_address_start = "0.0.0.0" - } - } - - dynamic "firewall_rule" { - for_each = var.master_allowed_ports - - content { - action = "accept" - comment = "Allow access on this port" - destination_port_end = firewall_rule.value.port_range_max - destination_port_start = firewall_rule.value.port_range_min - direction = "in" - family = "IPv4" - protocol = firewall_rule.value.protocol - source_address_end = firewall_rule.value.end_address - source_address_start = firewall_rule.value.start_address - } - } - - dynamic "firewall_rule" { - for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : [] - - content { - action = "accept" - comment = "UpCloud DNS" - source_port_end = "53" - source_port_start = "53" - direction = "in" - family = "IPv4" - protocol = firewall_rule.value - source_address_end = "94.237.40.9" - source_address_start = "94.237.40.9" - } - } - - dynamic "firewall_rule" { - for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : [] - - content { - action = "accept" - comment = "UpCloud DNS" - source_port_end = "53" - source_port_start = "53" - direction = "in" - family = "IPv4" - protocol = firewall_rule.value - source_address_end = "94.237.127.9" - source_address_start = "94.237.127.9" - } - } - - dynamic "firewall_rule" { - for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : [] - - content { - action = "accept" - comment = "UpCloud DNS" - source_port_end = "53" - source_port_start = "53" - direction = "in" - family = "IPv6" - protocol = firewall_rule.value - source_address_end = "2a04:3540:53::1" - source_address_start = "2a04:3540:53::1" - } - } - - dynamic "firewall_rule" { - for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : [] - - content { - action = "accept" - comment = "UpCloud DNS" - source_port_end = "53" - source_port_start = "53" - direction = "in" - family = "IPv6" - protocol = firewall_rule.value - source_address_end = "2a04:3544:53::1" - source_address_start = "2a04:3544:53::1" - } - } - - dynamic "firewall_rule" { - for_each = var.firewall_default_deny_in ? ["udp"] : [] - - content { - action = "accept" - comment = "NTP Port" - source_port_end = "123" - source_port_start = "123" - direction = "in" - family = "IPv4" - protocol = firewall_rule.value - source_address_end = "255.255.255.255" - source_address_start = "0.0.0.0" - } - } - - dynamic "firewall_rule" { - for_each = var.firewall_default_deny_in ? ["udp"] : [] - - content { - action = "accept" - comment = "NTP Port" - source_port_end = "123" - source_port_start = "123" - direction = "in" - family = "IPv6" - protocol = firewall_rule.value - } - } - - firewall_rule { - action = var.firewall_default_deny_in ? "drop" : "accept" - direction = "in" - } - - firewall_rule { - action = var.firewall_default_deny_out ? 
"drop" : "accept" - direction = "out" - } -} - -resource "upcloud_firewall_rules" "k8s" { - for_each = upcloud_server.worker - server_id = each.value.id - - dynamic "firewall_rule" { - for_each = var.k8s_allowed_remote_ips - - content { - action = "accept" - comment = "Allow SSH from this network" - destination_port_end = "22" - destination_port_start = "22" - direction = "in" - family = "IPv4" - protocol = "tcp" - source_address_end = firewall_rule.value.end_address - source_address_start = firewall_rule.value.start_address - } - } - - dynamic "firewall_rule" { - for_each = length(var.k8s_allowed_remote_ips) > 0 ? [1] : [] - - content { - action = "drop" - comment = "Deny SSH from other networks" - destination_port_end = "22" - destination_port_start = "22" - direction = "in" - family = "IPv4" - protocol = "tcp" - source_address_end = "255.255.255.255" - source_address_start = "0.0.0.0" - } - } - - dynamic "firewall_rule" { - for_each = var.worker_allowed_ports - - content { - action = "accept" - comment = "Allow access on this port" - destination_port_end = firewall_rule.value.port_range_max - destination_port_start = firewall_rule.value.port_range_min - direction = "in" - family = "IPv4" - protocol = firewall_rule.value.protocol - source_address_end = firewall_rule.value.end_address - source_address_start = firewall_rule.value.start_address - } - } - - dynamic "firewall_rule" { - for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : [] - - content { - action = "accept" - comment = "UpCloud DNS" - source_port_end = "53" - source_port_start = "53" - direction = "in" - family = "IPv4" - protocol = firewall_rule.value - source_address_end = "94.237.40.9" - source_address_start = "94.237.40.9" - } - } - - dynamic "firewall_rule" { - for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : [] - - content { - action = "accept" - comment = "UpCloud DNS" - source_port_end = "53" - source_port_start = "53" - direction = "in" - family = "IPv4" - protocol = firewall_rule.value - source_address_end = "94.237.127.9" - source_address_start = "94.237.127.9" - } - } - - dynamic "firewall_rule" { - for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : [] - - content { - action = "accept" - comment = "UpCloud DNS" - source_port_end = "53" - source_port_start = "53" - direction = "in" - family = "IPv6" - protocol = firewall_rule.value - source_address_end = "2a04:3540:53::1" - source_address_start = "2a04:3540:53::1" - } - } - - dynamic "firewall_rule" { - for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : [] - - content { - action = "accept" - comment = "UpCloud DNS" - source_port_end = "53" - source_port_start = "53" - direction = "in" - family = "IPv6" - protocol = firewall_rule.value - source_address_end = "2a04:3544:53::1" - source_address_start = "2a04:3544:53::1" - } - } - - dynamic "firewall_rule" { - for_each = var.firewall_default_deny_in ? ["udp"] : [] - - content { - action = "accept" - comment = "NTP Port" - source_port_end = "123" - source_port_start = "123" - direction = "in" - family = "IPv4" - protocol = firewall_rule.value - source_address_end = "255.255.255.255" - source_address_start = "0.0.0.0" - } - } - - dynamic "firewall_rule" { - for_each = var.firewall_default_deny_in ? ["udp"] : [] - - content { - action = "accept" - comment = "NTP Port" - source_port_end = "123" - source_port_start = "123" - direction = "in" - family = "IPv6" - protocol = firewall_rule.value - } - } - - firewall_rule { - action = var.firewall_default_deny_in ? 
"drop" : "accept" - direction = "in" - } - - firewall_rule { - action = var.firewall_default_deny_out ? "drop" : "accept" - direction = "out" - } -} - -resource "upcloud_firewall_rules" "bastion" { - for_each = upcloud_server.bastion - server_id = each.value.id - - dynamic "firewall_rule" { - for_each = var.bastion_allowed_remote_ips - - content { - action = "accept" - comment = "Allow bastion SSH access from this network" - destination_port_end = "22" - destination_port_start = "22" - direction = "in" - family = "IPv4" - protocol = "tcp" - source_address_end = firewall_rule.value.end_address - source_address_start = firewall_rule.value.start_address - } - } - - dynamic "firewall_rule" { - for_each = length(var.bastion_allowed_remote_ips) > 0 ? [1] : [] - - content { - action = "drop" - comment = "Drop bastion SSH access from other networks" - destination_port_end = "22" - destination_port_start = "22" - direction = "in" - family = "IPv4" - protocol = "tcp" - source_address_end = "255.255.255.255" - source_address_start = "0.0.0.0" - } - } - - firewall_rule { - action = var.firewall_default_deny_in ? "drop" : "accept" - direction = "in" - } - - firewall_rule { - action = var.firewall_default_deny_out ? "drop" : "accept" - direction = "out" - } -} - -resource "upcloud_loadbalancer" "lb" { - count = var.loadbalancer_enabled ? 1 : 0 - configured_status = "started" - name = "${local.resource-prefix}lb" - plan = var.loadbalancer_plan - zone = var.private_cloud ? var.public_zone : var.zone - network = var.loadbalancer_legacy_network ? upcloud_network.private.id : null - - dynamic "networks" { - for_each = var.loadbalancer_legacy_network ? [] : [1] - - content { - name = "Private-Net" - type = "private" - family = "IPv4" - network = upcloud_network.private.id - } - } - - dynamic "networks" { - for_each = var.loadbalancer_legacy_network ? [] : [1] - - content { - name = "Public-Net" - type = "public" - family = "IPv4" - } - } - - lifecycle { - ignore_changes = [ maintenance_dow, maintenance_time ] - } -} - -resource "upcloud_loadbalancer_backend" "lb_backend" { - for_each = var.loadbalancer_enabled ? var.loadbalancers : {} - - loadbalancer = upcloud_loadbalancer.lb[0].id - name = "lb-backend-${each.key}" - properties { - outbound_proxy_protocol = each.value.proxy_protocol ? "v2" : "" - } -} - -resource "upcloud_loadbalancer_frontend" "lb_frontend" { - for_each = var.loadbalancer_enabled ? var.loadbalancers : {} - - loadbalancer = upcloud_loadbalancer.lb[0].id - name = "lb-frontend-${each.key}" - mode = "tcp" - port = each.value.port - default_backend_name = upcloud_loadbalancer_backend.lb_backend[each.key].name - - dynamic "networks" { - for_each = var.loadbalancer_legacy_network ? [] : [1] - - content { - name = "Public-Net" - } - } - - dynamic "networks" { - for_each = each.value.allow_internal_frontend ? [1] : [] - - content{ - name = "Private-Net" - } - } -} - -resource "upcloud_loadbalancer_static_backend_member" "lb_backend_member" { - for_each = { - for be_server in local.lb_backend_servers : - "${be_server.server_name}-lb-backend-${be_server.lb_name}" => be_server - if var.loadbalancer_enabled - } - - backend = upcloud_loadbalancer_backend.lb_backend[each.value.lb_name].id - name = "${local.resource-prefix}${each.key}" - ip = merge(local.master_ip, local.worker_ip)["${local.resource-prefix}${each.value.server_name}"].private - port = each.value.port - weight = 100 - max_sessions = var.loadbalancer_plan == "production-small" ? 
50000 : 1000 - enabled = true -} - -resource "upcloud_server_group" "server_groups" { - for_each = var.server_groups - title = each.key - anti_affinity_policy = each.value.anti_affinity_policy - labels = {} - # Managed upstream via upcloud_server resource - members = [] - lifecycle { - ignore_changes = [members] - } -} - -resource "upcloud_router" "router" { - count = var.router_enable ? 1 : 0 - - name = "${local.resource-prefix}router" - - dynamic "static_route" { - for_each = var.static_routes - - content { - name = static_route.key - - nexthop = static_route.value["nexthop"] - route = static_route.value["route"] - } - } - -} - -resource "upcloud_gateway" "gateway" { - for_each = var.router_enable ? var.gateways : {} - name = "${local.resource-prefix}${each.key}-gateway" - zone = var.private_cloud ? var.public_zone : var.zone - - features = each.value.features - plan = each.value.plan - - router { - id = upcloud_router.router[0].id - } -} - -resource "upcloud_gateway_connection" "gateway_connection" { - for_each = { - for gc in local.gateway_connections : "${gc.gateway_name}-${gc.connection_name}" => gc - } - - gateway = each.value.gateway_id - name = "${local.resource-prefix}${each.key}-gateway-connection" - type = each.value.type - - dynamic "local_route" { - for_each = each.value.local_routes - - content { - name = local_route.key - type = local_route.value["type"] - static_network = local_route.value["static_network"] - } - } - - dynamic "remote_route" { - for_each = each.value.remote_routes - - content { - name = remote_route.key - type = remote_route.value["type"] - static_network = remote_route.value["static_network"] - } - } -} - -resource "upcloud_gateway_connection_tunnel" "gateway_connection_tunnel" { - for_each = { - for gct in local.gateway_connection_tunnels : "${gct.gateway_name}-${gct.connection_name}-${gct.tunnel_name}-tunnel" => gct - } - - connection_id = each.value.connection_id - name = each.key - local_address_name = each.value.local_address_name - remote_address = each.value.remote_address - - ipsec_auth_psk { - psk = var.gateway_vpn_psks[each.key].psk - } - - dynamic "ipsec_properties" { - for_each = each.value.ipsec_properties != null ? 
{ "ip": each.value.ipsec_properties } : {} - - content { - child_rekey_time = ipsec_properties.value["child_rekey_time"] - dpd_delay = ipsec_properties.value["dpd_delay"] - dpd_timeout = ipsec_properties.value["dpd_timeout"] - ike_lifetime = ipsec_properties.value["ike_lifetime"] - rekey_time = ipsec_properties.value["rekey_time"] - phase1_algorithms = ipsec_properties.value["phase1_algorithms"] - phase1_dh_group_numbers = ipsec_properties.value["phase1_dh_group_numbers"] - phase1_integrity_algorithms = ipsec_properties.value["phase1_integrity_algorithms"] - phase2_algorithms = ipsec_properties.value["phase2_algorithms"] - phase2_dh_group_numbers = ipsec_properties.value["phase2_dh_group_numbers"] - phase2_integrity_algorithms = ipsec_properties.value["phase2_integrity_algorithms"] - } - } -} - -resource "upcloud_network_peering" "peering" { - for_each = var.network_peerings - - name = "${local.resource-prefix}${each.key}" - - network { - uuid = upcloud_network.private.id - } - - peer_network { - uuid = each.value.remote_network - } -} diff --git a/contrib/terraform/upcloud/modules/kubernetes-cluster/output.tf b/contrib/terraform/upcloud/modules/kubernetes-cluster/output.tf deleted file mode 100644 index e75b9faa077..00000000000 --- a/contrib/terraform/upcloud/modules/kubernetes-cluster/output.tf +++ /dev/null @@ -1,15 +0,0 @@ -output "master_ip" { - value = local.master_ip -} - -output "worker_ip" { - value = local.worker_ip -} - -output "bastion_ip" { - value = local.bastion_ip -} - -output "loadbalancer_domain" { - value = var.loadbalancer_enabled ? upcloud_loadbalancer.lb[0].dns_name : null -} diff --git a/contrib/terraform/upcloud/modules/kubernetes-cluster/variables.tf b/contrib/terraform/upcloud/modules/kubernetes-cluster/variables.tf deleted file mode 100644 index eeb1a70c4f3..00000000000 --- a/contrib/terraform/upcloud/modules/kubernetes-cluster/variables.tf +++ /dev/null @@ -1,202 +0,0 @@ -variable "prefix" { - type = string -} - -variable "zone" { - type = string -} - -variable "private_cloud" { - type = bool -} - -variable "public_zone" { - type = string -} - -variable "template_name" {} - -variable "username" {} - -variable "private_network_cidr" {} - -variable "dns_servers" {} - -variable "use_public_ips" {} - -variable "machines" { - description = "Cluster machines" - type = map(object({ - node_type = string - plan = string - cpu = optional(number) - mem = optional(number) - disk_size = number - server_group : string - force_public_ip : optional(bool, false) - dns_servers : optional(set(string)) - additional_disks = map(object({ - size = number - tier = string - })) - })) -} - -variable "ssh_public_keys" { - type = list(string) -} - -variable "firewall_enabled" { - type = bool -} - -variable "master_allowed_remote_ips" { - type = list(object({ - start_address = string - end_address = string - })) -} - -variable "k8s_allowed_remote_ips" { - type = list(object({ - start_address = string - end_address = string - })) -} - -variable "bastion_allowed_remote_ips" { - type = list(object({ - start_address = string - end_address = string - })) -} - -variable "master_allowed_ports" { - type = list(object({ - protocol = string - port_range_min = number - port_range_max = number - start_address = string - end_address = string - })) -} - -variable "worker_allowed_ports" { - type = list(object({ - protocol = string - port_range_min = number - port_range_max = number - start_address = string - end_address = string - })) -} - -variable "firewall_default_deny_in" { - type = bool -} - 
-variable "firewall_default_deny_out" { - type = bool -} - -variable "loadbalancer_enabled" { - type = bool -} - -variable "loadbalancer_plan" { - type = string -} - -variable "loadbalancer_legacy_network" { - type = bool - default = false -} - -variable "loadbalancers" { - description = "Load balancers" - - type = map(object({ - proxy_protocol = bool - port = number - target_port = number - allow_internal_frontend = optional(bool) - backend_servers = list(string) - })) -} - -variable "server_groups" { - description = "Server groups" - - type = map(object({ - anti_affinity_policy = string - })) -} - -variable "router_enable" { - description = "If a router should be enabled and connected to the private network or not" - - type = bool -} - -variable "gateways" { - description = "Gateways that should be connected to the router, requires router_enable is set to true" - - type = map(object({ - features = list(string) - plan = optional(string) - connections = optional(map(object({ - type = string - local_routes = optional(map(object({ - type = string - static_network = string - }))) - remote_routes = optional(map(object({ - type = string - static_network = string - }))) - tunnels = optional(map(object({ - remote_address = string - ipsec_properties = optional(object({ - child_rekey_time = number - dpd_delay = number - dpd_timeout = number - ike_lifetime = number - rekey_time = number - phase1_algorithms = set(string) - phase1_dh_group_numbers = set(string) - phase1_integrity_algorithms = set(string) - phase2_algorithms = set(string) - phase2_dh_group_numbers = set(string) - phase2_integrity_algorithms = set(string) - })) - }))) - }))) - })) -} - -variable "gateway_vpn_psks" { - description = "Separate variable for providing psks for connection tunnels" - - type = map(object({ - psk = string - })) - default = {} - sensitive = true -} - -variable "static_routes" { - description = "Static routes to apply to the router, requires router_enable is set to true" - - type = map(object({ - nexthop = string - route = string - })) -} - -variable "network_peerings" { - description = "Other UpCloud private networks to peer with, requires router_enable is set to true" - - type = map(object({ - remote_network = string - })) -} diff --git a/contrib/terraform/upcloud/modules/kubernetes-cluster/versions.tf b/contrib/terraform/upcloud/modules/kubernetes-cluster/versions.tf deleted file mode 100644 index 4db5980d212..00000000000 --- a/contrib/terraform/upcloud/modules/kubernetes-cluster/versions.tf +++ /dev/null @@ -1,10 +0,0 @@ - -terraform { - required_providers { - upcloud = { - source = "UpCloudLtd/upcloud" - version = "~>5.9.0" - } - } - required_version = ">= 0.13" -} diff --git a/contrib/terraform/upcloud/output.tf b/contrib/terraform/upcloud/output.tf deleted file mode 100644 index d56d6e44619..00000000000 --- a/contrib/terraform/upcloud/output.tf +++ /dev/null @@ -1,16 +0,0 @@ - -output "master_ip" { - value = module.kubernetes.master_ip -} - -output "worker_ip" { - value = module.kubernetes.worker_ip -} - -output "bastion_ip" { - value = module.kubernetes.bastion_ip -} - -output "loadbalancer_domain" { - value = module.kubernetes.loadbalancer_domain -} diff --git a/contrib/terraform/upcloud/sample-inventory/cluster.tfvars b/contrib/terraform/upcloud/sample-inventory/cluster.tfvars deleted file mode 100644 index d1546004bcc..00000000000 --- a/contrib/terraform/upcloud/sample-inventory/cluster.tfvars +++ /dev/null @@ -1,149 +0,0 @@ -# See: https://developers.upcloud.com/1.3/5-zones/ -zone = "fi-hel1" 
-username = "ubuntu" - -# Prefix to use for all resources to separate them from other resources -prefix = "kubespray" - -inventory_file = "inventory.ini" - -# Set the operating system using UUID or exact name -template_name = "Ubuntu Server 20.04 LTS (Focal Fossa)" - -ssh_public_keys = [ - # Put your public SSH key here - "ssh-rsa I-did-not-read-the-docs", - "ssh-rsa I-did-not-read-the-docs 2", -] - -# check list of available plan https://developers.upcloud.com/1.3/7-plans/ -machines = { - "control-plane-0" : { - "node_type" : "master", - # plan to use instead of custom cpu/mem - "plan" : null, - #number of cpu cores - "cpu" : "2", - #memory size in MB - "mem" : "4096" - # The size of the storage in GB - "disk_size" : 250 - "additional_disks" : {} - }, - "worker-0" : { - "node_type" : "worker", - # plan to use instead of custom cpu/mem - "plan" : null, - #number of cpu cores - "cpu" : "2", - #memory size in MB - "mem" : "4096" - # The size of the storage in GB - "disk_size" : 250 - "additional_disks" : { - # "some-disk-name-1": { - # "size": 100, - # "tier": "maxiops", - # }, - # "some-disk-name-2": { - # "size": 100, - # "tier": "maxiops", - # } - } - }, - "worker-1" : { - "node_type" : "worker", - # plan to use instead of custom cpu/mem - "plan" : null, - #number of cpu cores - "cpu" : "2", - #memory size in MB - "mem" : "4096" - # The size of the storage in GB - "disk_size" : 250 - "additional_disks" : { - # "some-disk-name-1": { - # "size": 100, - # "tier": "maxiops", - # }, - # "some-disk-name-2": { - # "size": 100, - # "tier": "maxiops", - # } - } - }, - "worker-2" : { - "node_type" : "worker", - # plan to use instead of custom cpu/mem - "plan" : null, - #number of cpu cores - "cpu" : "2", - #memory size in MB - "mem" : "4096" - # The size of the storage in GB - "disk_size" : 250 - "additional_disks" : { - # "some-disk-name-1": { - # "size": 100, - # "tier": "maxiops", - # }, - # "some-disk-name-2": { - # "size": 100, - # "tier": "maxiops", - # } - } - } -} - -firewall_enabled = false -firewall_default_deny_in = false -firewall_default_deny_out = false - - -master_allowed_remote_ips = [ - { - "start_address" : "0.0.0.0" - "end_address" : "255.255.255.255" - } -] - -k8s_allowed_remote_ips = [ - { - "start_address" : "0.0.0.0" - "end_address" : "255.255.255.255" - } -] - -master_allowed_ports = [] -worker_allowed_ports = [] - -loadbalancer_enabled = false -loadbalancer_plan = "development" -loadbalancers = { - # "http" : { - # "port" : 80, - # "target_port" : 80, - # "backend_servers" : [ - # "worker-0", - # "worker-1", - # "worker-2" - # ] - # } -} - -server_groups = { - # "control-plane" = { - # servers = [ - # "control-plane-0" - # ] - # anti_affinity_policy = "strict" - # }, - # "workers" = { - # servers = [ - # "worker-0", - # "worker-1", - # "worker-2" - # ] - # anti_affinity_policy = "yes" - # } -} diff --git a/contrib/terraform/upcloud/sample-inventory/group_vars b/contrib/terraform/upcloud/sample-inventory/group_vars deleted file mode 120000 index 0d510620513..00000000000 --- a/contrib/terraform/upcloud/sample-inventory/group_vars +++ /dev/null @@ -1 +0,0 @@ -../../../../inventory/sample/group_vars/ \ No newline at end of file diff --git a/contrib/terraform/upcloud/templates/inventory.tpl b/contrib/terraform/upcloud/templates/inventory.tpl deleted file mode 100644 index 02e4def3410..00000000000 --- a/contrib/terraform/upcloud/templates/inventory.tpl +++ /dev/null @@ -1,33 +0,0 @@ -[all] -%{ for name, ips in master_ip ~} -${name} ansible_user=${username} 
ansible_host=${lookup(ips, "public", ips.private)} ip=${ips.private} -%{ endfor ~} -%{ for name, ips in worker_ip ~} -${name} ansible_user=${username} ansible_host=${lookup(ips, "public", ips.private)} ip=${ips.private} -%{ endfor ~} - -[kube_control_plane] -%{ for name, ips in master_ip ~} -${name} -%{ endfor ~} - -[etcd] -%{ for name, ips in master_ip ~} -${name} -%{ endfor ~} - -[kube_node] -%{ for name, ips in worker_ip ~} -${name} -%{ endfor ~} - -[k8s_cluster:children] -kube_control_plane -kube_node - -%{ if length(bastion_ip) > 0 ~} -[bastion] -%{ for name, ips in bastion_ip ~} -bastion ansible_user=${username} ansible_host=${ips.public} -%{ endfor ~} -%{ endif ~} diff --git a/contrib/terraform/upcloud/variables.tf b/contrib/terraform/upcloud/variables.tf deleted file mode 100644 index a4ec44efc77..00000000000 --- a/contrib/terraform/upcloud/variables.tf +++ /dev/null @@ -1,259 +0,0 @@ -variable "prefix" { - type = string - default = "kubespray" - - description = "Prefix that is used to distinguish these resources from others" -} - -variable "zone" { - description = "The zone where to run the cluster" -} - -variable "private_cloud" { - description = "Whether the environment is in the private cloud region" - default = false -} - -variable "public_zone" { - description = "The public zone equivalent if the cluster is running in a private cloud zone" -} - -variable "template_name" { - description = "Block describing the preconfigured operating system" -} - -variable "username" { - description = "The username to use for the nodes" - default = "ubuntu" -} - -variable "private_network_cidr" { - description = "CIDR to use for the private network" - default = "172.16.0.0/24" -} - -variable "dns_servers" { - description = "DNS servers that will be used by the nodes. Until [this is solved](https://github.com/UpCloudLtd/terraform-provider-upcloud/issues/562) this is done using user_data to reconfigure resolved" - - type = set(string) - default = [] -} - -variable "use_public_ips" { - description = "If all nodes should get a public IP" - type = bool - default = true -} - -variable "machines" { - description = "Cluster machines" - - type = map(object({ - node_type = string - plan = string - cpu = optional(number) - mem = optional(number) - disk_size = number - server_group : string - force_public_ip : optional(bool, false) - dns_servers : optional(set(string)) - additional_disks = map(object({ - size = number - tier = string - })) - })) -} - -variable "ssh_public_keys" { - description = "List of public SSH keys which are injected into the VMs." 
- type = list(string) -} - -variable "inventory_file" { - description = "Where to store the generated inventory file" -} - -variable "UPCLOUD_USERNAME" { - description = "UpCloud username with API access" -} - -variable "UPCLOUD_PASSWORD" { - description = "Password for UpCloud API user" -} - -variable "firewall_enabled" { - description = "Enable firewall rules" - default = false -} - -variable "master_allowed_remote_ips" { - description = "List of IP start/end addresses allowed to access API of masters" - type = list(object({ - start_address = string - end_address = string - })) - default = [] -} - -variable "k8s_allowed_remote_ips" { - description = "List of IP start/end addresses allowed to SSH to hosts" - type = list(object({ - start_address = string - end_address = string - })) - default = [] -} - -variable "bastion_allowed_remote_ips" { - description = "List of IP start/end addresses allowed to SSH to bastion" - type = list(object({ - start_address = string - end_address = string - })) - default = [] -} - -variable "master_allowed_ports" { - description = "List of ports to allow on masters" - type = list(object({ - protocol = string - port_range_min = number - port_range_max = number - start_address = string - end_address = string - })) -} - -variable "worker_allowed_ports" { - description = "List of ports to allow on workers" - type = list(object({ - protocol = string - port_range_min = number - port_range_max = number - start_address = string - end_address = string - })) -} - -variable "firewall_default_deny_in" { - description = "Add firewall policies that deny all inbound traffic by default" - default = false -} - -variable "firewall_default_deny_out" { - description = "Add firewall policies that deny all outbound traffic by default" - default = false -} - -variable "loadbalancer_enabled" { - description = "Enable load balancer" - default = false -} - -variable "loadbalancer_plan" { - description = "Load balancer plan (development/production-small)" - default = "development" -} - -variable "loadbalancer_legacy_network" { - description = "If the loadbalancer should use the deprecated network field instead of networks blocks. 
You probably want to have this set to false" - - type = bool - default = false -} - -variable "loadbalancers" { - description = "Load balancers" - - type = map(object({ - proxy_protocol = bool - port = number - target_port = number - allow_internal_frontend = optional(bool, false) - backend_servers = list(string) - })) - default = {} -} - -variable "server_groups" { - description = "Server groups" - - type = map(object({ - anti_affinity_policy = string - })) - - default = {} -} - -variable "router_enable" { - description = "If a router should be enabled and connected to the private network or not" - - type = bool - default = false -} - -variable "gateways" { - description = "Gateways that should be connected to the router, requires router_enable is set to true" - - type = map(object({ - features = list(string) - plan = optional(string) - connections = optional(map(object({ - type = string - local_routes = optional(map(object({ - type = string - static_network = string - })), {}) - remote_routes = optional(map(object({ - type = string - static_network = string - })), {}) - tunnels = optional(map(object({ - remote_address = string - ipsec_properties = optional(object({ - child_rekey_time = number - dpd_delay = number - dpd_timeout = number - ike_lifetime = number - rekey_time = number - phase1_algorithms = set(string) - phase1_dh_group_numbers = set(string) - phase1_integrity_algorithms = set(string) - phase2_algorithms = set(string) - phase2_dh_group_numbers = set(string) - phase2_integrity_algorithms = set(string) - })) - })), {}) - })), {}) - })) - default = {} -} - -variable "gateway_vpn_psks" { - description = "Separate variable for providing psks for connection tunnels" - - type = map(object({ - psk = string - })) - default = {} - sensitive = true -} - -variable "static_routes" { - description = "Static routes to apply to the router, requires router_enable is set to true" - - type = map(object({ - nexthop = string - route = string - })) - default = {} -} - -variable "network_peerings" { - description = "Other UpCloud private networks to peer with, requires router_enable is set to true" - - type = map(object({ - remote_network = string - })) - default = {} -} diff --git a/contrib/terraform/upcloud/versions.tf b/contrib/terraform/upcloud/versions.tf deleted file mode 100644 index 4db5980d212..00000000000 --- a/contrib/terraform/upcloud/versions.tf +++ /dev/null @@ -1,10 +0,0 @@ - -terraform { - required_providers { - upcloud = { - source = "UpCloudLtd/upcloud" - version = "~>5.9.0" - } - } - required_version = ">= 0.13" -} diff --git a/contrib/terraform/vsphere/README.md b/contrib/terraform/vsphere/README.md deleted file mode 100644 index 7aa50d899ea..00000000000 --- a/contrib/terraform/vsphere/README.md +++ /dev/null @@ -1,128 +0,0 @@ -# Kubernetes on vSphere with Terraform - -Provision a Kubernetes cluster on [vSphere](https://www.vmware.com/products/vsphere.html) using Terraform and Kubespray. - -## Overview - -The setup looks like following. - -```text - Kubernetes cluster -+-----------------------+ -| +--------------+ | -| | +--------------+ | -| | | | | -| | | Master/etcd | | -| | | node(s) | | -| +-+ | | -| +--------------+ | -| ^ | -| | | -| v | -| +--------------+ | -| | +--------------+ | -| | | | | -| | | Worker | | -| | | node(s) | | -| +-+ | | -| +--------------+ | -+-----------------------+ -``` - -## Warning - -This setup assumes that the DHCP is disabled in the vSphere cluster and IP addresses have to be provided in the configuration file. 
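-
-Concretely, each entry in the `machines` variable (see Variables below) carries its own static `ip` and `netmask`. A minimal sketch, where the address shown is a placeholder you must replace with one from your own network:
-
-```hcl
-machines = {
-  "master-0" : {
-    "node_type" : "master",
-    "ip" : "192.168.0.10", # placeholder; pick a free address in your network
-    "netmask" : "24"
-  }
-}
-```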
-
-## Requirements
-
-* Terraform 0.13.0 or newer (0.12 also works if you modify the provider block to include version and remove all `versions.tf` files)
-
-## Quickstart
-
-NOTE: *Assumes you are at the root of the kubespray repo*
-
-Copy the sample inventory for your cluster and copy the default Terraform variables.
-
-```bash
-CLUSTER=my-vsphere-cluster
-cp -r inventory/sample inventory/$CLUSTER
-cp contrib/terraform/vsphere/default.tfvars inventory/$CLUSTER/
-cd inventory/$CLUSTER
-```
-
-Edit `default.tfvars` to match your setup. You MUST set values specific to your network and vSphere cluster.
-
-```bash
-# Ensure $EDITOR points to your favorite editor, e.g., vim, emacs, VS Code, etc.
-$EDITOR default.tfvars
-```
-
-For authentication to your vSphere cluster you can use environment variables.
-
-```bash
-export TF_VAR_vsphere_user=username
-export TF_VAR_vsphere_password=password
-```
-
-Run Terraform to create the infrastructure.
-
-```bash
-terraform init ../../contrib/terraform/vsphere
-terraform apply \
-    -var-file default.tfvars \
-    -state=tfstate-$CLUSTER.tfstate \
-    ../../contrib/terraform/vsphere
-```
-
-You should now have an inventory file named `inventory.ini` that you can use with kubespray.
-You can now copy your inventory file and use it with kubespray to set up a cluster.
-You can run `terraform output` to find out the IP addresses of the nodes.
-
-It is a good idea to check that you have basic SSH connectivity to the nodes. You can do that by running:
-
-```bash
-ansible -i inventory.ini -m ping all
-```
-
-Example of using this with the default sample inventory:
-
-```bash
-ansible-playbook -i inventory.ini ../../cluster.yml -b -v
-```
-
-## Variables
-
-### Required
-
-* `machines`: Machines to provision. The key of each entry is used as the name of the machine
-  * `node_type`: The role of this node *(master|worker)*
-  * `ip`: The IP address of the machine
-  * `netmask`: The netmask to use (the right-hand side in CIDR notation, e.g., `24`)
-* `network`: The name of the network to attach the machines to
-* `gateway`: The IP address of the network gateway
-* `vsphere_datacenter`: The identifier of the vSphere data center
-* `vsphere_compute_cluster`: The identifier of the vSphere compute cluster
-* `vsphere_datastore`: The identifier of the vSphere data store
-* `vsphere_server`: The vCenter server name or address for vSphere API operations.
-* `ssh_public_keys`: List of public SSH keys to install on all machines
-* `template_name`: The name of a base image (the OVF template must be defined in vSphere beforehand)
-
-### Optional
-
-* `folder`: Name of the folder to put all machines in (default: `""`)
-* `prefix`: Prefix to use for all resources, required to be unique for all clusters in the same project (default: `"k8s"`)
-* `inventory_file`: Name of the generated inventory file for Kubespray to use in the Ansible step (default: `inventory.ini`)
-* `dns_primary`: The IP address of the primary DNS server (default: `8.8.4.4`)
-* `dns_secondary`: The IP address of the secondary DNS server (default: `8.8.8.8`)
-* `firmware`: Firmware to use (default: `bios`)
-* `hardware_version`: The version of the hardware (default: `15`)
-* `master_cores`: The number of CPU cores for the master nodes (default: 4)
-* `master_memory`: The amount of RAM for the master nodes in MB (default: 4096)
-* `master_disk_size`: The amount of disk space for the master nodes in GB (default: 20)
-* `worker_cores`: The number of CPU cores for the worker nodes (default: 16)
-* `worker_memory`: The amount of RAM for the worker nodes in MB (default: 8192)
-* `worker_disk_size`: The amount of disk space for the worker nodes in GB (default: 100)
-* `vapp`: Boolean to set the template type to vApp (default: `false`)
-* `interface_name`: Name of the interface to configure (default: `ens192`)
-
-An example variables file can be found in `default.tfvars`
diff --git a/contrib/terraform/vsphere/default.tfvars b/contrib/terraform/vsphere/default.tfvars
deleted file mode 100644
index fa169364114..00000000000
--- a/contrib/terraform/vsphere/default.tfvars
+++ /dev/null
@@ -1,38 +0,0 @@
-prefix = "k8s"
-
-inventory_file = "inventory.ini"
-
-network = "VM Network"
-
-machines = {
-  "master-0" : {
-    "node_type" : "master",
-    "ip" : "i-did-not-read-the-docs", # e.g. 192.168.0.10
-    "netmask" : "24"
-  },
-  "worker-0" : {
-    "node_type" : "worker",
-    "ip" : "i-did-not-read-the-docs", # e.g. 192.168.0.20
-    "netmask" : "24"
-  },
-  "worker-1" : {
-    "node_type" : "worker",
-    "ip" : "i-did-not-read-the-docs", # e.g. 192.168.0.21
-    "netmask" : "24"
-  }
-}
-
-gateway = "i-did-not-read-the-docs" # e.g. 192.168.0.1
-
-ssh_public_keys = [
-  # Put your public SSH key here
-  "ssh-rsa I-did-not-read-the-docs",
-  "ssh-rsa I-did-not-read-the-docs 2",
-]
-
-vsphere_datacenter = "i-did-not-read-the-docs"
-vsphere_compute_cluster = "i-did-not-read-the-docs" # e.g. Cluster
-vsphere_datastore = "i-did-not-read-the-docs" # e.g. ssd-000000
-vsphere_server = "i-did-not-read-the-docs" # e.g. vsphere.server.com
-
-template_name = "i-did-not-read-the-docs" # e.g.
ubuntu-bionic-18.04-cloudimg diff --git a/contrib/terraform/vsphere/main.tf b/contrib/terraform/vsphere/main.tf deleted file mode 100644 index fb2d8c8327e..00000000000 --- a/contrib/terraform/vsphere/main.tf +++ /dev/null @@ -1,100 +0,0 @@ -provider "vsphere" { - # Username and password set through env vars VSPHERE_USER and VSPHERE_PASSWORD - user = var.vsphere_user - password = var.vsphere_password - - vsphere_server = var.vsphere_server - - # If you have a self-signed cert - allow_unverified_ssl = true -} - -data "vsphere_datacenter" "dc" { - name = var.vsphere_datacenter -} - -data "vsphere_datastore" "datastore" { - name = var.vsphere_datastore - datacenter_id = data.vsphere_datacenter.dc.id -} - -data "vsphere_network" "network" { - name = var.network - datacenter_id = data.vsphere_datacenter.dc.id -} - -data "vsphere_virtual_machine" "template" { - name = var.template_name - datacenter_id = data.vsphere_datacenter.dc.id -} - -data "vsphere_compute_cluster" "compute_cluster" { - name = var.vsphere_compute_cluster - datacenter_id = data.vsphere_datacenter.dc.id -} - -resource "vsphere_resource_pool" "pool" { - name = "${var.prefix}-cluster-pool" - parent_resource_pool_id = data.vsphere_compute_cluster.compute_cluster.resource_pool_id -} - -module "kubernetes" { - source = "./modules/kubernetes-cluster" - - prefix = var.prefix - - machines = var.machines - - ## Master ## - master_cores = var.master_cores - master_memory = var.master_memory - master_disk_size = var.master_disk_size - - ## Worker ## - worker_cores = var.worker_cores - worker_memory = var.worker_memory - worker_disk_size = var.worker_disk_size - - ## Global ## - - gateway = var.gateway - dns_primary = var.dns_primary - dns_secondary = var.dns_secondary - - pool_id = vsphere_resource_pool.pool.id - datastore_id = data.vsphere_datastore.datastore.id - - folder = var.folder - guest_id = data.vsphere_virtual_machine.template.guest_id - scsi_type = data.vsphere_virtual_machine.template.scsi_type - network_id = data.vsphere_network.network.id - adapter_type = data.vsphere_virtual_machine.template.network_interface_types[0] - interface_name = var.interface_name - firmware = var.firmware - hardware_version = var.hardware_version - disk_thin_provisioned = data.vsphere_virtual_machine.template.disks.0.thin_provisioned - - template_id = data.vsphere_virtual_machine.template.id - vapp = var.vapp - - ssh_public_keys = var.ssh_public_keys -} - -# -# Generate ansible inventory -# - -resource "local_file" "inventory" { - content = templatefile("${path.module}/templates/inventory.tpl", { - connection_strings_master = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s etcd_member_name=etcd%d", - keys(module.kubernetes.master_ip), - values(module.kubernetes.master_ip), - range(1, length(module.kubernetes.master_ip) + 1))), - connection_strings_worker = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s", - keys(module.kubernetes.worker_ip), - values(module.kubernetes.worker_ip))), - list_master = join("\n", formatlist("%s", keys(module.kubernetes.master_ip))), - list_worker = join("\n", formatlist("%s", keys(module.kubernetes.worker_ip))) - }) - filename = var.inventory_file -} diff --git a/contrib/terraform/vsphere/modules/kubernetes-cluster/main.tf b/contrib/terraform/vsphere/modules/kubernetes-cluster/main.tf deleted file mode 100644 index a44c2cfb0a4..00000000000 --- a/contrib/terraform/vsphere/modules/kubernetes-cluster/main.tf +++ /dev/null @@ -1,149 +0,0 @@ -resource "vsphere_virtual_machine" "worker" { - 
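-  # Each entry in var.machines with node_type == "worker" becomes one VM,
-  # cloned from the shared template and given its static network configuration
-  # via guestinfo metadata (or vApp properties when var.vapp is set), since
-  # this setup does not rely on DHCP.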
for_each = { - for name, machine in var.machines : - name => machine - if machine.node_type == "worker" - } - - name = "${var.prefix}-${each.key}" - - resource_pool_id = var.pool_id - datastore_id = var.datastore_id - - num_cpus = var.worker_cores - memory = var.worker_memory - memory_reservation = var.worker_memory - guest_id = var.guest_id - enable_disk_uuid = "true" # needed for CSI provider - scsi_type = var.scsi_type - folder = var.folder - firmware = var.firmware - hardware_version = var.hardware_version - - wait_for_guest_net_routable = false - wait_for_guest_net_timeout = 0 - - network_interface { - network_id = var.network_id - adapter_type = var.adapter_type - } - - disk { - label = "disk0" - size = var.worker_disk_size - thin_provisioned = var.disk_thin_provisioned - } - - lifecycle { - ignore_changes = [disk] - } - - clone { - template_uuid = var.template_id - } - - cdrom { - client_device = true - } - - dynamic "vapp" { - for_each = var.vapp ? [1] : [] - - content { - properties = { - "user-data" = base64encode(templatefile("${path.module}/templates/vapp-cloud-init.tpl", { ssh_public_keys = var.ssh_public_keys })) - } - } - } - - extra_config = { - "isolation.tools.copy.disable" = "FALSE" - "isolation.tools.paste.disable" = "FALSE" - "isolation.tools.setGUIOptions.enable" = "TRUE" - "guestinfo.userdata" = base64encode(templatefile("${path.module}/templates/cloud-init.tpl", { ssh_public_keys = var.ssh_public_keys })) - "guestinfo.userdata.encoding" = "base64" - "guestinfo.metadata" = base64encode(templatefile("${path.module}/templates/metadata.tpl", { hostname = "${var.prefix}-${each.key}", - interface_name = var.interface_name - ip = each.value.ip, - netmask = each.value.netmask, - gw = var.gateway, - dns = var.dns_primary, - ssh_public_keys = var.ssh_public_keys })) - "guestinfo.metadata.encoding" = "base64" - } -} - -resource "vsphere_virtual_machine" "master" { - for_each = { - for name, machine in var.machines : - name => machine - if machine.node_type == "master" - } - - name = "${var.prefix}-${each.key}" - - resource_pool_id = var.pool_id - datastore_id = var.datastore_id - - num_cpus = var.master_cores - memory = var.master_memory - memory_reservation = var.master_memory - guest_id = var.guest_id - enable_disk_uuid = "true" # needed for CSI provider - scsi_type = var.scsi_type - folder = var.folder - firmware = var.firmware - hardware_version = var.hardware_version - - wait_for_guest_net_routable = false - wait_for_guest_net_timeout = 0 - - network_interface { - network_id = var.network_id - adapter_type = var.adapter_type - } - - disk { - label = "disk0" - size = var.master_disk_size - thin_provisioned = var.disk_thin_provisioned - } - - lifecycle { - ignore_changes = [disk] - } - - clone { - template_uuid = var.template_id - } - - cdrom { - client_device = true - } - - dynamic "vapp" { - for_each = var.vapp ? 
[1] : [] - - content { - properties = { - "user-data" = base64encode(templatefile("${path.module}/templates/vapp-cloud-init.tpl", { ssh_public_keys = var.ssh_public_keys })) - } - } - } - - extra_config = { - "isolation.tools.copy.disable" = "FALSE" - "isolation.tools.paste.disable" = "FALSE" - "isolation.tools.setGUIOptions.enable" = "TRUE" - "guestinfo.userdata" = base64encode(templatefile("${path.module}/templates/cloud-init.tpl", { ssh_public_keys = var.ssh_public_keys })) - "guestinfo.userdata.encoding" = "base64" - "guestinfo.metadata" = base64encode(templatefile("${path.module}/templates/metadata.tpl", { hostname = "${var.prefix}-${each.key}", - interface_name = var.interface_name - ip = each.value.ip, - netmask = each.value.netmask, - gw = var.gateway, - dns = var.dns_primary, - ssh_public_keys = var.ssh_public_keys })) - "guestinfo.metadata.encoding" = "base64" - } -} diff --git a/contrib/terraform/vsphere/modules/kubernetes-cluster/output.tf b/contrib/terraform/vsphere/modules/kubernetes-cluster/output.tf deleted file mode 100644 index 93752ab1e31..00000000000 --- a/contrib/terraform/vsphere/modules/kubernetes-cluster/output.tf +++ /dev/null @@ -1,15 +0,0 @@ -output "master_ip" { - value = { - for name, machine in var.machines : - "${var.prefix}-${name}" => machine.ip - if machine.node_type == "master" - } -} - -output "worker_ip" { - value = { - for name, machine in var.machines : - "${var.prefix}-${name}" => machine.ip - if machine.node_type == "worker" - } -} diff --git a/contrib/terraform/vsphere/modules/kubernetes-cluster/templates/cloud-init.tpl b/contrib/terraform/vsphere/modules/kubernetes-cluster/templates/cloud-init.tpl deleted file mode 100644 index 5f809af6a92..00000000000 --- a/contrib/terraform/vsphere/modules/kubernetes-cluster/templates/cloud-init.tpl +++ /dev/null @@ -1,6 +0,0 @@ -#cloud-config - -ssh_authorized_keys: -%{ for ssh_public_key in ssh_public_keys ~} - - ${ssh_public_key} -%{ endfor ~} diff --git a/contrib/terraform/vsphere/modules/kubernetes-cluster/templates/metadata.tpl b/contrib/terraform/vsphere/modules/kubernetes-cluster/templates/metadata.tpl deleted file mode 100644 index 1553f08fe0a..00000000000 --- a/contrib/terraform/vsphere/modules/kubernetes-cluster/templates/metadata.tpl +++ /dev/null @@ -1,14 +0,0 @@ -instance-id: ${hostname} -local-hostname: ${hostname} -network: - version: 2 - ethernets: - ${interface_name}: - match: - name: ${interface_name} - dhcp4: false - addresses: - - ${ip}/${netmask} - gateway4: ${gw} - nameservers: - addresses: [${dns}] diff --git a/contrib/terraform/vsphere/modules/kubernetes-cluster/templates/vapp-cloud-init.tpl b/contrib/terraform/vsphere/modules/kubernetes-cluster/templates/vapp-cloud-init.tpl deleted file mode 100644 index 07d0778aa65..00000000000 --- a/contrib/terraform/vsphere/modules/kubernetes-cluster/templates/vapp-cloud-init.tpl +++ /dev/null @@ -1,24 +0,0 @@ -#cloud-config - -ssh_authorized_keys: -%{ for ssh_public_key in ssh_public_keys ~} - - ${ssh_public_key} -%{ endfor ~} - -write_files: - - path: /etc/netplan/10-user-network.yaml - content: |. 
- network: - version: 2 - ethernets: - ${interface_name}: - dhcp4: false #true to use dhcp - addresses: - - ${ip}/${netmask} - gateway4: ${gw} # Set gw here - nameservers: - addresses: - - ${dns} # Set DNS ip address here - -runcmd: - - netplan apply diff --git a/contrib/terraform/vsphere/modules/kubernetes-cluster/variables.tf b/contrib/terraform/vsphere/modules/kubernetes-cluster/variables.tf deleted file mode 100644 index cb99142321c..00000000000 --- a/contrib/terraform/vsphere/modules/kubernetes-cluster/variables.tf +++ /dev/null @@ -1,43 +0,0 @@ -## Global ## -variable "prefix" {} - -variable "machines" { - description = "Cluster machines" - type = map(object({ - node_type = string - ip = string - netmask = string - })) -} - -variable "gateway" {} -variable "dns_primary" {} -variable "dns_secondary" {} -variable "pool_id" {} -variable "datastore_id" {} -variable "guest_id" {} -variable "scsi_type" {} -variable "network_id" {} -variable "interface_name" {} -variable "adapter_type" {} -variable "disk_thin_provisioned" {} -variable "template_id" {} -variable "vapp" { - type = bool -} -variable "firmware" {} -variable "folder" {} -variable "ssh_public_keys" { - type = list(string) -} -variable "hardware_version" {} - -## Master ## -variable "master_cores" {} -variable "master_memory" {} -variable "master_disk_size" {} - -## Worker ## -variable "worker_cores" {} -variable "worker_memory" {} -variable "worker_disk_size" {} diff --git a/contrib/terraform/vsphere/modules/kubernetes-cluster/versions.tf b/contrib/terraform/vsphere/modules/kubernetes-cluster/versions.tf deleted file mode 100644 index 8c622fdfc14..00000000000 --- a/contrib/terraform/vsphere/modules/kubernetes-cluster/versions.tf +++ /dev/null @@ -1,9 +0,0 @@ -terraform { - required_providers { - vsphere = { - source = "hashicorp/vsphere" - version = ">= 1.24.3" - } - } - required_version = ">= 0.13" -} diff --git a/contrib/terraform/vsphere/output.tf b/contrib/terraform/vsphere/output.tf deleted file mode 100644 index a4338d9be3e..00000000000 --- a/contrib/terraform/vsphere/output.tf +++ /dev/null @@ -1,31 +0,0 @@ -output "master_ip_addresses" { - value = module.kubernetes.master_ip -} - -output "worker_ip_addresses" { - value = module.kubernetes.worker_ip -} - -output "vsphere_datacenter" { - value = var.vsphere_datacenter -} - -output "vsphere_server" { - value = var.vsphere_server -} - -output "vsphere_datastore" { - value = var.vsphere_datastore -} - -output "vsphere_network" { - value = var.network -} - -output "vsphere_folder" { - value = var.folder -} - -output "vsphere_pool" { - value = "${terraform.workspace}-cluster-pool" -} diff --git a/contrib/terraform/vsphere/sample-inventory/cluster.tfvars b/contrib/terraform/vsphere/sample-inventory/cluster.tfvars deleted file mode 100644 index dfa0a3d4fdd..00000000000 --- a/contrib/terraform/vsphere/sample-inventory/cluster.tfvars +++ /dev/null @@ -1,33 +0,0 @@ -prefix = "default" - -inventory_file = "inventory.ini" - -machines = { - "master-0" : { - "node_type" : "master", - "ip" : "i-did-not-read-the-docs" # e.g. 192.168.0.2/24 - }, - "worker-0" : { - "node_type" : "worker", - "ip" : "i-did-not-read-the-docs" # e.g. 192.168.0.2/24 - }, - "worker-1" : { - "node_type" : "worker", - "ip" : "i-did-not-read-the-docs" # e.g. 192.168.0.2/24 - } -} - -gateway = "i-did-not-read-the-docs" # e.g. 
192.168.0.2 - -ssh_public_keys = [ - # Put your public SSH key here - "ssh-rsa I-did-not-read-the-docs", - "ssh-rsa I-did-not-read-the-docs 2", -] - -vsphere_datacenter = "i-did-not-read-the-docs" -vsphere_compute_cluster = "i-did-not-read-the-docs" # e.g. Cluster -vsphere_datastore = "i-did-not-read-the-docs" # e.g. ssd-000000 -vsphere_server = "i-did-not-read-the-docs" # e.g. vsphere.server.com - -template_name = "i-did-not-read-the-docs" # e.g. ubuntu-bionic-18.04-cloudimg diff --git a/contrib/terraform/vsphere/sample-inventory/group_vars b/contrib/terraform/vsphere/sample-inventory/group_vars deleted file mode 120000 index 37359582379..00000000000 --- a/contrib/terraform/vsphere/sample-inventory/group_vars +++ /dev/null @@ -1 +0,0 @@ -../../../../inventory/sample/group_vars \ No newline at end of file diff --git a/contrib/terraform/vsphere/templates/inventory.tpl b/contrib/terraform/vsphere/templates/inventory.tpl deleted file mode 100644 index 28ff28ac223..00000000000 --- a/contrib/terraform/vsphere/templates/inventory.tpl +++ /dev/null @@ -1,17 +0,0 @@ - -[all] -${connection_strings_master} -${connection_strings_worker} - -[kube_control_plane] -${list_master} - -[etcd] -${list_master} - -[kube_node] -${list_worker} - -[k8s_cluster:children] -kube_control_plane -kube_node diff --git a/contrib/terraform/vsphere/variables.tf b/contrib/terraform/vsphere/variables.tf deleted file mode 100644 index 03f9007e11d..00000000000 --- a/contrib/terraform/vsphere/variables.tf +++ /dev/null @@ -1,101 +0,0 @@ -## Global ## - -# Required variables - -variable "machines" { - description = "Cluster machines" - type = map(object({ - node_type = string - ip = string - netmask = string - })) -} - -variable "network" {} - -variable "gateway" {} - -variable "vsphere_datacenter" {} - -variable "vsphere_compute_cluster" {} - -variable "vsphere_datastore" {} - -variable "vsphere_user" {} - -variable "vsphere_password" { - sensitive = true -} - -variable "vsphere_server" {} - -variable "ssh_public_keys" { - description = "List of public SSH keys which are injected into the VMs." 
- type = list(string) -} - -variable "template_name" {} - -# Optional variables (ones where reasonable defaults exist) -variable "vapp" { - default = false -} - -variable "interface_name" { - default = "ens192" -} - -variable "folder" { - default = "" -} - -variable "prefix" { - default = "k8s" -} - -variable "inventory_file" { - default = "inventory.ini" -} - -variable "dns_primary" { - default = "8.8.4.4" -} - -variable "dns_secondary" { - default = "8.8.8.8" -} - -variable "firmware" { - default = "bios" -} - -variable "hardware_version" { - default = "15" -} - -## Master ## - -variable "master_cores" { - default = 4 -} - -variable "master_memory" { - default = 4096 -} - -variable "master_disk_size" { - default = "20" -} - -## Worker ## - -variable "worker_cores" { - default = 16 -} - -variable "worker_memory" { - default = 8192 -} -variable "worker_disk_size" { - default = "100" -} diff --git a/contrib/terraform/vsphere/versions.tf b/contrib/terraform/vsphere/versions.tf deleted file mode 100644 index 8c622fdfc14..00000000000 --- a/contrib/terraform/vsphere/versions.tf +++ /dev/null @@ -1,9 +0,0 @@ -terraform { - required_providers { - vsphere = { - source = "hashicorp/vsphere" - version = ">= 1.24.3" - } - } - required_version = ">= 0.13" -} diff --git a/index.html b/index.html deleted file mode 100644 index 31d21576803..00000000000 --- a/index.html +++ /dev/null @@ -1,48 +0,0 @@ - - - - - Kubespray - Deploy a Production Ready Kubernetes Cluster - - - - - - - -
-
-
-
-
-
-
-
diff --git a/inventory/2SpeedLab/group_vars/all/all.yml b/inventory/2SpeedLab/group_vars/all/all.yml
new file mode 100644
index 00000000000..0d624bfcc64
--- /dev/null
+++ b/inventory/2SpeedLab/group_vars/all/all.yml
@@ -0,0 +1,139 @@
+---
+## Directory where the binaries will be installed
+bin_dir: /usr/local/bin
+
+## The access_ip variable is used to define how other nodes should access
+## the node. This is used in flannel to allow other flannel nodes to see
+## this node for example. The access_ip is really useful in AWS and Google
+## environments where the nodes are accessed remotely by the "public" ip,
+## but don't know about that address themselves.
+# access_ip: 1.1.1.1
+
+
+## External LB example config
+## apiserver_loadbalancer_domain_name: "elb.some.domain"
+# loadbalancer_apiserver:
+#   address: 1.2.3.4
+#   port: 1234
+
+## Internal loadbalancers for apiservers
+# loadbalancer_apiserver_localhost: true
+# valid options are "nginx" or "haproxy"
+# loadbalancer_apiserver_type: nginx  # valid values "nginx" or "haproxy"
+
+## Local loadbalancer should use this port,
+## and it must be set to port 6443
+loadbalancer_apiserver_port: 6443
+
+## If the loadbalancer_apiserver_healthcheck_port variable is defined, it enables the proxy liveness check for nginx.
+loadbalancer_apiserver_healthcheck_port: 8081
+
+### OTHER OPTIONAL VARIABLES
+
+## By default, Kubespray collects nameservers on the host and adds the collected nameservers to nameserverentries.
+## If true, Kubespray does not include host nameservers in nameserverentries in the dns_late stage. However, it still uses the nameservers to make sure the cluster installs safely in the dns_early stage.
+## Use this option with caution; you may need to define your dns servers. Otherwise, outbound queries such as www.google.com may fail.
+# disable_host_nameservers: false
+
+## Upstream dns servers
+# upstream_dns_servers:
+#   - 8.8.8.8
+#   - 8.8.4.4
+
+## There are some changes specific to the cloud providers,
+## for instance we need to encapsulate packets with some network plugins.
+## If set, the only possible value is 'external' after K8s v1.31.
+# cloud_provider:
+
+# External Cloud Controller Manager (Formerly known as cloud provider)
+# cloud_provider must be "external", otherwise this setting is invalid.
+# Supported external cloud controllers are: 'openstack', 'vsphere', 'oci', 'huaweicloud', 'hcloud' and 'manual'
+# 'manual' does not install the cloud controller manager used by Kubespray.
+# If you fill in a value other than the above, the check will fail.
+# external_cloud_provider:
+
+## Set these proxy values in order to update the package manager and docker daemon to use proxies and a custom CA for https_proxy if needed
+# http_proxy: ""
+# https_proxy: ""
+# https_proxy_cert_file: ""
+
+## Refer to roles/kubespray_defaults/defaults/main/main.yml before modifying no_proxy
+# no_proxy: ""
+
+## Some problems may occur when downloading files over an https proxy due to an ansible bug
+## https://github.com/ansible/ansible/issues/32750. Set this variable to False to disable
+## SSL validation of the get_url module. Note that kubespray will still be performing checksum validation.
+# download_validate_certs: False
+
+## If you need to exclude all cluster nodes from the proxy and other resources, add those resources here.
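+## A minimal sketch with hypothetical values - a cluster behind a corporate
+## proxy might combine the proxy settings above like this (the proxy host and
+## CIDRs below are placeholders, not defaults):
+# http_proxy: "http://proxy.corp.example:3128"
+# https_proxy: "http://proxy.corp.example:3128"
+# no_proxy: "localhost,127.0.0.1,10.233.0.0/18,10.233.64.0/18"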
+# additional_no_proxy: ""
+
+## If you need to disable proxying of os package repositories but are still behind an http_proxy, set
+## skip_http_proxy_on_os_packages to true
+## This will cause kubespray not to set the proxy environment in /etc/yum.conf for centos and in /etc/apt/apt.conf for debian/ubuntu
+## Special information for debian/ubuntu - you have to set the no_proxy variable, then apt packages will install from the source of your choice
+# skip_http_proxy_on_os_packages: false
+
+## Since workers are included in the no_proxy variable by default, docker engine will be restarted on all nodes (all
+## pods will restart) when adding or removing workers. To override this behaviour by only including control plane nodes
+## in the no_proxy variable, set below to true:
+no_proxy_exclude_workers: false
+
+## Certificate Management
+## This setting determines whether certs are generated via scripts.
+## Choose 'none' if you provide your own certificates.
+## Options are "script" and "none"
+# cert_management: script
+
+## Set to true to allow pre-checks to fail and continue deployment
+# ignore_assert_errors: false
+
+## The read-only port for the Kubelet to serve on with no authentication/authorization. Uncomment to enable.
+# kube_read_only_port: 10255
+
+## Set true to download and cache container images
+# download_container: true
+
+## Deploy container engine
+# Set false if you want to deploy the container engine manually.
+# deploy_container_engine: true
+
+## Red Hat Enterprise Linux subscription registration
+## Add either a RHEL subscription Username/Password or Organization ID/Activation Key combination
+## Update RHEL subscription purpose usage, role and SLA if necessary
+# rh_subscription_username: ""
+# rh_subscription_password: ""
+# rh_subscription_org_id: ""
+# rh_subscription_activation_key: ""
+# rh_subscription_usage: "Development"
+# rh_subscription_role: "Red Hat Enterprise Server"
+# rh_subscription_sla: "Self-Support"
+
+## Check if access_ip responds to ping. Set false if your firewall blocks ICMP.
+# ping_access_ip: true
+
+# sysctl_file_path to add sysctl conf to
+# sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf"
+
+## Variables for webhook token auth https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication
+kube_webhook_token_auth: false
+kube_webhook_token_auth_url_skip_tls_verify: false
+# kube_webhook_token_auth_url: https://...
+## base64-encoded string of the webhook's CA certificate
+# kube_webhook_token_auth_ca_data: "LS0t..."
+
+## NTP Settings
+# Start the ntpd or chrony service and enable it at system boot.
+ntp_enabled: false
+ntp_manage_config: false
+ntp_servers:
+  - "0.pool.ntp.org iburst"
+  - "1.pool.ntp.org iburst"
+  - "2.pool.ntp.org iburst"
+  - "3.pool.ntp.org iburst"
+
+## Used to control the no_log attribute
+unsafe_show_logs: false
+
+## If enabled, it will allow kubespray to attempt setup even if the distribution is not supported. For unsupported distributions this can lead to unexpected failures in some cases.
+allow_unsupported_distribution_setup: false
diff --git a/inventory/2SpeedLab/group_vars/all/containerd.yml b/inventory/2SpeedLab/group_vars/all/containerd.yml
new file mode 100644
index 00000000000..efa1769fc2c
--- /dev/null
+++ b/inventory/2SpeedLab/group_vars/all/containerd.yml
@@ -0,0 +1,61 @@
+---
+# Please see roles/container-engine/containerd/defaults/main.yml for more configuration options
+
+# containerd_storage_dir: "/var/lib/containerd"
+# containerd_state_dir: "/run/containerd"
+# containerd_oom_score: 0
+
+# containerd_default_runtime: "runc"
+# containerd_snapshotter: "native"
+
+# containerd_runc_runtime:
+#   name: runc
+#   type: "io.containerd.runc.v2"
+#   engine: ""
+#   root: ""
+
+# containerd_additional_runtimes:
+# Example for Kata Containers as additional runtime:
+#   - name: kata
+#     type: "io.containerd.kata.v2"
+#     engine: ""
+#     root: ""
+
+# containerd_grpc_max_recv_message_size: 16777216
+# containerd_grpc_max_send_message_size: 16777216
+
+# Containerd debug socket location: unix or tcp format
+# containerd_debug_address: ""
+
+# Containerd log level
+# containerd_debug_level: "info"
+
+# Containerd logs format, supported values: text, json
+# containerd_debug_format: ""
+
+# Containerd debug socket UID
+# containerd_debug_uid: 0
+
+# Containerd debug socket GID
+# containerd_debug_gid: 0
+
+# containerd_metrics_address: ""
+
+# containerd_metrics_grpc_histogram: false
+
+# Registries defined within containerd.
+# containerd_registries_mirrors:
+#   - prefix: docker.io
+#     mirrors:
+#       - host: https://registry-1.docker.io
+#         capabilities: ["pull", "resolve"]
+#         skip_verify: false
+#         header:
+#           Authorization: "Basic XXX"
+
+# containerd_max_container_log_line_size: 16384
+
+# containerd_registry_auth:
+#   - registry: 10.0.0.2:5000
+#     username: user
+#     password: pass
diff --git a/inventory/2SpeedLab/group_vars/all/coreos.yml b/inventory/2SpeedLab/group_vars/all/coreos.yml
new file mode 100644
index 00000000000..22c21666304
--- /dev/null
+++ b/inventory/2SpeedLab/group_vars/all/coreos.yml
@@ -0,0 +1,2 @@
+## Whether CoreOS should auto-upgrade; the default is true
+# coreos_auto_upgrade: true
diff --git a/inventory/2SpeedLab/group_vars/all/cri-o.yml b/inventory/2SpeedLab/group_vars/all/cri-o.yml
new file mode 100644
index 00000000000..757dab84c93
--- /dev/null
+++ b/inventory/2SpeedLab/group_vars/all/cri-o.yml
@@ -0,0 +1,9 @@
+# Registries defined within cri-o.
+# crio_insecure_registries:
+#   - 10.0.0.2:5000
+
+# Auth config for the registries
+# crio_registry_auth:
+#   - registry: 10.0.0.2:5000
+#     username: user
+#     password: pass
diff --git a/inventory/2SpeedLab/group_vars/all/database_nodes.yml b/inventory/2SpeedLab/group_vars/all/database_nodes.yml
new file mode 100644
index 00000000000..2dd37f21267
--- /dev/null
+++ b/inventory/2SpeedLab/group_vars/all/database_nodes.yml
@@ -0,0 +1,8 @@
+# Taints for database nodes
+node_taints:
+  - "database=true:NoSchedule"
+
+# Optional: Add node labels
+node_labels:
+  node-type: database
+  workload: database
diff --git a/inventory/2SpeedLab/group_vars/all/etcd.yml b/inventory/2SpeedLab/group_vars/all/etcd.yml
new file mode 100644
index 00000000000..39600c35fbe
--- /dev/null
+++ b/inventory/2SpeedLab/group_vars/all/etcd.yml
@@ -0,0 +1,16 @@
+---
+## Directory where etcd data is stored
+etcd_data_dir: /var/lib/etcd
+
+## Container runtime
+## docker for docker, crio for cri-o and containerd for containerd.
+## Additionally you can set this to kubeadm if you want to install etcd using kubeadm
+## Kubeadm etcd deployment is experimental and only available for new deployments
+## If this is not set, the container manager will be inherited from the Kubespray defaults
+## and not from k8s_cluster/k8s-cluster.yml, which might not be what you want.
+## Also, this makes it possible to use a different container manager for the etcd nodes.
+# container_manager: containerd
+
+## Settings for etcd deployment type
+# Set this to docker if you are using container_manager: docker
+etcd_deployment_type: host
diff --git a/inventory/2SpeedLab/group_vars/all/offline.yml b/inventory/2SpeedLab/group_vars/all/offline.yml
new file mode 100644
index 00000000000..07bd5fc8010
--- /dev/null
+++ b/inventory/2SpeedLab/group_vars/all/offline.yml
@@ -0,0 +1,114 @@
+---
+## Global Offline settings
+### Private Container Image Registry
+# registry_host: "myprivateregistry.com"
+# files_repo: "http://myprivatehttpd"
+### If using CentOS, RedHat, AlmaLinux or Fedora
+# yum_repo: "http://myinternalyumrepo"
+### If using Debian
+# debian_repo: "http://myinternaldebianrepo"
+### If using Ubuntu
+# ubuntu_repo: "http://myinternalubunturepo"
+
+## Container Registry overrides
+# kube_image_repo: "{{ registry_host }}"
+# gcr_image_repo: "{{ registry_host }}"
+# github_image_repo: "{{ registry_host }}"
+# docker_image_repo: "{{ registry_host }}"
+# quay_image_repo: "{{ registry_host }}"
+
+## Kubernetes components
+# kubeadm_download_url: "{{ files_repo }}/dl.k8s.io/release/v{{ kube_version }}/bin/linux/{{ image_arch }}/kubeadm"
+# kubectl_download_url: "{{ files_repo }}/dl.k8s.io/release/v{{ kube_version }}/bin/linux/{{ image_arch }}/kubectl"
+# kubelet_download_url: "{{ files_repo }}/dl.k8s.io/release/v{{ kube_version }}/bin/linux/{{ image_arch }}/kubelet"
+
+
+## Two options: override the entire binary repository, or override only a single binary.
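+## A worked example (hypothetical values, not defaults): given
+##   files_repo: "http://myprivatehttpd"
+##   kube_version: 1.29.5
+##   image_arch: amd64
+## the kubeadm_download_url above resolves to
+##   http://myprivatehttpd/dl.k8s.io/release/v1.29.5/bin/linux/amd64/kubeadm
+## i.e. the mirror must reproduce the upstream path layout under its own host.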
+
+## [Optional] 1 - Override entire binary repository
+# github_url: "https://my_github_proxy"
+# dl_k8s_io_url: "https://my_dl_k8s_io_proxy"
+# storage_googleapis_url: "https://my_storage_googleapi_proxy"
+# get_helm_url: "https://my_helm_sh_proxy"
+
+## [Optional] 2 - Override a specific binary
+## CNI Plugins
+# cni_download_url: "{{ files_repo }}/github.com/containernetworking/plugins/releases/download/v{{ cni_version }}/cni-plugins-linux-{{ image_arch }}-v{{ cni_version }}.tgz"
+
+## cri-tools
+# crictl_download_url: "{{ files_repo }}/github.com/kubernetes-sigs/cri-tools/releases/download/v{{ crictl_version }}/crictl-v{{ crictl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz"
+
+## [Optional] etcd: only if you use etcd_deployment=host
+# etcd_download_url: "{{ files_repo }}/github.com/etcd-io/etcd/releases/download/v{{ etcd_version }}/etcd-v{{ etcd_version }}-linux-{{ image_arch }}.tar.gz"
+
+# [Optional] Calico: If using the Calico network plugin
+# calicoctl_download_url: "{{ files_repo }}/github.com/projectcalico/calico/releases/download/v{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}"
+# [Optional] Calico with kdd: If using the Calico network plugin with the kdd datastore
+# calico_crds_download_url: "{{ files_repo }}/github.com/projectcalico/calico/archive/v{{ calico_version }}.tar.gz"
+
+# [Optional] Cilium: If using the Cilium network plugin
+# ciliumcli_download_url: "{{ files_repo }}/github.com/cilium/cilium-cli/releases/download/v{{ cilium_cli_version }}/cilium-linux-{{ image_arch }}.tar.gz"
+
+# [Optional] helm: only if you set helm_enabled: true
+# helm_download_url: "{{ files_repo }}/get.helm.sh/helm-v{{ helm_version }}-linux-{{ image_arch }}.tar.gz"
+
+# [Optional] crun: only if you set crun_enabled: true
+# crun_download_url: "{{ files_repo }}/github.com/containers/crun/releases/download/{{ crun_version }}/crun-{{ crun_version }}-linux-{{ image_arch }}"
+
+# [Optional] kata: only if you set kata_containers_enabled: true
+# kata_containers_download_url: "{{ files_repo }}/github.com/kata-containers/kata-containers/releases/download/{{ kata_containers_version }}/kata-static-{{ kata_containers_version }}-{{ image_arch }}.tar.xz"
+
+# [Optional] cri-dockerd: only if you set container_manager: docker
+# cri_dockerd_download_url: "{{ files_repo }}/github.com/Mirantis/cri-dockerd/releases/download/v{{ cri_dockerd_version }}/cri-dockerd-{{ cri_dockerd_version }}.{{ image_arch }}.tgz"
+
+# [Optional] runc: if you set container_manager to containerd or crio
+# runc_download_url: "{{ files_repo }}/github.com/opencontainers/runc/releases/download/v{{ runc_version }}/runc.{{ image_arch }}"
+
+# [Optional] cri-o: only if you set container_manager: crio
+# crio_download_base: "download.opensuse.org/repositories/devel:kubic:libcontainers:stable"
+# crio_download_crio: "http://{{ crio_download_base }}:/cri-o:/"
+# crio_download_url: "{{ files_repo }}/storage.googleapis.com/cri-o/artifacts/cri-o.{{ image_arch }}.v{{ crio_version }}.tar.gz"
+# skopeo_download_url: "{{ files_repo }}/github.com/lework/skopeo-binary/releases/download/v{{ skopeo_version }}/skopeo-linux-{{ image_arch }}"
+
+# [Optional] containerd: only if you set container_runtime: containerd
+# containerd_download_url: "{{ files_repo }}/github.com/containerd/containerd/releases/download/v{{ containerd_version }}/containerd-{{ containerd_version }}-linux-{{ image_arch }}.tar.gz"
+# nerdctl_download_url: "{{ files_repo }}/github.com/containerd/nerdctl/releases/download/v{{ nerdctl_version }}/nerdctl-{{ nerdctl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz"
+
+# [Optional] runsc,containerd-shim-runsc: only if you set gvisor_enabled: true
+# gvisor_runsc_download_url: "{{ files_repo }}/storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/runsc"
+# gvisor_containerd_shim_runsc_download_url: "{{ files_repo }}/storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/containerd-shim-runsc-v1"
+
+
+## CentOS/Redhat/AlmaLinux
+### For EL8, baseos and appstream must be available;
+### by default we enable those repos automatically
+# rhel_enable_repos: false
+### Docker / Containerd
+# docker_rh_repo_base_url: "{{ yum_repo }}/docker-ce/$releasever/$basearch"
+# docker_rh_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg"
+
+## Fedora
+### Docker
+# docker_fedora_repo_base_url: "{{ yum_repo }}/docker-ce/{{ ansible_distribution_major_version }}/{{ ansible_architecture }}"
+# docker_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg"
+### Containerd
+# containerd_fedora_repo_base_url: "{{ yum_repo }}/containerd"
+# containerd_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg"
+
+## Debian
+### Docker
+# docker_debian_repo_base_url: "{{ debian_repo }}/docker-ce"
+# docker_debian_repo_gpgkey: "{{ debian_repo }}/docker-ce/gpg"
+### Containerd
+# containerd_debian_repo_base_url: "{{ debian_repo }}/containerd"
+# containerd_debian_repo_gpgkey: "{{ debian_repo }}/containerd/gpg"
+# containerd_debian_repo_repokey: 'YOURREPOKEY'
+
+## Ubuntu
+### Docker
+# docker_ubuntu_repo_base_url: "{{ ubuntu_repo }}/docker-ce"
+# docker_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/docker-ce/gpg"
+### Containerd
+# containerd_ubuntu_repo_base_url: "{{ ubuntu_repo }}/containerd"
+# containerd_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/containerd/gpg"
+# containerd_ubuntu_repo_repokey: 'YOURREPOKEY'
diff --git a/inventory/2SpeedLab/group_vars/k8s_cluster/addons.yml b/inventory/2SpeedLab/group_vars/k8s_cluster/addons.yml
new file mode 100644
index 00000000000..9af010ee219
--- /dev/null
+++ b/inventory/2SpeedLab/group_vars/k8s_cluster/addons.yml
@@ -0,0 +1,248 @@
+---
+# Kubernetes dashboard
+# RBAC required. See docs/getting-started.md for access details.
+# dashboard_enabled: false
+
+# Helm deployment
+helm_enabled: true
+
+# Registry deployment
+registry_enabled: false
+# registry_namespace: kube-system
+# registry_storage_class: ""
+# registry_disk_size: "10Gi"
+
+# Metrics Server deployment
+metrics_server_enabled: true
+metrics_server_container_port: 10250
+metrics_server_kubelet_insecure_tls: true
+metrics_server_metric_resolution: 15s
+metrics_server_kubelet_preferred_address_types: "InternalIP,ExternalIP,Hostname"
+metrics_server_host_network: false
+metrics_server_replicas: 1
+
+# Rancher Local Path Provisioner
+local_path_provisioner_enabled: false
+# local_path_provisioner_namespace: "local-path-storage"
+# local_path_provisioner_storage_class: "local-path"
+# local_path_provisioner_reclaim_policy: Delete
+# local_path_provisioner_claim_root: /opt/local-path-provisioner/
+# local_path_provisioner_debug: false
+# local_path_provisioner_image_repo: "{{ docker_image_repo }}/rancher/local-path-provisioner"
+# local_path_provisioner_image_tag: "v0.0.24"
+# local_path_provisioner_helper_image_repo: "busybox"
+# local_path_provisioner_helper_image_tag: "latest"
+
+# Local volume provisioner deployment
+local_volume_provisioner_enabled: false
+# local_volume_provisioner_namespace: kube-system
+# local_volume_provisioner_nodelabels:
+#   - kubernetes.io/hostname
+#   - topology.kubernetes.io/region
+#   - topology.kubernetes.io/zone
+# local_volume_provisioner_storage_classes:
+#   local-storage:
+#     host_dir: /mnt/disks
+#     mount_dir: /mnt/disks
+#     volume_mode: Filesystem
+#     fs_type: ext4
+#   fast-disks:
+#     host_dir: /mnt/fast-disks
+#     mount_dir: /mnt/fast-disks
+#     block_cleaner_command:
+#       - "/scripts/shred.sh"
+#       - "2"
+#     volume_mode: Filesystem
+#     fs_type: ext4
+# local_volume_provisioner_tolerations:
+#   - effect: NoSchedule
+#     operator: Exists
+
+# CSI Volume Snapshot Controller deployment, set this to true if your CSI is able to manage snapshots
+# currently, setting cinder_csi_enabled=true would automatically enable the snapshot controller
+# Longhorn is an external CSI that would also require setting this to true but it is not included in kubespray
+# csi_snapshot_controller_enabled: false
+# csi snapshot namespace
+# snapshot_controller_namespace: kube-system
+
+# Gateway API CRDs
+gateway_api_enabled: false
+
+# Nginx ingress controller deployment
+ingress_nginx_enabled: true
+# ingress_nginx_host_network: false
+# ingress_nginx_service_type: LoadBalancer
+# ingress_nginx_service_annotations:
+#   example.io/loadbalancerIPs: 1.2.3.4
+# ingress_nginx_service_nodeport_http: 30080
+# ingress_nginx_service_nodeport_https: 30081
+ingress_publish_status_address: ""
+# ingress_nginx_nodeselector:
+#   kubernetes.io/os: "linux"
+# ingress_nginx_tolerations:
+#   - key: "node-role.kubernetes.io/control-plane"
+#     operator: "Equal"
+#     value: ""
+#     effect: "NoSchedule"
+# ingress_nginx_namespace: "ingress-nginx"
+# ingress_nginx_insecure_port: 80
+# ingress_nginx_secure_port: 443
+# ingress_nginx_configmap:
+#   map-hash-bucket-size: "128"
+#   ssl-protocols: "TLSv1.2 TLSv1.3"
+# ingress_nginx_configmap_tcp_services:
+#   9000: "default/example-go:8080"
+# ingress_nginx_configmap_udp_services:
+#   53: "kube-system/coredns:53"
+# ingress_nginx_extra_args:
+#   - --default-ssl-certificate=default/foo-tls
+# ingress_nginx_termination_grace_period_seconds: 300
+# ingress_nginx_class: nginx
+# ingress_nginx_without_class: true
+# ingress_nginx_default: false
+
+# ALB ingress controller deployment
+ingress_alb_enabled: false
+# alb_ingress_aws_region: "us-east-1"
+# alb_ingress_restrict_scheme: "false"
+# Enables logging on all outbound requests sent to the AWS API.
+# If logging is desired, set to true.
+# alb_ingress_aws_debug: "false"
+
+# Cert manager deployment
+cert_manager_enabled: false
+# cert_manager_namespace: "cert-manager"
+# cert_manager_tolerations:
+#   - key: node-role.kubernetes.io/control-plane
+#     effect: NoSchedule
+# cert_manager_affinity:
+#   nodeAffinity:
+#     preferredDuringSchedulingIgnoredDuringExecution:
+#       - weight: 100
+#         preference:
+#           matchExpressions:
+#             - key: node-role.kubernetes.io/control-plane
+#               operator: In
+#               values:
+#                 - ""
+# cert_manager_nodeselector:
+#   kubernetes.io/os: "linux"
+
+# cert_manager_trusted_internal_ca: |
+#   -----BEGIN CERTIFICATE-----
+#   [REPLACE with your CA certificate]
+#   -----END CERTIFICATE-----
+# cert_manager_leader_election_namespace: kube-system
+
+# cert_manager_dns_policy: "ClusterFirst"
+# cert_manager_dns_config:
+#   nameservers:
+#     - "1.1.1.1"
+#     - "8.8.8.8"
+
+# cert_manager_controller_extra_args:
+#   - "--dns01-recursive-nameservers-only=true"
+#   - "--dns01-recursive-nameservers=1.1.1.1:53,8.8.8.8:53"
+
+# MetalLB deployment
+metallb_enabled: false
+metallb_speaker_enabled: "{{ metallb_enabled }}"
+metallb_namespace: "metallb-system"
+# metallb_protocol: "layer2"
+# metallb_port: "7472"
+# metallb_memberlist_port: "7946"
+# metallb_config:
+#   speaker:
+#     nodeselector:
+#       kubernetes.io/os: "linux"
+#     tolerations:
+#       - key: "node-role.kubernetes.io/control-plane"
+#         operator: "Equal"
+#         value: ""
+#         effect: "NoSchedule"
+#   controller:
+#     nodeselector:
+#       kubernetes.io/os: "linux"
+#     tolerations:
+#       - key: "node-role.kubernetes.io/control-plane"
+#         operator: "Equal"
+#         value: ""
+#         effect: "NoSchedule"
+#   address_pools:
+#     primary:
+#       ip_range:
+#         - 10.5.0.0/16
+#       auto_assign: true
+#     pool1:
+#       ip_range:
+#         - 10.6.0.0/16
+#       auto_assign: true
+#     pool2:
+#       ip_range:
+#         - 10.10.0.0/16
+#       auto_assign: true
+#   layer2:
+#     - primary
+#   layer3:
+#     defaults:
+#       peer_port: 179
+#       hold_time: 120s
+#     communities:
+#       vpn-only: "1234:1"
+#       NO_ADVERTISE: "65535:65282"
+#     metallb_peers:
+#       peer1:
+#         peer_address: 10.6.0.1
+#         peer_asn: 64512
+#         my_asn: 4200000000
+#         communities:
+#           - vpn-only
+#         address_pool:
+#           - pool1
+#       peer2:
+#         peer_address: 10.10.0.1
+#         peer_asn: 64513
+#         my_asn: 4200000000
+#         communities:
+#           - NO_ADVERTISE
+#         address_pool:
+#           - pool2
+
+argocd_enabled: false
+# argocd_namespace: argocd
+# Default password:
+#   - https://argo-cd.readthedocs.io/en/stable/getting_started/#4-login-using-the-cli
+#   ---
+#   The initial password is autogenerated and stored in `argocd-initial-admin-secret` in the argocd namespace defined above.
+#   Using the argocd CLI, the generated password can be fetched automatically from the current kubectl context with the command:
+#   argocd admin initial-password -n argocd
+#   ---
+# Use the following var to set the admin password
+# argocd_admin_password: "password"
+
+# The plugin manager for kubectl
+
+# Kube VIP
+kube_vip_enabled: false
+# kube_vip_arp_enabled: true
+# kube_vip_controlplane_enabled: true
+# kube_vip_address: 192.168.56.120
+# loadbalancer_apiserver:
+#   address: "{{ kube_vip_address }}"
+#   port: 6443
+# kube_vip_interface: eth0
+# kube_vip_services_enabled: false
+# kube_vip_dns_mode: first
+# kube_vip_cp_detect: false
+# kube_vip_leasename: plndr-cp-lock
+# kube_vip_enable_node_labeling: false
+# kube_vip_lb_fwdmethod: local
+
+# Node Feature Discovery
+node_feature_discovery_enabled: false
+# node_feature_discovery_gc_sa_name: node-feature-discovery
+# node_feature_discovery_gc_sa_create: false
+# node_feature_discovery_worker_sa_name: node-feature-discovery
+# node_feature_discovery_worker_sa_create: false
+# node_feature_discovery_master_config:
+#   extraLabelNs: ["nvidia.com"]
diff --git a/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-cluster.yml b/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-cluster.yml
new file mode 100644
index 00000000000..cb9fa2438e7
--- /dev/null
+++ b/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-cluster.yml
@@ -0,0 +1,372 @@
+---
+# Kubernetes configuration dirs and system namespace.
+# These are where all the additional config stuff that Kubernetes
+# normally puts in /srv/kubernetes goes.
+# This puts it in a sane location and namespace.
+# Editing these values will almost surely break something.
+kube_config_dir: /etc/kubernetes
+kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
+kube_manifest_dir: "{{ kube_config_dir }}/manifests"
+
+# This is where all the cert scripts and certs will be located
+kube_cert_dir: "{{ kube_config_dir }}/ssl"
+
+# This is where all of the bearer tokens will be stored
+kube_token_dir: "{{ kube_config_dir }}/tokens"
+
+kube_api_anonymous_auth: true
+
+# Where the binaries will be downloaded.
+# Note: ensure that you have enough disk space (about 1G)
+local_release_dir: "/tmp/releases"
+# Random shifts for retrying failed ops like pushing/downloading
+retry_stagger: 5
+
+# This is the user that owns the cluster installation.
+kube_owner: kube
+
+# This is the group that the cert creation scripts chgrp the
+# cert files to. Not really changeable...
+kube_cert_group: kube-cert
+
+# Cluster Loglevel configuration
+kube_log_level: 2
+
+# Directory where credentials will be stored
+credentials_dir: "{{ inventory_dir }}/credentials"
+
+## It is possible to activate / deactivate selected authentication methods (oidc, static token auth)
+# kube_oidc_auth: false
+# kube_token_auth: false
+
+
+## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/
+## To use OpenID you additionally have to deploy an OpenID Provider (e.g. Dex, Keycloak, ...)
+
+# kube_oidc_url: https:// ...
+# kube_oidc_client_id: kubernetes
+## Optional settings for OIDC
+# kube_oidc_ca_file: "{{ kube_cert_dir }}/ca.pem"
+# kube_oidc_username_claim: sub
+# kube_oidc_username_prefix: 'oidc:'
+# kube_oidc_groups_claim: groups
+# kube_oidc_groups_prefix: 'oidc:'
+
+## Variables to control webhook authn/authz
+# kube_webhook_token_auth: false
+# kube_webhook_token_auth_url: https://...
+# kube_webhook_token_auth_url_skip_tls_verify: false
+
+## For webhook authorization, authorization_modes must include Webhook or kube_apiserver_authorization_config_authorizers must configure a type: Webhook
+# kube_webhook_authorization: false
+# kube_webhook_authorization_url: https://...
+# kube_webhook_authorization_url_skip_tls_verify: false
+
+# Choose network plugin (cilium, calico, kube-ovn or flannel; use cni for a generic cni plugin)
+# Can also be set to 'cloud', which lets the cloud provider set up appropriate routing
+kube_network_plugin: calico
+
+# Setting multi_networking to true will install Multus: https://github.com/k8snetworkplumbingwg/multus-cni
+kube_network_plugin_multus: false
+
+# Kubernetes internal network for services, unused block of space.
+kube_service_addresses: 10.233.0.0/18
+
+# Internal network. When used, it will assign IP
+# addresses from this range to individual pods.
+# This network must be unused in your network infrastructure!
+kube_pods_subnet: 10.233.64.0/18
+
+# Internal network node size allocation (optional). This is the size allocated
+# to each node for pod IP address allocation. Note that the number of pods per node is
+# also limited by the kubelet_max_pods variable which defaults to 110.
+#
+# Example:
+# Up to 64 nodes and up to 254 or kubelet_max_pods (the lowest of the two) pods per node:
+#  - kube_pods_subnet: 10.233.64.0/18
+#  - kube_network_node_prefix: 24
+#  - kubelet_max_pods: 110
+#
+# Example:
+# Up to 128 nodes and up to 126 or kubelet_max_pods (the lowest of the two) pods per node:
+#  - kube_pods_subnet: 10.233.64.0/18
+#  - kube_network_node_prefix: 25
+#  - kubelet_max_pods: 110
+kube_network_node_prefix: 24
+
+# Kubernetes internal network for IPv6 services, unused block of space.
+# This is only used if ipv6_stack is set to true
+# This provides 4096 IPv6 IPs
+kube_service_addresses_ipv6: fd85:ee78:d8a6:8607::1000/116
+
+# Internal network. When used, it will assign IPv6 addresses from this range to individual pods.
+# This network must not already be in your network infrastructure!
+# This is only used if ipv6_stack is set to true.
+# This provides room for 256 nodes with 254 pods per node.
+kube_pods_subnet_ipv6: fd85:ee78:d8a6:8607::1:0000/112
+
+# IPv6 subnet size allocated to each node for pods.
+# This is only used if ipv6_stack is set to true
+# This provides room for 254 pods per node.
+kube_network_node_prefix_ipv6: 120
+
+# The port the API Server will be listening on.
+kube_apiserver_ip: "{{ kube_service_subnets.split(',') | first | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(1) | ansible.utils.ipaddr('address') }}"
+kube_apiserver_port: 6443  # (https)
+
+# Kube-proxy proxyMode configuration.
+# Can be ipvs, iptables, nftables
+# TODO: it needs to be changed to nftables when upstream uses nftables as the default
+kube_proxy_mode: ipvs
+
+# configure arp_ignore and arp_announce to avoid answering ARP queries from the kube-ipvs0 interface
+# must be set to true for MetalLB and kube-vip (ARP enabled) to work
+kube_proxy_strict_arp: false
+
+# A string slice of values which specify the addresses to use for NodePorts.
+# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32).
+# The default empty string slice ([]) means to use all local addresses.
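+# For example (hypothetical value), setting
+#   kube_proxy_nodeport_addresses_cidr: 192.168.1.0/24
+# makes the template below render [192.168.1.0/24], so NodePorts are bound
+# only on addresses in that block.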
+# kube_proxy_nodeport_addresses_cidr is retained for legacy config
+kube_proxy_nodeport_addresses: >-
+  {%- if kube_proxy_nodeport_addresses_cidr is defined -%}
+  [{{ kube_proxy_nodeport_addresses_cidr }}]
+  {%- else -%}
+  []
+  {%- endif -%}
+
+# If non-empty, will use this string as identification instead of the actual hostname
+# kube_override_hostname: {{ inventory_hostname }}
+
+## Encrypting Secret Data at Rest
+kube_encrypt_secret_data: false
+
+# Graceful Node Shutdown (Kubernetes >= 1.21.0), see https://kubernetes.io/blog/2021/04/21/graceful-node-shutdown-beta/
+# kubelet_shutdown_grace_period has to be greater than kubelet_shutdown_grace_period_critical_pods to allow
+# non-critical pods to also terminate gracefully
+# kubelet_shutdown_grace_period: 60s
+# kubelet_shutdown_grace_period_critical_pods: 20s
+
+# DNS configuration.
+# Kubernetes cluster name, also will be used as DNS domain
+cluster_name: cluster.local
+# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
+ndots: 2
+# dns_timeout: 2
+# dns_attempts: 2
+# Custom search domains to be added in addition to the default cluster search domains
+# searchdomains:
+#   - svc.{{ cluster_name }}
+#   - default.svc.{{ cluster_name }}
+# Remove default cluster search domains (``default.svc.{{ dns_domain }}, svc.{{ dns_domain }}``).
+# remove_default_searchdomains: false
+# Can be coredns, coredns_dual, manual or none
+dns_mode: coredns
+# Set manual server if using a custom cluster DNS server
+# manual_dns_server: 10.x.x.x
+# Enable nodelocal dns cache
+enable_nodelocaldns: true
+enable_nodelocaldns_secondary: false
+nodelocaldns_ip: 169.254.25.10
+nodelocaldns_health_port: 9254
+nodelocaldns_second_health_port: 9256
+nodelocaldns_bind_metrics_host_ip: false
+nodelocaldns_secondary_skew_seconds: 5
+# nodelocaldns_external_zones:
+# - zones:
+#   - example.com
+#   - example.io:1053
+#   nameservers:
+#   - 1.1.1.1
+#   - 2.2.2.2
+#   cache: 5
+# - zones:
+#   - https://mycompany.local:4453
+#   nameservers:
+#   - 192.168.0.53
+#   cache: 0
+# - zones:
+#   - mydomain.tld
+#   nameservers:
+#   - 10.233.0.3
+#   cache: 5
+#   rewrite:
+#   - name website.tld website.namespace.svc.cluster.local
+# Enable k8s_external plugin for CoreDNS
+enable_coredns_k8s_external: false
+coredns_k8s_external_zone: k8s_external.local
+# Enable endpoint_pod_names option for kubernetes plugin
+enable_coredns_k8s_endpoint_pod_names: false
+# Set forward options for upstream DNS servers in coredns (and nodelocaldns) config
+# dns_upstream_forward_extra_opts:
+#   policy: sequential
+# Apply extra options to the coredns kubernetes plugin
+# coredns_kubernetes_extra_opts:
+#   - 'fallthrough example.local'
+# Forward extra domains to the coredns kubernetes plugin
+# coredns_kubernetes_extra_domains: ''
+
+# Can be docker_dns, host_resolvconf or none
+resolvconf_mode: host_resolvconf
+# Deploy the netchecker app to verify DNS resolution as an HTTP service
+deploy_netchecker: false
+# IP address of the kubernetes skydns service
+skydns_server: "{{ kube_service_subnets.split(',') | first | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(3) | ansible.utils.ipaddr('address') }}"
+skydns_server_secondary: "{{ kube_service_subnets.split(',') | first | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(4) | ansible.utils.ipaddr('address') }}"
+dns_domain: "{{ cluster_name }}"
+
+## Container runtime
+## docker for docker, crio for cri-o and containerd for containerd.
+## Default: containerd
+container_manager: containerd
+
+# Additional container runtimes
+kata_containers_enabled: false
+
+kubeadm_certificate_key: "{{ lookup('password', credentials_dir + '/kubeadm_certificate_key.creds length=64 chars=hexdigits') | lower }}"
+
+# K8s image pull policy (imagePullPolicy)
+k8s_image_pull_policy: IfNotPresent
+
+# audit log for kubernetes
+kubernetes_audit: false
+
+# define kubelet config dir for dynamic kubelet
+# kubelet_config_dir:
+default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir"
+
+# Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts
+# kubeconfig_localhost: false
+# Use ansible_host as external api ip when copying over kubeconfig.
+# kubeconfig_localhost_ansible_host: false
+# Download kubectl onto the host that runs Ansible in {{ bin_dir }}
+# kubectl_localhost: false
+
+# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet.
+# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
+# kubelet_enforce_node_allocatable: pods
+
+## Set runtime and kubelet cgroups when using systemd as cgroup driver (default)
+# kubelet_runtime_cgroups: "/{{ kube_service_cgroups }}/{{ container_manager }}.service"
+# kubelet_kubelet_cgroups: "/{{ kube_service_cgroups }}/kubelet.service"
+
+## Set runtime and kubelet cgroups when using cgroupfs as cgroup driver
+# kubelet_runtime_cgroups_cgroupfs: "/system.slice/{{ container_manager }}.service"
+# kubelet_kubelet_cgroups_cgroupfs: "/system.slice/kubelet.service"
+
+# Whether to run kubelet and container-engine daemons in a dedicated cgroup.
+# kube_reserved: false
+## Uncomment to override default values
+## The following two items need to be set when kube_reserved is true
+# kube_reserved_cgroups_for_service_slice: kube.slice
+# kube_reserved_cgroups: "/{{ kube_reserved_cgroups_for_service_slice }}"
+# kube_memory_reserved: 256Mi
+# kube_cpu_reserved: 100m
+# kube_ephemeral_storage_reserved: 2Gi
+# kube_pid_reserved: "1000"
+
+## Optionally reserve resources for OS system daemons.
+# system_reserved: true
+## Uncomment to override default values
+## The following two items need to be set when system_reserved is true
+# system_reserved_cgroups_for_service_slice: system.slice
+# system_reserved_cgroups: "/{{ system_reserved_cgroups_for_service_slice }}"
+# system_memory_reserved: 512Mi
+# system_cpu_reserved: 500m
+# system_ephemeral_storage_reserved: 2Gi
+
+## Eviction Thresholds to avoid system OOMs
+# https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#eviction-thresholds
+# eviction_hard: {}
+# eviction_hard_control_plane: {}
+
+# An alternative flexvolume plugin directory
+# kubelet_flexvolumes_plugins_dir: /usr/libexec/kubernetes/kubelet-plugins/volume/exec
+
+## Supplementary addresses that can be added in kubernetes ssl keys.
+## That can be useful, for example, to set up a keepalived virtual IP
+# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3]
+
+## Running on top of openstack vms with cinder enabled may lead to unschedulable pods due to the NoVolumeZoneConflict restriction in kube-scheduler.
+## See https://github.com/kubernetes-sigs/kubespray/issues/2141
+## Set this variable to true to get rid of this issue
+volume_cross_zone_attachment: false
+## Add a Persistent Volumes Storage Class for the corresponding cloud provider (supported: in-tree OpenStack, Cinder CSI,
+## AWS EBS CSI, Azure Disk CSI, GCP Persistent Disk CSI)
+persistent_volumes_enabled: false
+
+## Container Engine Acceleration
+## Enable the container acceleration feature, for example to use gpu acceleration in containers
+# nvidia_accelerator_enabled: true
+## Nvidia GPU driver install. The install will be done by an (init) pod running as a daemonset.
+## Important: if you use Ubuntu then you should set in all.yml 'docker_storage_options: -s overlay2'
+## Array with nvidia_gpu_nodes; leave empty or commented if you don't want to install drivers.
+## Labels and taints won't be set on nodes if they are not in the array.
+# nvidia_gpu_nodes:
+#   - kube-gpu-001
+# nvidia_driver_version: "384.111"
+## flavor can be tesla or gtx
+# nvidia_gpu_flavor: gtx
+## NVIDIA driver installer images. Change them if you have trouble accessing gcr.io.
+# nvidia_driver_install_centos_container: atzedevries/nvidia-centos-driver-installer:2
+# nvidia_driver_install_ubuntu_container: gcr.io/google-containers/ubuntu-nvidia-driver-installer@sha256:7df76a0f0a17294e86f691c81de6bbb7c04a1b4b3d4ea4e7e2cccdc42e1f6d63
+## NVIDIA GPU device plugin image.
+# nvidia_gpu_device_plugin_container: "registry.k8s.io/nvidia-gpu-device-plugin@sha256:0842734032018be107fa2490c98156992911e3e1f2a21e059ff0105b07dd8e9e"
+
+## Support tls min version, possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13.
+# tls_min_version: ""
+
+## Support tls cipher suites.
+# tls_cipher_suites: {}
+#   - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA
+#   - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256
+#   - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
+#   - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA
+#   - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
+#   - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256
+#   - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA
+#   - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA
+#   - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA
+#   - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256
+#   - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
+#   - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA
+#   - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
+#   - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256
+#   - TLS_ECDHE_RSA_WITH_RC4_128_SHA
+#   - TLS_RSA_WITH_3DES_EDE_CBC_SHA
+#   - TLS_RSA_WITH_AES_128_CBC_SHA
+#   - TLS_RSA_WITH_AES_128_CBC_SHA256
+#   - TLS_RSA_WITH_AES_128_GCM_SHA256
+#   - TLS_RSA_WITH_AES_256_CBC_SHA
+#   - TLS_RSA_WITH_AES_256_GCM_SHA384
+#   - TLS_RSA_WITH_RC4_128_SHA
+
+## Amount of time to retain events. (default 1h0m0s)
+event_ttl_duration: "1h0m0s"
+
+## Automatically renew K8S control plane certificates on the first Monday of each month
+auto_renew_certificates: false
+# First Monday of each month
+# auto_renew_certificates_systemd_calendar: "Mon *-*-1,2,3,4,5,6,7 03:00:00"
+
+kubeadm_patches_dir: "{{ kube_config_dir }}/patches"
+kubeadm_patches: []
+# See https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/control-plane-flags/#patches
+# Correspondence with this link:
+#   patchtype = type
+#   target = target
+#   suffix -> managed automatically
+#   extension -> always "yaml"
+# kubeadm_patches:
+#   - target: kube-apiserver|kube-controller-manager|kube-scheduler|etcd|kubeletconfiguration
+#     type: strategic(default)|json|merge
+#     patch:
+#       metadata:
+#         annotations:
+#           example.com/test: "true"
+#         labels:
+#           example.com/prod_level: "{{ prod_level }}"
+#   - ...
+# Patches are applied in the order they are specified.
+
+# Set to true to remove the role binding to anonymous users created by kubeadm
+remove_anonymous_access: false
diff --git a/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-calico.yml b/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-calico.yml
new file mode 100644
index 00000000000..cbe8c2a98c0
--- /dev/null
+++ b/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-calico.yml
@@ -0,0 +1,126 @@
+---
+# see roles/network_plugin/calico/defaults/main.yml
+
+# the default value of name
+calico_cni_name: k8s-pod-network
+
+## With calico it is possible to distribute routes with the border routers of the datacenter.
+## Warning: enabling router peering will disable calico's default behavior ('node mesh').
+## The subnets of each node will be distributed by the datacenter router
+# peer_with_router: false
+
+# Enables Internet connectivity from containers
+# nat_outgoing: true
+# nat_outgoing_ipv6: true
+
+# Enables the Calico CNI "host-local" IPAM plugin
+# calico_ipam_host_local: true
+
+# add default ippool name
+# calico_pool_name: "default-pool"
+
+# add default ippool blockSize
+calico_pool_blocksize: 26
+
+# add default ippool CIDR (must be inside kube_pods_subnet, defaults to kube_pods_subnet otherwise)
+# calico_pool_cidr: 1.2.3.4/5
+
+# Add default IPV6 IPPool CIDR. Must be inside kube_pods_subnet_ipv6. Defaults to kube_pods_subnet_ipv6 if not set.
+# calico_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112
+
+# Global as_num (/calico/bgp/v1/global/as_num)
+# global_as_num: "64512"
+
+# If doing peering with node-assigned asn where the global ASN does not match your nodes, you want this
+# to be true. In all other cases, false.
+# calico_no_global_as_num: false
+
+# You can set the MTU value here. If left undefined or empty, it will
+# not be specified in the calico CNI config, so Calico will use built-in
+# defaults. The value should be a number, not a string.
+# calico_mtu: 1500
+
+# Configure the MTU to use for workload interfaces and tunnels.
+#   - If Wireguard is enabled, subtract 60 from your network MTU (i.e. 1500-60=1440)
+#   - Otherwise, if VXLAN or BPF mode is enabled, subtract 50 from your network MTU (i.e. 1500-50=1450)
+#   - Otherwise, if IPIP is enabled, subtract 20 from your network MTU (i.e. 1500-20=1480)
+#   - Otherwise, if not using any encapsulation, set to your network MTU (i.e. 1500)
+# calico_veth_mtu: 1440
+
+# Advertise Cluster IPs
+# calico_advertise_cluster_ips: true
+
+# Advertise Service External IPs
+# calico_advertise_service_external_ips:
+# - x.x.x.x/24
+# - y.y.y.y/32
+
+# Advertise Service LoadBalancer IPs
+# calico_advertise_service_loadbalancer_ips:
+# - x.x.x.x/24
+# - y.y.y.y/16
+
+# Choose the data store type for calico: "etcd" or "kdd" (kubernetes datastore)
+# calico_datastore: "kdd"
+
+# Choose the Calico iptables backend: "Legacy", "Auto" or "NFT"
+# calico_iptables_backend: "Auto"
+
+# Use typha (only with kdd)
+# typha_enabled: false
+
+# Generate TLS certs for secure typha<->calico-node communication
+# typha_secure: false
+
+# Scaling typha: 1 replica per 100 nodes is adequate
+# Number of typha replicas
+# typha_replicas: 1
+
+# Set max typha connections
+# typha_max_connections_lower_limit: 300
+
+# Set the calico network backend: "bird", "vxlan" or "none"
+# bird enables BGP routing, required for ipip and no-encapsulation modes
+# calico_network_backend: vxlan
+
+# IP in IP and VXLAN are mutually exclusive modes.
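+# A consistent sketch (assumed values, matching the commented vxlan backend above):
+#   calico_network_backend: vxlan
+#   calico_vxlan_mode: 'Always'
+#   calico_ipip_mode: 'Never'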
+# set the IP in IP encapsulation mode: "Always", "CrossSubnet", "Never"
+# calico_ipip_mode: 'Never'
+
+# set the VXLAN encapsulation mode: "Always", "CrossSubnet", "Never"
+# calico_vxlan_mode: 'Always'
+
+# set the VXLAN port and VNI
+# calico_vxlan_vni: 4096
+# calico_vxlan_port: 4789
+
+# Enable eBPF mode
+# calico_bpf_enabled: false
+
+# If you want to use a non-default IP_AUTODETECTION_METHOD or IP6_AUTODETECTION_METHOD for calico node, set this option to one of:
+# * can-reach=DESTINATION
+# * interface=INTERFACE-REGEX
+# see https://docs.projectcalico.org/reference/node/configuration
+# calico_ip_auto_method: "interface=eth.*"
+# calico_ip6_auto_method: "interface=eth.*"
+
+# Set FELIX_MTUIFACEPATTERN, the pattern used to discover the host's interface for MTU auto-detection.
+# see https://projectcalico.docs.tigera.io/reference/felix/configuration
+# calico_felix_mtu_iface_pattern: "^((en|wl|ww|sl|ib)[opsx].*|(eth|wlan|wwan).*)"
+
+# Choose the iptables insert mode for Calico: "Insert" or "Append".
+# calico_felix_chaininsertmode: Insert
+
+# If you want to use the default route interface when you use multiple interfaces with dynamic routes (iproute2)
+# see https://docs.projectcalico.org/reference/node/configuration : FELIX_DEVICEROUTESOURCEADDRESS
+# calico_use_default_route_src_ipaddr: false
+
+# Enable calico traffic encryption with wireguard
+# calico_wireguard_enabled: false
+
+# Under certain situations liveness and readiness probes may need tuning
+# calico_node_livenessprobe_timeout: 10
+# calico_node_readinessprobe_timeout: 10
+
+# Calico apiserver (only with kdd)
+# calico_apiserver_enabled: false
diff --git a/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-cilium.yml b/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-cilium.yml
new file mode 100644
index 00000000000..98e319d50d4
--- /dev/null
+++ b/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-cilium.yml
@@ -0,0 +1,390 @@
+---
+# Log-level
+# cilium_debug: false
+
+# cilium_mtu: ""
+# cilium_enable_ipv4: true
+# cilium_enable_ipv6: false
+
+# Enable l2 announcement from cilium to replace Metallb. Ref: https://docs.cilium.io/en/v1.14/network/l2-announcements/
+cilium_l2announcements: false
+
+# Cilium agent health port
+# cilium_agent_health_port: "9879"
+
+# Identity allocation mode selects how identities are shared between cilium
+# nodes by setting how they are stored. The options are "crd" or "kvstore".
+# - "crd" stores identities in kubernetes as CRDs (custom resource definition).
+#   These can be queried with:
+#     `kubectl get ciliumid`
+# - "kvstore" stores identities in an etcd kvstore.
+# - In order to support External Workloads, "crd" is required +# - Ref: https://docs.cilium.io/en/stable/gettingstarted/external-workloads/#setting-up-support-for-external-workloads-beta +# - KVStore operations are only required when cilium-operator is running with any of the below options: +# - --synchronize-k8s-services +# - --synchronize-k8s-nodes +# - --identity-allocation-mode=kvstore +# - Ref: https://docs.cilium.io/en/stable/internals/cilium_operator/#kvstore-operations +# cilium_identity_allocation_mode: kvstore + +# Etcd SSL dirs +# cilium_cert_dir: /etc/cilium/certs +# kube_etcd_cacert_file: ca.pem +# kube_etcd_cert_file: cert.pem +# kube_etcd_key_file: cert-key.pem + +# Limits for apps +# cilium_memory_limit: 500M +# cilium_cpu_limit: 500m +# cilium_memory_requests: 64M +# cilium_cpu_requests: 100m + +# Overlay Network Mode +# cilium_tunnel_mode: vxlan + +# LoadBalancer Mode (snat/dsr/hybrid) Ref: https://docs.cilium.io/en/stable/network/kubernetes/kubeproxy-free/#dsr-mode +# cilium_loadbalancer_mode: snat + +# Optional features +# cilium_enable_prometheus: false +# Enable if you want to make use of hostPort mappings +# cilium_enable_portmap: false +# Monitor aggregation level (none/low/medium/maximum) +# cilium_monitor_aggregation: medium +# The monitor aggregation flags determine which TCP flags which, upon the +# first observation, cause monitor notifications to be generated. +# +# Only effective when monitor aggregation is set to "medium" or higher. +# cilium_monitor_aggregation_flags: "all" +# Kube Proxy Replacement mode (strict/partial) +# cilium_kube_proxy_replacement: partial + +# If upgrading from Cilium < 1.5, you may want to override some of these options +# to prevent service disruptions. See also: +# http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action +# cilium_preallocate_bpf_maps: false + +# `cilium_tofqdns_enable_poller` is deprecated in 1.8, removed in 1.9 +# cilium_tofqdns_enable_poller: false + +# `cilium_enable_legacy_services` is deprecated in 1.6, removed in 1.9 +# cilium_enable_legacy_services: false + +# Unique ID of the cluster. Must be unique across all connected clusters and +# in the range of 1 and 255. Only relevant when building a mesh of clusters. +# This value is not defined by default +# cilium_cluster_id: + +# Deploy cilium even if kube_network_plugin is not cilium. +# This enables to deploy cilium alongside another CNI to replace kube-proxy. +# cilium_deploy_additionally: false + +# Auto direct nodes routes can be used to advertise pods routes in your cluster +# without any tunneling (with `cilium_tunnel_mode` sets to `disabled`). +# This works only if you have a L2 connectivity between all your nodes. +# You wil also have to specify the variable `cilium_native_routing_cidr` to +# make this work. Please refer to the cilium documentation for more +# information about this kind of setups. +# cilium_auto_direct_node_routes: false + +# Allows to explicitly specify the IPv4 CIDR for native routing. +# When specified, Cilium assumes networking for this CIDR is preconfigured and +# hands traffic destined for that range to the Linux network stack without +# applying any SNAT. +# Generally speaking, specifying a native routing CIDR implies that Cilium can +# depend on the underlying networking stack to route packets to their +# destination. 
+
+# Allows you to explicitly specify the IPv6 CIDR for native routing.
+# cilium_native_routing_cidr_ipv6: ""
+
+# Enable transparent network encryption.
+# cilium_encryption_enabled: false
+
+# Encryption method. Can be either ipsec or wireguard.
+# Only effective when `cilium_encryption_enabled` is set to true.
+# cilium_encryption_type: "ipsec"
+
+# Enable encryption for pure node-to-node traffic.
+# This option is only effective when `cilium_encryption_type` is set to `ipsec`.
+# cilium_ipsec_node_encryption: false
+
+# If your kernel or distribution does not support WireGuard, the Cilium agent can be configured to fall back on a user-space implementation.
+# When this flag is enabled and Cilium detects that the kernel has no native support for WireGuard,
+# it will fall back on the wireguard-go user-space implementation of WireGuard.
+# This option is only effective when `cilium_encryption_type` is set to `wireguard`.
+# cilium_wireguard_userspace_fallback: false
+
+# IP Masquerade Agent
+# https://docs.cilium.io/en/stable/concepts/networking/masquerading/
+# By default, all packets from a pod destined to an IP address outside of the cilium_native_routing_cidr range are masqueraded
+# cilium_ip_masq_agent_enable: false
+
+### A packet sent from a pod to a destination which belongs to any CIDR from the nonMasqueradeCIDRs will not be masqueraded
+# cilium_non_masquerade_cidrs:
+#   - 10.0.0.0/8
+#   - 172.16.0.0/12
+#   - 192.168.0.0/16
+#   - 100.64.0.0/10
+#   - 192.0.0.0/24
+#   - 192.0.2.0/24
+#   - 192.88.99.0/24
+#   - 198.18.0.0/15
+#   - 198.51.100.0/24
+#   - 203.0.113.0/24
+#   - 240.0.0.0/4
+### Indicates whether to masquerade traffic to the link-local prefix.
+### If masqLinkLocal is not set or set to false, then 169.254.0.0/16 is appended to the non-masquerade CIDRs list.
+# cilium_masq_link_local: false
+### The interval at which the agent attempts to reload config from disk
+# cilium_ip_masq_resync_interval: 60s
+
+### Host Firewall and Policy Audit Mode
+# cilium_enable_host_firewall: false
+# cilium_policy_audit_mode: false
+
+# Hubble
+### Enable Hubble without install
+# cilium_enable_hubble: false
+### Enable Hubble-ui
+### Installed by default when hubble is enabled. To disable set to false
+# cilium_enable_hubble_ui: "{{ cilium_enable_hubble }}"
+### Enable Hubble Metrics
+# cilium_enable_hubble_metrics: false
+### if cilium_enable_hubble_metrics: true
+# cilium_hubble_metrics: {}
+#   - dns
+#   - drop
+#   - tcp
+#   - flow
+#   - icmp
+#   - http
+### Enable Hubble install
+# cilium_hubble_install: false
+### Enable auto generate certs if cilium_hubble_install: true
+# cilium_hubble_tls_generate: false
+
+### Tune cilium_hubble_event_buffer_capacity & cilium_hubble_event_queue_size to avoid dropping events when Hubble is under heavy load
+### Capacity of the Hubble events buffer. The provided value must be one less than an integer power of two and no larger than 65535
+### (i.e. 1, 3, ..., 2047, 4095, ..., 65535) (default 4095)
+# cilium_hubble_event_buffer_capacity: 4095
+### Buffer size of the channel to receive monitor events.
+# cilium_hubble_event_queue_size: 50
+
+# Override the DNS suffix that Hubble-Relay uses to resolve its peer service.
+# It defaults to the inventory's `dns_domain`.
+# cilium_hubble_peer_service_cluster_domain: "{{ dns_domain }}"
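+
+# Example (illustrative): deploy Hubble with auto-generated TLS certs, the UI,
+# and a small set of flow metrics (any subset of the metrics listed above works):
+# cilium_enable_hubble: true
+# cilium_hubble_install: true
+# cilium_hubble_tls_generate: true
+# cilium_enable_hubble_metrics: true
+# cilium_hubble_metrics:
+#   - dns
+#   - drop
+#   - http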
+
+# IP address management mode for v1.9+.
+# https://docs.cilium.io/en/v1.9/concepts/networking/ipam/
+# cilium_ipam_mode: kubernetes
+
+# Extra arguments for the Cilium agent
+# cilium_agent_custom_args: []
+
+# For adding and mounting extra volumes to the cilium agent
+# cilium_agent_extra_volumes: []
+# cilium_agent_extra_volume_mounts: []
+
+# cilium_agent_extra_env_vars: []
+
+# cilium_operator_replicas: 2
+
+# The address on which the cilium operator serves its health check API
+# cilium_operator_api_serve_addr: "127.0.0.1:9234"
+
+## A dictionary of extra config variables to add to cilium-config, formatted like:
+##  cilium_config_extra_vars:
+##    var1: "value1"
+##    var2: "value2"
+# cilium_config_extra_vars: {}
+
+# For adding and mounting extra volumes to the cilium operator
+# cilium_operator_extra_volumes: []
+# cilium_operator_extra_volume_mounts: []
+
+# Extra arguments for the Cilium Operator
+# cilium_operator_custom_args: []
+
+# Name of the cluster. Only relevant when building a mesh of clusters.
+# cilium_cluster_name: default
+
+# Make Cilium take ownership over the `/etc/cni/net.d` directory on the node, renaming all non-Cilium CNI configurations to `*.cilium_bak`.
+# This ensures no Pods can be scheduled using other CNI plugins during Cilium agent downtime.
+# Available for Cilium v1.10 and up.
+# cilium_cni_exclusive: true
+
+# Configure the log file for CNI logging with a retention policy of 7 days.
+# Disable CNI file logging by explicitly setting this field to empty.
+# Available for Cilium v1.12 and up.
+# cilium_cni_log_file: "/var/run/cilium/cilium-cni.log"
+
+# -- Configure cgroup related configuration
+# -- Enable auto mount of cgroup2 filesystem.
+# When `cilium_cgroup_auto_mount` is enabled, the cgroup2 filesystem is mounted at
+# the `cilium_cgroup_host_root` path on the underlying host and inside the cilium agent pod.
+# If users disable `cilium_cgroup_auto_mount`, they are expected to have mounted the
+# cgroup2 filesystem at the specified `cilium_cgroup_host_root` path, and that
+# volume will then be mounted inside the cilium agent pod at the same path.
+# Available for Cilium v1.11 and up
+# cilium_cgroup_auto_mount: true
+# -- Configure cgroup root where cgroup2 filesystem is mounted on the host
+# cilium_cgroup_host_root: "/run/cilium/cgroupv2"
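+
+# Example (illustrative): if the host already mounts cgroup2 (for instance at
+# /sys/fs/cgroup on current systemd distributions), skip the auto-mount and
+# point Cilium at the existing mount point:
+# cilium_cgroup_auto_mount: false
+# cilium_cgroup_host_root: "/sys/fs/cgroup"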
+
+# Specifies the ratio (0.0-1.0) of total system memory to use for dynamic
+# sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps.
+# cilium_bpf_map_dynamic_size_ratio: "0.0"
+
+# -- Enables masquerading of IPv4 traffic leaving the node from endpoints.
+# Available for Cilium v1.10 and up
+# cilium_enable_ipv4_masquerade: true
+# -- Enables masquerading of IPv6 traffic leaving the node from endpoints.
+# Available for Cilium v1.10 and up
+# cilium_enable_ipv6_masquerade: true
+
+# -- Enable native IP masquerade support in eBPF
+# cilium_enable_bpf_masquerade: false
+
+# -- Enable BGP Control Plane
+# cilium_enable_bgp_control_plane: false
+
+# -- Configure LoadBalancer IP Pools
+# cilium_loadbalancer_ip_pools:
+#   - name: "blue-pool"
+#     cidrs:
+#       - "10.0.10.0/24"
+#     ranges:
+#       - start: "20.0.20.100"
+#         stop: "20.0.20.200"
+#       - start: "1.2.3.4"
+
+# -- Configure BGP Instances (New bgpv2 API v1.16+)
+# cilium_bgp_cluster_configs:
+#   - name: "cilium-bgp"
+#     spec:
+#       bgpInstances:
+#         - name: "instance-64512"
+#           localASN: 64512
+#           peers:
+#             - name: "peer-64512-tor1"
+#               peerASN: 64512
+#               peerAddress: '10.47.1.1'
+#               peerConfigRef:
+#                 name: "cilium-peer"
+#       nodeSelector:
+#         matchExpressions:
+#           - {key: somekey, operator: NotIn, values: ['never-used-value']}
+
+# -- Configure BGP Peers (New bgpv2 API v1.16+)
+# cilium_bgp_peer_configs:
+#   - name: cilium-peer
+#     spec:
+#       # authSecretRef: bgp-auth-secret
+#       gracefulRestart:
+#         enabled: true
+#         restartTimeSeconds: 15
+#       families:
+#         - afi: ipv4
+#           safi: unicast
+#           advertisements:
+#             matchLabels:
+#               advertise: "bgp"
+#         - afi: ipv6
+#           safi: unicast
+#           advertisements:
+#             matchLabels:
+#               advertise: "bgp"
+
+# -- Configure BGP Advertisements (New bgpv2 API v1.16+)
+# cilium_bgp_advertisements:
+#   - name: bgp-advertisements
+#     labels:
+#       advertise: bgp
+#     spec:
+#       advertisements:
+#         # - advertisementType: "PodCIDR"
+#         #   attributes:
+#         #     communities:
+#         #       standard: [ "64512:99" ]
+#         - advertisementType: "Service"
+#           service:
+#             addresses:
+#               - ClusterIP
+#               - ExternalIP
+#               - LoadBalancerIP
+#           selector:
+#             matchExpressions:
+#               - {key: somekey, operator: NotIn, values: ['never-used-value']}
+
+# -- Configure BGP Node Config Overrides (New bgpv2 API v1.16+)
+# cilium_bgp_node_config_overrides:
+#   - name: bgp-node-config-override
+#     spec:
+#       bgpInstances:
+#         - name: "instance-65000"
+#           routerID: "192.168.10.1"
+#           localPort: 1790
+#           peers:
+#             - name: "peer-65000-tor1"
+#               localAddress: fd00:10:0:2::2
+#             - name: "peer-65000-tor2"
+#               localAddress: fd00:11:0:2::2
+
+# -- Configure BGP Peers (Legacy v1.16+)
+# cilium_bgp_peering_policies:
+#   - name: "01-bgp-peering-policy"
+#     spec:
+#       virtualRouters:
+#         - localASN: 64512
+#           exportPodCIDR: false
+#           neighbors:
+#             - peerAddress: '10.47.1.1/24'
+#               peerASN: 64512
+#           serviceSelector:
+#             matchExpressions:
+#               - {key: somekey, operator: NotIn, values: ['never-used-value']}
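+
+# Example (illustrative): a minimal sketch of announcing LoadBalancer IPs over
+# BGP -- enable the control plane, define an address pool, and pair it with the
+# cilium_bgp_cluster_configs / cilium_bgp_peer_configs / cilium_bgp_advertisements
+# examples above. The pool name and CIDR are placeholders:
+# cilium_enable_bgp_control_plane: true
+# cilium_loadbalancer_ip_pools:
+#   - name: "lb-pool"
+#     cidrs:
+#       - "172.30.0.0/24"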
+
+# -- Configure whether direct routing mode should route traffic via
+# host stack (true) or directly and more efficiently out of BPF (false) if
+# the kernel supports it. The latter has the implication that it will also
+# bypass netfilter in the host namespace.
+# cilium_enable_host_legacy_routing: true
+
+# -- Enable use of the remote node identity.
+# ref: https://docs.cilium.io/en/v1.7/install/upgrade/#configmap-remote-node-identity
+# cilium_enable_remote_node_identity: true
+
+# -- Enable the use of well-known identities.
+# cilium_enable_well_known_identities: false
+
+# cilium_enable_bpf_clock_probe: true
+
+# -- Whether to enable CNP status updates.
+# cilium_disable_cnp_status_updates: true
+
+# A list of extra rules variables to add to the clusterrole for the cilium operator, formatted like:
+#  cilium_clusterrole_rules_operator_extra_vars:
+#    - apiGroups:
+#        - '""'
+#      resources:
+#        - pods
+#      verbs:
+#        - delete
+#    - apiGroups:
+#        - '""'
+#      resources:
+#        - nodes
+#      verbs:
+#        - list
+#        - watch
+#      resourceNames:
+#        - toto
+# cilium_clusterrole_rules_operator_extra_vars: []
+
+# Cilium extra values, use any values from the cilium Helm chart
+# ref: https://docs.cilium.io/en/stable/helm-reference/
+# cilium_extra_values: {}
diff --git a/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-custom-cni.yml b/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-custom-cni.yml
new file mode 100644
index 00000000000..8850210c466
--- /dev/null
+++ b/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-custom-cni.yml
@@ -0,0 +1,51 @@
+---
+# custom_cni network plugin configuration
+# There are two deployment options to choose from, select one
+
+## OPTION 1 - Static manifest files
+## With this option, the referenced manifest files will be deployed
+## as if `kubectl apply -f` had been used on them.
+#
+## List of Kubernetes resource manifest files
+## See tests/files/custom_cni/README.md for an example
+# custom_cni_manifests: []
+
+## OPTION 1 EXAMPLE - Cilium static manifests in the Kubespray tree
+# custom_cni_manifests:
+#   - "{{ playbook_dir }}/../tests/files/custom_cni/cilium.yaml"
+
+## OPTION 2 - Helm chart application
+## This allows the CNI backend to be deployed to the Kubespray cluster
+## as a common Helm application.
+#
+## Helm release name - how the local instance of the deployed chart will be named
+# custom_cni_chart_release_name: ""
+#
+## Kubernetes namespace to deploy into
+# custom_cni_chart_namespace: "kube-system"
+#
+## Helm repository name - how the local record of the Helm repository will be named
+# custom_cni_chart_repository_name: ""
+#
+## Helm repository URL
+# custom_cni_chart_repository_url: ""
+#
+## Helm chart reference - path to the chart in the repository
+# custom_cni_chart_ref: ""
+#
+## Helm chart version
+# custom_cni_chart_version: ""
+#
+## Custom Helm values to be used for deployment
+# custom_cni_chart_values: {}
+
+## OPTION 2 EXAMPLE - Cilium deployed from the official public Helm chart
+# custom_cni_chart_namespace: kube-system
+# custom_cni_chart_release_name: cilium
+# custom_cni_chart_repository_name: cilium
+# custom_cni_chart_repository_url: https://helm.cilium.io
+# custom_cni_chart_ref: cilium/cilium
+# custom_cni_chart_version: "1.14.3"
+# custom_cni_chart_values:
+#   cluster:
+#     name: "cilium-demo"
diff --git a/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-flannel.yml b/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-flannel.yml
new file mode 100644
index 00000000000..64d20a825bb
--- /dev/null
+++ b/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-flannel.yml
@@ -0,0 +1,18 @@
+# see roles/network_plugin/flannel/defaults/main.yml
+
+## interface that should be used for flannel operations
+## This is actually an inventory cluster-level item
+# flannel_interface:
+
+## Select the interface that should be used for flannel operations by a regexp on its name or IP
+## This is actually an inventory cluster-level item
+## example: select the interface with an IP from the net 10.0.0.0/23
+## single-quote and escape backslashes
+# flannel_interface_regexp: '10\\.0\\.[0-2]\\.\\d{1,3}'
+
+# You can choose what type of flannel backend to use: 'vxlan', 'host-gw' or 'wireguard'
+# please refer to flannel's docs: https://github.com/coreos/flannel/blob/master/README.md
+# flannel_backend_type: "vxlan"
+# flannel_vxlan_vni: 1
+# flannel_vxlan_port: 8472
+# flannel_vxlan_direct_routing: false
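+
+# Example (illustrative): on a flat L2 network where all nodes can reach each
+# other directly, the host-gw backend avoids VXLAN encapsulation overhead:
+# flannel_backend_type: "host-gw"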
diff --git a/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-kube-router.yml b/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-kube-router.yml
new file mode 100644
index 00000000000..8008b98a132
--- /dev/null
+++ b/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-kube-router.yml
@@ -0,0 +1,67 @@
+# See roles/network_plugin/kube-router/defaults/main.yml
+
+# Enables Pod Networking -- Advertises and learns the routes to Pods via iBGP
+# kube_router_run_router: true
+
+# Enables Network Policy -- sets up iptables to provide an ingress firewall for pods
+# kube_router_run_firewall: true
+
+# Enables Service Proxy -- sets up IPVS for Kubernetes Services
+# see docs/kube-router.md "Caveats" section
+# kube_router_run_service_proxy: false
+
+# Add the Cluster IP of each service to the RIB so that it gets advertised to the BGP peers.
+# kube_router_advertise_cluster_ip: false
+
+# Add the External IP of each service to the RIB so that it gets advertised to the BGP peers.
+# kube_router_advertise_external_ip: false
+
+# Add the LoadBalancer IP of each service, as set by the LB provider, to the RIB so that it gets advertised to the BGP peers.
+# kube_router_advertise_loadbalancer_ip: false
+
+# Enables BGP graceful restarts
+# kube_router_bgp_graceful_restart: true
+
+# Adjust the manifest of the kube-router daemonset template with the changes needed for DSR
+# kube_router_enable_dsr: false
+
+# Array of arbitrary extra arguments to kube-router, see
+# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md
+# kube_router_extra_args: []
+
+# ASN number of the cluster, used when communicating with external BGP routers
+# kube_router_cluster_asn: ~
+
+# ASN numbers of the BGP peers to which cluster nodes will advertise the cluster ip and each node's pod cidr.
+# kube_router_peer_router_asns: ~
+
+# The IP addresses of the external routers to which all nodes will peer and advertise the cluster ip and pod cidrs.
+# kube_router_peer_router_ips: ~
+
+# The remote port of the external BGP peers. If not set, the default BGP port (179) will be used.
+# kube_router_peer_router_ports: ~
+
+# Sets up the node CNI to allow hairpin mode; requires node reboots, see
+# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md#hairpin-mode
+# kube_router_support_hairpin_mode: false
+
+# Select the DNS Policy: ClusterFirstWithHostNet, ClusterFirst, etc.
+# kube_router_dns_policy: ClusterFirstWithHostNet
+
+# Array of annotations for the master
+# kube_router_annotations_master: []
+
+# Array of annotations for every node
+# kube_router_annotations_node: []
+
+# Array of common annotations for every node
+# kube_router_annotations_all: []
+
+# Enables scraping kube-router metrics with Prometheus
+# kube_router_enable_metrics: false
+
+# Path to serve Prometheus metrics on
+# kube_router_metrics_path: /metrics
+
+# Prometheus metrics port to use
+# kube_router_metrics_port: 9255
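+
+# Example (illustrative): peer every node with an external router and also
+# advertise service ClusterIPs; the ASNs and router IP below are placeholders:
+# kube_router_cluster_asn: 64512
+# kube_router_peer_router_asns: 64513
+# kube_router_peer_router_ips: 10.10.24.1
+# kube_router_advertise_cluster_ip: true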
diff --git a/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-macvlan.yml b/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-macvlan.yml
new file mode 100644
index 00000000000..d2534e72f12
--- /dev/null
+++ b/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-macvlan.yml
@@ -0,0 +1,6 @@
+---
+# private interface, on an L2 network
+macvlan_interface: "eth1"
+
+# Enable NAT on the default gateway network interface
+enable_nat_default_gateway: true
diff --git a/inventory/2SpeedLab/group_vars/k8s_cluster/kube_control_plane.yml b/inventory/2SpeedLab/group_vars/k8s_cluster/kube_control_plane.yml
new file mode 100644
index 00000000000..c90f8f2ab0f
--- /dev/null
+++ b/inventory/2SpeedLab/group_vars/k8s_cluster/kube_control_plane.yml
@@ -0,0 +1,11 @@
+# Reservation for control plane kubernetes components
+# kube_memory_reserved: 512Mi
+# kube_cpu_reserved: 200m
+# kube_ephemeral_storage_reserved: 2Gi
+# kube_pid_reserved: "1000"
+
+# Reservation for control plane host system
+# system_memory_reserved: 256Mi
+# system_cpu_reserved: 250m
+# system_ephemeral_storage_reserved: 2Gi
+# system_pid_reserved: "1000"
diff --git a/inventory/2SpeedLab/inventory.ini b/inventory/2SpeedLab/inventory.ini
new file mode 100644
index 00000000000..605372e2d14
--- /dev/null
+++ b/inventory/2SpeedLab/inventory.ini
@@ -0,0 +1,28 @@
+# This inventory describes an HA topology with stacked etcd (== same nodes as the control plane)
+# and 6 worker nodes
+# See https://docs.ansible.com/ansible/latest/inventory_guide/intro_inventory.html
+# for tips on building your inventory
+
+# Configure the 'ip' variable to bind kubernetes services to an ip other than the default iface's.
+# etcd_member_name should be set for each member of the etcd cluster. Nodes that are not etcd members
+# can omit the value or set it to the empty string.
+[kube_control_plane]
+node1 ansible_host=10.10.24.109 ip=10.10.24.109 etcd_member_name=etcd1
+node2 ansible_host=10.10.25.114 ip=10.10.25.114 etcd_member_name=etcd2
+node3 ansible_host=10.10.24.62 ip=10.10.24.62 etcd_member_name=etcd3
+
+[etcd:children]
+kube_control_plane
+
+[kube_node]
+node4 ansible_host=10.10.25.27
+node5 ansible_host=10.10.24.155
+node6 ansible_host=10.10.25.35
+node7 ansible_host=10.10.25.74
+node8 ansible_host=10.10.24.161
+node9 ansible_host=10.10.24.90
+
+[database]
+node7 ansible_host=10.10.25.74
+node8 ansible_host=10.10.24.161
+node9 ansible_host=10.10.24.90
\ No newline at end of file
diff --git a/logo/LICENSE b/logo/LICENSE
deleted file mode 100644
index 8f2aa434480..00000000000
--- a/logo/LICENSE
+++ /dev/null
@@ -1 +0,0 @@
-# The Kubespray logo files are licensed under a choice of either Apache-2.0 or CC-BY-4.0 (Creative Commons Attribution 4.0 International).
diff --git a/logo/logo-clear.png b/logo/logo-clear.png
deleted file mode 100644
index 3ce32f6e33fecaf4c0392a88bfc7e0892fcd7552..0000000000000000000000000000000000000000
Binary files a/logo/logo-clear.png and /dev/null differ
diff --git a/logo/logo-clear.svg b/logo/logo-clear.svg
deleted file mode 100644
index 7d60232126e..00000000000
[deleted SVG logo markup omitted]
diff --git a/logo/logo-dark.png b/logo/logo-dark.png
deleted file mode 100644
index 5fc3660668ee5af3553353406aa55aa1792688f0..0000000000000000000000000000000000000000
Binary files a/logo/logo-dark.png and /dev/null differ
diff --git a/logo/logo-dark.svg b/logo/logo-dark.svg
deleted file mode 100644
[deleted SVG logo markup omitted]
diff --git a/logo/logo-text-clear.png b/logo/logo-text-clear.png
deleted file mode 100644
index b8412407d3c3138e8f408514bdfab2fb3a2f8ca1..0000000000000000000000000000000000000000
Binary files a/logo/logo-text-clear.png and /dev/null differ
diff --git a/logo/logo-text-clear.svg b/logo/logo-text-clear.svg
deleted file mode 100644
index b1029ded911..00000000000
[deleted SVG logo markup omitted]
diff --git a/logo/logo-text-dark.png b/logo/logo-text-dark.png
deleted file mode 100644
index 1871c0fb37749de7df02d157df3c2fc5bbfda965..0000000000000000000000000000000000000000
Binary files a/logo/logo-text-dark.png and /dev/null differ
diff --git a/logo/logo-text-dark.svg b/logo/logo-text-dark.svg
deleted file mode 100644
index 52bdb4e1e12..00000000000
[deleted SVG logo markup omitted]
diff --git a/logo/logo-text-mixed.png b/logo/logo-text-mixed.png
deleted file mode 100644
index a4b3b39d8a7df9fd81c53856ba70e8f3be8bffe8..0000000000000000000000000000000000000000
Binary files a/logo/logo-text-mixed.png and /dev/null differ
diff --git a/logo/logo-text-mixed.svg b/logo/logo-text-mixed.svg
deleted file mode 100644
[deleted SVG logo markup omitted]
diff --git a/logo/logos.pdf b/logo/logos.pdf
deleted file mode 100644
index ed7a1f5f84cb7cbf18afd9fb2027a743774607b7..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

[base85-encoded binary patch data for the deleted logo/logos.pdf (literal 288304 bytes) omitted]
zZ7?Hg%E^lYKvf3WYtOc@96Erwu%tk^mn;>HaQrLkSAB_}4P~Vnrx|PNh%r@RVIh=s zS$lZ$M3a4bcrqNIP((zkOp@#8kM9)yhLGvLQbc)pGE7oX*JJz=@=JA?BfifoKvDU3 zqk*#&%(_6amRhD4j+R}WCUPsv=kQG)`E`)M;cmd!^SFTLecyno_$?YOtr5dt2~FRc znYGN&aa_iPf-o}8m1vNs;fo#N-m}OoNOJenW2_li2UB1}6S;wkZo;f&@9$stn9c3T zqEPv!J8goe@_hfyg=}9i64%`V0*^~Qwbc!AhEHkbI%`I)XtOE(>B|dd6GamNS8w6! zp~njvxHC^s3^kU5mPK~3kq~9E)nodga{Zor!WvLJ2(IDQ{f+yt^5$--A7=);{x7f7 zyZk-=ueTQr0)AgtcbLWoe#1!!-*2l44&Qf24-p3b{=QG2FT1^cU+;f*13vFj1-{TFmQJqk(^kw4+yYu$KG*(pU$%3hG6Okove}k zvs&n(br;x%ZtyH_njnxUY|kim2jFU-=gU{jrthV~N^$*ZHwRp~FrT#Y5yQo74O)_@ z`*tRX`zOM*XS*Uc;ErjkH?ZnKs6d^P=de0?6VAaU8=Ld275kpd3(QO%d1|9-i?L^( zWX>z@3PS4BCvTPRhp`>u2V~PRcH0nMprQBRO;|VCOSM6ro)`fma*~j3gEM-B6HSHQ z@-@>u24s0=g|;1SZCS{_1N}H5c>I-K&P49x+*ILL=842Ytj*akk;cXSC-HB=;Qkmz z^5Ziy?t%gsX*gov@I)0c0n$-jzd~@jI3KsHktXbmICQkS2B$nNnuxjXmDXz7>7@nr znHsK__AyZHWw>3lSh&N(=>QatbeAv?IiQqh3;vb0Dw$svlgchDOW(U+K zP&?&(Llm)N-v}wqW>;@Am|epK)osWnA7&1mZ0=zDxWDR>RAQtN{gg>qZvQwAkAq3X zi}X7dpe!yXR>YM!bNS461SdV}0j%MwEi;dhcDg9g*tW}PngB}r6CgFuatRHjh7Tr) zmQ!=pyXhnnUPy2Af|=(FU=bB^cmu-PjXlF?t*RPRc)IiSq2RhydUlZDo(URGVwyE( zcb24I!ApXfsO!5FG1oJ8(>X<>GNYiQ=Aw>pq17&Dh(0UT)y@aLWs~AiEDFPT1)kk} zZL_#qeY3^&7A<2m4$)(j=>0QH0M<9IMN;sVa_dga-p3TNpG%MvHRG;0Ck}N?OjbQK zcu7i&^29RfK4F3R)8k)Zt2`a=n$3oib24GUVQ#W-kcFBh@jNfZTOGuz5aC5T9Sap( zuUF#4cyGh}D7TUTUpbpwwYp*zrhL;|(3*QKR&sT=S0i+iwK@-;Kl&EHHbib# zcX#p?C(Luxcz+;>1eob%aRZr4w43cfFEKAeebHF}d43II2~3)zmN*j^&wTUd?oEnn zn^1x((PtHv@U>H$7ILJQ9<~&=A?n)>&2+3E&9#Ok=GwrNFnrn}aICZb@y|OGr7w=H zL&A16P?U>itsuFwo{KFY1*(P1bLxg`J?TMK;01Kh3YxWvTxy|)HR_CsR^TDo>YR#p z;13hF;3jca;I@03){MPCWi1!7K1Q7$k3-SJ*I}d$8?f_@1X7WbzWzmG&_2_-El|m@ z4qDo&Md!6CTR?Ps1!7cZ`T~X7!+t35U3?29XkPb~V+SULdW{?u{3M>%YWb zU|WyzUz!6r-S5BGLEquKA={X2P90L{Sbdv)21KVXgCeuiOxhu^Epa~atXP7(=%Jb+ zF|1z#11mTA1SwQ#-4PAFt*YYr1_~9{wn3+RO%Q81FZmq7)L#Z%YS^GN(T6H3HN1z7#!Zjn2@?{! z%o@5B4XI)oC+o!R@ufj2H*`lzu8YtjpwPk>k$*A|O83_$xjJerMGdB_;QLx&gY%|V zwGyAUf*Va8&BoctD-tJAqfI}?m5d}!auPH^med6A672YAfVI065KUUb!aW`?|C0*~ z$4s1wn07wM5_JJc?_U*Mol}$iV-+5zJ3>hM&rVo+)HfpEldJjisi<2=M}LK#+M9Cs zI~kN}7<}bYD`hG#G+N(yRRB3<47hYa{`=n|Uad5f&KC^^X8;aPo#YA&-m?;Vo?U?Z z>32U+C8UqJUhBn3GbddX$8M$gVv>vf_oe1bymd-TNK8(tQle zFg}Q+Hep5#gre9*Bgw#~G*z3)W!TwR?OP^VO!~Epz;88~?bOUD$&7ilGHr*0H7;j_Uv+S{H6DYdUwZv(Wzn zY`C6G-e@I3H;&u(Z0OuVLevMZ|)dlpnN*s5&&S@@>J z-G>iUVxf4g7@fm4hzMb3w>M?63sD+7eqwBI_5%HEq3Y~yOU{SgD(CjZ`fYR1kOMqv z=VzNjau6_`sx$`n=NlyM?l{DV@By=?_`=DEw8+PWp9hN8tlond+W;5pQuO)^l;=~td3os>Eu|)VDks$@zpbah>uHb7 zKk$ip&Rs&5GH>D?)kxo45ht6MP3J7UOnj_oJ}cZzxh_4K@`5rvJg4xN-Cj(cwp>iW z=$%xXq(7Bk@&fj+DlYGfNZPs|x_;jqi^@67u?=_n20fie~rdwwzYbC5c`w zo^#%xk00^RcvqBn=2!nflK$)8=#JpJa$T^wk;~H7$}{L`6QMj$aoy%#@IhRp#pBUy za;1xt-s(c7yK>F9x|_O)NB`m#YUjeZSML@2Sh8P^teh$Ck54tj=DBZy$d$V98RQN9 zuCV{nxlMdrZ)!l9g${CNyX3f-Nse`0}d)s)p`vD%%JBg+V7OR~3WO?u2bQ!5Mn4i$Ty zC#+bKq;9#;z%i5*WK*h05=cywM50TmNBQV~S;9vFZI80w17(tcVuF<r9y|Ai? 
zmkTBdt|E;3U84N9W z|0ju8v{xpBxo9_ILzVw3sB%IYse~gLvMOwhPi7i}Xn~oyoD3FGtz{ut(MR6Am*mdY zgDjGNmKJ>mR+69wN;h**y)Tf)BWq5jmq1)u<+eUDhvJ4fQ@fC99P5Suh0*{D$9}Gf zoGFe<>aB}J3IWOco*M{fWJpW3luBbNm-+_M8Vb?4f)gDL(bac?FE*VFCN0i8NR5v^ z!*&T7vhIt=*b+D6?HhuN!TEihHzRWLHej`lh1i?oD##?vlP#0V zY(ORaj>ba-OT?q(ims6i8pcwRfk zs`L*vHJ=b{FC#X1nY$UD9?KYV_eSmxh83b zLb^q5->OjCeM#XklvGgDU$Bxw9-v4|A}EO)Znp8(F1SP;U^Fg`uo3BeAk=?!^1@c& z;@;tkIBJc?>9{E8bD{3ftT4l(#<~xZcNkhGp8 zMFDyz%DC$u2Cy15i(ezC;cf`EGOmS`k-$nL5|;&TwG7hM`-Dh2Zp%x%J!*pb?`wTw9!T4j)a8zs{P{0p(rbYnd$6W8!}_@iDf=}mr)Q0@Ejdo&;sC4Y;Z zCg8L%pZDt^qS-!7RuY-uEd{wNtF6Mf1s#M^3b{-LjEXoZ*2rjfsTUa0SrQi?WW9!u zEx8@DCex1Z#6I&MUe`n7ou_Eo*Ys}7jpLD`6sHW-!PxR}v_MwVc!%sIlrES69Mmz@ zV0&UT2vXX%Szz<%^p%)2@POZojA-UUkHfzun4wg!34%4?Ql1g63g$B%G$Pb#5D58J zKwTs*0t@@_Y!(z=;v@cA%J@9qqmPE?ECWPgfD>4cMGJAvv*WQZVowoE?@Y`GT`J87_9F}|vE`w%!dsxy^>2U{@c zWQQnR(RntGmjV0g2#Ffpb!n2euDr2D!sTq9RI64^n4#^RGOlo1;d!0;%Wk3sQp?tY z*V&+NE&?+&1>@YS%h+2j%3d@MDqW{5klxhB-QBsgNus;#`9mTpDiA>!sfKQLTj`i3 zchUF%s5y=?{L)CTi#a0kG)P3~Qd<(v%$OyG!*Wgeh*g49bUi7uqFsPMV@dbErhjug z!=wylwH4>f#g4wxD*`ipG)+u8W`&d77Zri;0~5pD1l{ju8#$=?W7A$b7mV#h7s6iq zvbogxXrpz5Pkj)iI2?GxH|IA0sMEWqtvZUBaf>}m^ro6q?6FoLC$y`eGmsnPc5O~y z4o|q4phm(l4mYl7N&C!liJI;3uD>&HkdCkH&85Ndukep~fDeXo+e@&xTwiH{<=#wO zyVZ&Ulpf{L{idTK6a*c0X&Icge;u!94%mcgC9w7H6{D zHKn^)WPufeEQGN8KSyXN8M}}vF0b5N(Tbr!dum|WS;LC(oghNSEI|)CYpb46Gb*{6 z*rc$rOCL~gP*g!gjOvdsX9wHztotMdd@$RDVtzM#f4ylK(@Ym1u@^OwF1`f7hK&<7 zgMNaHf`?H=BYCd`s=`rw3xmb`ForZJG5>k})8C<_Dln;kSdq4zv~kv$E!FWMpt!^f z?}67MG_~z;T?70qbOp1aAlO(Qa$NbyR8FY$r=_u}$zIZcY->f_+ovVkM7lAd{V&8a zLsDbSy(-_Fq4X2f9_TAx0`6ohP|BvEj{!|=egV#<%Z3H3FSoSWl}6RgEzl;Kt&XpY zXn8*#n+fQYwrT;Hkfj6i$gCzVY@%8{K_VZQj*hJ&AsJ7bUT*nhVO;gD6O}a>-<4re z0HY0O2y=|!P<8W6k?ZuO;vI2Xc7_frnR*3Rw{W1SlJFhkFOIC|!Sszw(QGm);i4t) zfmsO}Bi!zl8PFt#n6vdr=xJ8U*=FRYf#0VE+gCjBFPgl`o(TZb7^+36Go_Fb8L|9v za$N0&HB3zJ1i&h)UbDMFQ_jP?V7KSTGh%+Cgbml5o2~3<8NNGyk(?qN%y$d|^@SW~ zsr#NPI~uvo-TklKQ|$AgI>pYCmbc%eZQPB^E^yu9E~)KIOD(eMTH~#Zaai**2&aGZ z;_4;@Z;)x}V@f1JW(K?N#YV-)cyDTe3Kosn0p+CB(@BL4#wX1IO0xD-tg z&OMzm8itPWU#Ub^JG&RK$ZcQ|D4Z6z7|$adueVG;=rAVjmd53cb`7bLOdGXofVbu7 z(Xz9#;SAncMOuF|zdJg+WDP95&1k|2A=XScWE8Ph$!rRdOadGB}i({S< z;&T~BD%uT2Qua+diTuJnHD6*$K>``aTuw}sWyZ6?dZFM=FNegH2+kI9S%E&0^>2Cf zr!6%(>2H?M#l*Ixx^rs|@jK5Lyi{{?Yl!tG-ma}^8=}%bxb)&T)^$NtJbo9oZkj&U zthgePssPAN?$E4g2gRsmp3A5=t?YkY;UWjt{&wy`*Uq>EjqV|ab)wU-IMou5%(8Wo zf`cG5St2sg)4$t9cw};psSKMSu>RyodS!aQ2{wEU!+(pjU70t5hcW)56FoA>S5OM4 zs@$_$QN#bC4JEVISubRXNyWl0Soj3%a!dBNO5C(prq>t{quZ1LE*#fk$Yi1(@v`tO z>ptPr5VZ|#MU!dsy1Ggukgb$A5*0k|yyB-S3+b#EtPXhN1=>ojY<{9ToUR4PUHns$ z;`d}euU*Jhig)kNLXTw012~r2lCg0db_e!sSAnIPRh3t)=y|AWRW4$6q#0fPEFH>& zjqd!>%Qe74wUw|CEOsKEe7;u1?}*V2R<7Qen$<&6SshEFBJNpxfMmQ^dRBt?o}9#5 zNNjF&C*u#y%mS5k)tiSXcQlSQ<1CE2A3D62-T38>v!padQTA3o(qiSW)*2-?9Fxx| z0;^ZcIJw?*F^-)?Gf|8aC!5{lu)%B^MOCa8F?{s>xj<#p%Rt{01cHBFi9E+L{Dv67 zkkBgR&9W&?P*T>I|6F4>m}Q}(unWhSi#e44*mTL@!!#4F1!;YAtEhPvVj-5d#->b6 z^g_DXd>|KIhqIvMx5ZmC&TuWyED9~%l2Pb&=ooGe8|bk&9-tW;yISm-n5VlHIP;ir0v7{kidUVWye2%`NNn4&4_#ojv7k_F13q5(N?WMQNhD7+ z52qXngi}X0+~sEomp4ofm_|0KqrQ?!j^rO;75G`qaqal4h@VnGcj(smla))1ari|t zFgMX}|KI0r3;Y_f^%T+a4FbLKGF@$OrcaUPLeJJvXb!Fg&(@SdtyjGAlRSLaT{qia zf+~AgF}P+G!;W3+lq9^eBroJJ!B#%_3$&+M<6-;{o`kzme>W=~x1po{99KImd9(93 zTjJZGdKnac9Zp_N?8EzcbC^G%a&UJq@GbKr0PC(#20_#s@e;`@<>q|~maF>n0d!Pe z$lLF)E#>$6PELup*a+>{6INaQcfZ`G-28TB&rPU01v(^d-@ybwj}fZ8f^<$UEU@FxLV&8-wg5izt&ihvh!OK{;_Ez5+Co zRP(Q~Au%~CHocQZjTh|vzi{kVPfh&&KVBo)x<^+03k0Ix%=lM8TX*Ugen%R(&_kQP znC17Ev`(uxuQept%m9s(yCC>dXz1{2X6w2j3O}6NAZM*APFB12+Ikmw%Poi9IPw@s 
zTk6U2$m4%DrQ5*nZv?R%-~SlUykm1WUq^CnPsta(g?$V&G`s7lbWoR%b-U}=CH&Uh z90bR5ym!3pb~{(f}$EHBC@EzI?LVMlGYz+vQjtl)fg-tT^VC$D5lz2Dy8{OCAw-WSR_?b|5w237+Qx1myUh5H=Zl`2yz2XIx{4cZ2zQh!Lf@oS9h)68xHO#JWT)=AWOW7Hc-3d7?_*NY>(@hYH7-)%Dq zgImIjCQOuoYJiZWD=2%KT9Zy-_E{))+mf`$kL2LQE|7UExiKWZ9;(A=5==<}RxH9{ zWy1G6bAv_Eb&(a2L~`$g241+HfW;OQ3-{^~lg7FH4(;iP zRlOn^5bgL=iVTC$h@E&OXCsmgyhKx#Fz8ISfMj9Y}YT_px}&fqnbgO3hVcS}#Xb@I1C?)ElIW6Gy-x-+agQj~Z` zFNx_hFyF|%_}ior6BRnm3db7=i=ba&Fx za=y)r?j7k^&+rN_*R%uvfg(Gn%l_|=1H$j2-2iNs)ZQmGYwm%EZy>#%RUE`ehl0e8 zoflL#4EuD(u%-pG0%^BdU35BnpLHXgshmKpJjngq1Rf{?6{qZDPfpYsM(XIRo1UH0f%Al0`Pxno;`l1i7 zOVjfPGBn3#{`WSe6r>YQ=U!ZqY*|5Nj`foXio2CWG+Haz!AGD!uaiT;i!(Kpr25rUH>j_Qrgx`gygzBHFyFGC&IU%1H z$SL2nRyOyIEbtWz9jf61zdJB{+i)!i736j7lG7|3W%YF=LZm9oRGaUj&18wj+|Yn< z(#U|ZPRlkk$T8WplF9Y;Mk7!Po`DzaEzd>;Kvmp>nxI2E{-a#8&ufNi&UVc5eIOvV z#{NM}DY+t(V-~C7Ds|@u+oL2vVMzy5r{vFBaYN z+qUhbW7{@5PQDn^|IB6b1Fhu<;;bLNfR!{wt7b7-0VruZ7f`_LZaH3b`GZ?FuVkhci#ql>qIqGz0aq^cWPPbCcogB9o zNocZ1&v#bwI)#Ke5tRiMm*$G1lrDh+jurU#vP~g^o}%7?9ld!Y$7zlg%n-=2IgTpR z=Kk>Q#BV;OpK?GkkuZ7jc3!PMS4A8JO@%7Ap|O6&&_)rqJDiaAysm+46-I#9xkskF zEE@>3!fB8rYIJ_2a01&~P3(fW_Y2_P5Li^zcX}Q@e1i99QF-*^2@GJxx+$W_@%h6w z0;^r~0+Wfmkk@D+bo=1v&7$Qr@fwPX(3MCU13cdXE(T>HL;?Zp5y)`}%1ma}tZ>`0 zsd54ZOno8Tmlq*30_4V%zPb4al2*wQn7)qY78138?J*R1D^d40E|~Ho;|p2*K*P;F z8%5mGbc{gv8h$vJMo=xbF3C#|uN?~5W5m(O$dtvK5iEFovZ&|ni3cx#ljXvN49%-f zS7)SSeHA6P<-lEmLdD|PVq%s&NHD&{zBv{A%udCizhn#t*PX8g)_@`OZwe%Bm|T%3 z^fk~KZ-G-sHoSQ${jZfy-i6gio(Z4`U^14%-K8L6S9)_xU^$BVEsHuU-T~MHplh3| zY&XGV55bia3+-|^;AN!hEOuCvM=4_zz;r8fbW6vZMXNFE*U!D~sifPsAR+**kY^ff zc%bi<*%Kykw>zi_g%K57)r%5n&z00tJJ5|3=Nmt&HTQ+m`vN1@1hJmneCM-ih&HklXUEn z|G1vNr0SaToQ1>oPsE(-%R1Y3V?5_lwXkif`hwbdn)TW_Zk~?N*1nS?121fUp(j5e zQQMttK~~U6Bik;7Ya?-t8otXn&pzkp?n1hiD7XgtB44HFOjfX&En5Zd;MOJKYslqESwvVO+J)FmH7U*V2QJ*rutd55H=@Bj#OzWu;dK zG-pL1Z-3Y55t}K-rk+MmT-1k-JlRJQ&UmvPPS*v6q`qR8ur~O2QswH2>c$gCOXZLU zSYc?pFw3xefuc;1(XE#RVK~IibUY2YvIUBjVI{v~V#Gq)o3tKfC7yr$`Y?iMo^R;n z1r>u4Y2qun2*em5gMW+H=E5sxv*-FK-Mx982U=?PEK6^9DQM3ArJa>GlV4g}cQl6~ zZ1ng9?lHfmu7BRvqjRAx*}8ow3Qt>LZ>x-T{2;Y?T(Y~gL29pua25li+xA{CH{##7 z01!kIZsPYWx{DNnpZV<02vZx49E<;XyPwPVXI0@{myycen$Q={3@b8gf;cha6#f+J zTnJkd#I$)!0rVsm(Wt@fgsL@4TJ{|XQKbvvwIrokQZxH8L&62KL{(|&muC5nNuPue zrdS&y`bun6Q{Sxm)-+?E)|9Mi|7!}#Qa?c~#yy0JB|Uq!z?!J$=bWs|hHd&nLIVox ze{ZBieMDIdAO@ZvR^fIVZmb;yl7ZS$l1w8KNj`#^7L+Eg!DKo0rWxXGqb>A9tD5uL zag^w3sf~xFK>a~PjmGT2yf(~o6$#p+AK$GBB+@NlxAAnm)1(XEI^{MN^1A4hy`z!- z9lb3d2-G#ID#MD)Xi`<7i;<{yndJ$Z>1~Ou7faB_KBwj@vAEY%TS_c`&-0sNXbHhr zbFW=BG*X2W}@O_lyNc5M! 
z?I8Gu{QXVXy*Za8@9=YT-X3NVFRs&dT}mKM(Jq^% zrYirr??&*=l#@91#g3FQ*0wr{6GK__2h^s64TxJBin<4(t~u_pS$^=1x4(Zeb`B;o z4*VZ+Dop5T=`m(S+AkVj%1Ue-k(6z6Ch1&^JIXBM85Xe{Do84se2$`2MBnH~0G5|M zTyZV3@P>#BQv(ary;>_@#i8=c)WvQK?qflSFhWJ{kUiwk!1<*N!PDe?Xx%wLQ70`Q z(miAo^Cgwg2!K;IOgWTtKWiWVrfs7>h`%pA2ULHhV*m(R{-&E~trQ37BRo{@@>=a~ zf3~{5sjz9L&%5eOpA(}aLG%FioL5JuuWL!_CDmwUp%Ff{pEtdSM6oM8-oSSu%3*5=2MKKr6E6lA|IkIeO z+nRv{6~&^jwr{|~S8dY^dCIT;r1qR#JDOp@a9a+A3OD?r_bjgTq4U8g`Rbc=@V$$ST-<3-%e~?aQG(7il8vNwBjHG?XEC(h>R0Vh-bp0o z7;CP4i8F_=Zdl*qPr)!kaCeid;<%!VL#`0Tab+4``bGj@{CKKc@!r=$L3T2_xsv90JEu6JnWKoJD$yxF2vkJvJ`Kx`w-XykYSoa=)gxe=Zh|6lRmT78WGR1{^H~6k-?7j&`^gt zKbfOi{N~n#!D0+KgH?v{N81bmL}%}GsBk0I8w)+^K`Ej@9ZQUG#GPv>O7P5gRg zBzNndLp*JxsJa{D3~Xiz9wPOw%H!X2{Oa_bTeP_bYs5JluJ@>8!5ugMgaR?8M(I7& zCH=u2lZ~$Bo+|3$=CJv11pOt{b=+at)l7jM342$RW9vn8j+0hfBvDaiFulb%+(}Qh zW3l;S??P$GS&W3;oT*(azphI)Tj3VICxg8m<}4CKN6R~qeWl;Pj-~lWIbOfpWA~ZF zdCh$^Id`3AcX23VrKX~olqg?DPz@Mu&nY0Isi9*daxLmAJgrHt6)VV)rXW>I^DiT@ zF!Uwr@b?T#Gog0Wu4;W`=!0y_(bNNFxXKz5^mjxKHO{TOW)YRkD01&ZSquR~0BifS zs3RPe9zKFel$8yY&EQb43vtsQ_a(YAb(AgN0d4P)&4ksm&xHF&!wth#P97|K@!v}= zYzL<|3o<(hkH&O&Q1R87E5HK67vM9TXVk8Xht_9qg1;5N-!Ux&C-5sz)AZ(_GEr4a zGx>gVy>}c}A-|GUWYbX;t=?e>ACA>{p8z|KugDuP%w(i77CYl{ui`Sjt@xVW1+g%m zmW3PcG~Ej@>l#0|+hvj?2C3zkq~%zAcDKdCB!Eo^-8_YH!1|tz>cj?WIu8%*KbuwB z-I%3VvV?`LuN#vIy(X$j*iPd%>cMJRW2zY#I}-L)mR?vNRWS*YiK_M@!`ZJPb=Y|f z*ITY@XqF1mvGWH-A};f>XiF*rbCKSvZxMhE4t;2YkW>T8rw!$Hhihj}h>hV+z&y!q zPe6*o$wYnIIYd~m!Omn`vfS%Z=XOf|QhW9W0F8_0x=B^Z-{-UDf)F}0O!qIF^X^IV>b5FTrcGd); z%n~C?Tx0y46dN&Ti{Xh>i{NcPKM0QgXWcluLkBO~EDmp81hlDv_hdB_Jrqj#aF-cJYf$1*?1CExn)l4; z?IT2)ouDZIQi0=j-^ zYeVmiaqQ+yGOBGyuAGQuu#XUGe6-R3X97#s6iDi85>aQ3(4gS(1_!xpgXVVUgHMSZ z1T%@(8PSbJ8rnXpLp!BjT;)w)Is4s1Z zs%jAALjG}QAd2{CoN6xycwzo1JZ@QcHI-~_HA5}%X(AtTSvwpH1;tGC+ zeu!rwV#C7B#w_f~^h@XOl}6LMPJ?`Yoeh=;|9|_wNp$Tvx@>+|qehuK#;2enSe zO-?+Is^>o(0e0|Q=BelShT-*ozbxdcU{u7A&%Bj81RY-F5rL1!>Mpsb_a~ypw`aD~ zUh-g102+4)H3jmRLb`Y0y{7?HCAezFGdPh!R+c?|!Hb|I2Qev_N4NO5L!d>v#~6An zdqHuNEO<{4iWcuKR9lpFty>wNVmN_m!kU#?7V^$7cZ*!`FhLR#3+~RAE{N4dgm-S7 zvt?#MTA1L#wW3!mM}qFr2XxsDg1jgM#1RG8xG^K#Yrk1*z^DRBm81p1Dd1cCVHUx} zAVwBrleQ{)tNCmrD%i2P^0>*&&J!1g1P{F^%TMGMGQfL?P(fF2GY_+ClV+SK$CW&6%3&BdroeC+zDZFRK=;i-h-X%>2_xUi?mc z{{YR3x(-+uaU000tUW6<`ro42mwf`Zc%tt5K*X?eQIm(nH>6+-a6)65)eWtavuT#u zqQsT)otQEuN;^9VE1|uMs+QWlv#^w6p)gSW=vVDfLpLyq?9zMA?(WRE-DY?Hz)c_4 zvUV2OqgVvDKJfw{!s>^Ek6O1^K|Iyf$M(ToY5GZnN|Xo5Y$tFr_0hogn{e-;iH{uG z-cs)de&>~Qkdd1%?U$SB+v~A1+Bh0^dn`Bsfc6W(Q5dt#-~^2fg13$T)an`aq5|;E zI>*Gw-7mpy75M#&1equex`i2Z(pil0tze4xdBg?-)E1-Bw>X~K!|8$aC`Ngh;sB(s_K#|JnuZoU~;J+x&I zrgY^|R5Ay>0`}=BCxdM2OIfJ-?a*Ep$_l5$#}BQ3fEIWyu7#{YRko!`^ehqDT#k2&3cAcb(VcpTt?d1`3)BqGsNpT!3mwQ(IgT% zWL4wyEQ6qo0wGqxs2$7SVCNVq@FxzF&>KaBk^_tAfaS1 zLy}Vg071lLCHn}OC6+APltKku4wsQw`sOp{yW!y7!=UL}yvVnL_9uM{$mW^35(bf1 zafaK=4v*JWKN)(fwTIB-j_N&go$=Cdh~AT*0_~qUr1)?DwGf$~dIMoYXcOV6K$Ybt zyrOx6x_={@-i5MtY-F)9nq}02kC~$K$|@?F{^Ftl5F8Q&OGrx34Cq_I0&+dV2#4Zx zuMXIHyO$Ysn>gcYZ`G)~s7-NMiA^Ip=jyJ%BrScY348=w2Zjvix~4e3>Ko$q%$BUZ zIY{&tp3yzkuuKKkUKm)wl<~f~_aSfIY2Tt>aN&cw&j^6<)GFs4vcYh|)4#FU_Nk9C zR=+LI<#b>a-7=S_<{P$TV%-*MCh#gjTEpXKa;RU4tPhPuV2u}^IWnDWDgbuGc9KHg z#q5*#G#=umLRbn)56lx(z|uRMtC7n^+II*M+apj6ClRI*o-)V}Qkh1UM9||B-Gp`# zk+mIS8Hq#yONbJW2fWm2@=!mR?jnf#IYsQki-7H| znGh(|G-$smW#NrvwGjwJJ%b)jp!Qi2IVJwf*Y?LQ_V+*p%ogG0a~F1psdiLfcr)#` z45>MaH1g{Cd&o_85~x_>IAGrd1CxbF$ewwTfFK94!?kn73l~1~^_F1rr?9qlidhKb zM#PYcnKj?phx|Esz$gaT>uWh3^ztNgKk066x2OrZ++$34uM}=C-lcx`BG5{4gay2y zvjjb@vhFQo8ll(h>S?{3q1xuE_tYVQYi21_bX6>}M7sl} z>har8N80rd<&P>c6JyMsq3_PQX9Tnn$Nvl`kC~@l!E;wS*79&EIrYZ##49;*t@u&k 
zCU`~*nBLb`e8-SlmVS~j@2eafeDLuK8+y$n5O2xMA2`$nj>@gREW1ZNuw?p3veeFNv8vP!d8Ku0vu;)w1RPQ@6|WgdoOVDiXzK*(CbvVfk{fj`GV1>BLB;9(t)u!0V9Q$9n;!dD`Pjy+Z*2jO`Ub< z;$Z0yWzD_S(G3s|s|bMNF%l=)^tyYr7_T+-S#QiFV|3STY^(9w7Fw%qpb{XYns`Y4 zi)MF?N&&*AakM`8&QB`rmv`um!#pc@E{AJsJf||$|AyBYhCky^*{tY7dw;-inY|0Y zr2hLf_+p=pntI0zRCwn^eNKN!`+iuf-}?dlW%${eenj|$XwA^Lkz$7t zBPFI6cDm628Tk{HBEtqTDv$O`ASx6J$H058P{e3%kBT*l)0C!;8(y%3$SZA!JPI?y zaQ{5VAssyc2d6F~3vMOWI!?eZe`jyn)sA-RoTAQ>jvnD4^=F)*4wdxJ(hZY}Jd0_@EF!QD5PQf( zOpFq|X6S=}u7kAo<}4x7r8g{i#{p0Jp~xT{_9P7>3vEo9n+S=Tn)$Yq_rh|wRW>5c zPNAvt)nhHDFgMZnSlvII6Fk)pisY#%f#FrX(nz3EhTVxbsULNsW^i^d#mH1ckmqZv zHww7&`rAspd@kWgWoxeYv z669($+J37I9mWCaHU+MX1|#y6kZh_f*3F)clkjuPK4z~tkveg@AA!Nj`MZvXB;u_Rh36%fgo&c8#@$3G7&w!X zxd=-BWcBBJP{9BI#mv(U`#Algs@<1R`^@C>GtP;~plj)g`ZjiX4qR<$&yh|;vF15~ zxnf@n+<7TJ5S%n5HNR-eBxGzzeSLsKHqom1a{F37Zk=MhX!;y5O4_21OY`23F!nBg zQ@xMix|6Kpdkw8CDp!`s0;r?*%n%s@8i9o(x8;RftbEKv01*oau+k~7E|jJD zvvGJOf8Zr^va^mIcZp}PkKpO-K`gp{H1#Ysg<#7vcUz~lI|eu%8v$zoeE!O7)QVgY z+ZSJqSxBx`xEA27i&AQI`99+ZpI!|0%u6Jvs%-C7*s(UXiC^|%_X-MU5|ct-lL9d! z#*-8>5_6ZTviY8S=$aNSwL~Z8pto5pg6VEm+*Bcn2?n|rCre=2t@c)M0c)8vwR4he zbN2jMW9!GfdvgKM;kvHY6PT@FAL%=JMSQ*C?+cWWb+CS6lcP@(2@K@$qCmd!w9V`^ z|N64TI3edg5+?WXNLvq6o=&gBF*EmmR#ZIYol*>53sh^Lm_=JIrurL zX5t*&Q_#$%MGgKN-GqH7FSlOa2MJUG#|_3^sfD(gtgvsTDd3JH9QEf{qN+O`LPfEg z+gwbRbB7Kdog}%P_(IjdJQk1c-&f>j%}+%l8ECfD(O&@JI(L^HejXtmn2B|}6%12X z92K|9sk{?7K9*s8i>P+NAP2G#@1MhH{QOlv{t9BEV`|%=a7828^3H^t@b+DTCxU(e zHA!)e1_fG6q$|^BJck>2N$tEt@E6XcUbuKdv=t5MrZHdPr`^lF0TYR&>wFE}L)sM? zCDI`5jsRARd&Tlk#*}WDo=H&snEokPA;XF$rX>>gJoxw=UAN_H0eFfcuI{ce@4Gt( zg?yuvYHbZYzX5S8sPCJ@0s}$b$%T;Ii~M*E|E15xfaqx9feb_#^Jsm`nDVi}-nsRh zm~9MVnav%*A~?Cz=)@-YybTK|S$w_2@0p;kkNMTm4HcLSJ%tP+%~eil06UaED9eOXRF9I^_A}1@?Fh z@3lwGpLC$_V#-2c71oY_L-1cF?eVZKsBA4!ey3FO<+*-?v?c=|GschzeAOQ}_ z?yXOsC8yno-Ym1%7)kAqePmIs=lNL=l=Tf7R1&MzZTDU1S~o-BgjmaR6|n1R0fBEo zvEvS_-%aGNHYAtub1xG1b%UmwP63)v*LV?3L}HHzFQCt}Yh5|Ko&1PGn6RQ#>dA>) z%>_Mk-nG85)9d-X>+)eedtEO~Iq6aWMnwNTbUG-f*x0KI*cZL$xiCc{{+I4{gGccZ zcg>+)W5<6Y1V56PDZKdYOr2$z_se>)ay(g`gAFXvXk{cOnVf!Kg?ErSAQz3W{^%dD ziu=)wGxW$Ar`cnD!O!r9E6&6TCjX1Z1w@fmS>}G`1lRi&)*CvgnsU%3V*NW>l8Vo$ zWIQwBqBSxVr~J27bspZ^daX1sJ!(kSmVk%< zIO8D-IKR0~O0f4Dz0hFdm{U4$?+3JQIv6lpuaPwN?ucZl>xIPYThQ3s;t5^FB1V}; z9h~j}fg!^-x{W=Bh*mVjW!eJ4tpq?sYxc%i{7w#&YQ<~i#*+59Nl6aRlqGS`CENEg zQ99oI;tW(pix)@w7!?d5&gvJMj-Fz~-F97?t3^Xcf}ZUz(cdp3NDf+!R=l96&ey}u z>`pc7=<0j|1d(F~X+K_4a|&*~u+ii+W^PwE;*agcnMKiNwdMF6ePTjHK(;IOjt;l( zk-)NYuM`*|Y_Dkcj-c4GY%}Fe*9O=;D(AR^g(iU=E4^p1{NY$Ap_B~RC6J0j`Eo>- z;&#nmBz*7u?NP|QN=(%bpJ^vHn>%vfS0z7S@LgU9&8CU$kBGThXjQTs1u2-UfvCvL zJZzHt_$npYYh-pVj>P@VPA49R$OR?RRe-w12iC%GR#aJ+S*Na=Y{Jtbc zKj=UYeJ>Y67Yp3c=UTOde~6;QD|m__aCYFqdpVki>i7+$n%J7}JG#58B)1^mx~SY{ z0I0t3D?9T)7wm={#x}2yY2U3!t=wC6DoPGDv_cXxJevHulI4__-I0mKX9zyYB+68;vr`&zuBufsFaK6R>3EzP0k!DG-Z48yImi_I}1i*gzJ zqgkF@n+))r-SLT7S+nrI_d-j!sSA?27DIo>-4VHAZA@1|zdjBuvx*J!+YjMzyQH-#?UP=ud8!6~1N>FjNKP2)Ol9?9mU<6*EV) zYvPcDdKFp(vhwTl)6hI$C zu74l;ga}s0PBuL01%C@}?E8wK(?6YTi1qV&r}c{tu7Y{)gB)zpPN$n!1j&8GnjNu! 
zcaJGw2oPLW$B4~&B34G}@Jtk^VtNTYSd1f{#e8aua5mHi20x6m+CXw9Atch`4jX)M zh{Mlasxi^LqC$tEgKPRAnWzvOYelcI#^unfL3%?If+HkmO0v3g2s33CPI5TnAY3X` zvfTd|@=u2?cf-o57if4WL1Na%DFT*a&kgY5;9GLL(*5?KSyv?J!3kOE?7^*X>aucD zVKdFwP`Is_skvjL)mK1raYb8`v#>>w4U#&q{&{^0PYPsVmwz3@utfL6Ee zVn|(FEssq?FlU)FqN`nBCt%)>KG}TwffaM4-sy$k)Rg@)79FtxNhrvd*45d>iuw3} zpwb8ArBD3}r9_juS7g4d0d&1~^|Rpda+~5!n1^Nf`t}70-s*5XtaQI%HMemqOM-|( z)JZ%Zy(i92j~9oI>S@~FvK&(Dnt2!~FAxG@IGL>sEJ@F1Qv&Dv zivp1cvM(boQb9=<{T6OtA$nW>9G*5m3KZjQKdUHtOaFkP$cw1^qMz4dz~+N}?9izr zn=sWY(QgBKIM@bu)0O>6&(k6HI1gUg)j`8#O=^*htM}5g=%S-+-m;zbZIR)|zYjIg zXR1>cmiNET(fJBvjPj&@Ur^wRdT9GThxW;KpQl@5$f+1M4Tz>Ep2psjVM{Ys&+J8n zL3@q+y>z&K$D6++EOB_vH@;E;{^Ydr9{%w;9q-7l0jkn1Q+8i?frmKcbzMAD(|| z@Qg_Z?S@>k@wbS;j2xK38?4m+gyF3~Tw14EA8~}hpC>z_;TlK3Y7*n64!y@^q&MoM z*XEwy(2n?(b!R*0`>K~!KWwIb)_%5*uE}YY1KJC@H^fT?e)5Y0V1(oS+*8>kw0Ag$ zSAh%FwM}+du|1GC1V1R7hPoshspsbuA(+qsaBI!IFd}jolUd0>tDhgaA4Hm#FU->V zi^fw)H)C99X_VzNgH6s$pk2T1sP!-&9T9R}Kkjf*86`7NPqA4|3cc zq-`j?PRpYFpq1UVFySC6$&;hZEqbe~%wLFNSQeZ@t5FLoC=}MAEyR&{vIH^i)y;v? zu0x8ba$-T+);w7nZ!nA@3 z_DgVOM@7=YT_L+P$Kxkk!2+eF41YjfA09?<6)@4Uqb4gv=FZcUy;{yUXHzn_p*EKn z(y1iyxGx@bFc~H<^<}XJ4L*?vZsO#*^3lp!Z1x`BSlGl*lJBw1)}1C>zaOb)hCfD95rO zYU*4cf{8-v3?)-DQPXv({TEKUZW&#FLu$85)Tl7806pZc5kWs`1I1?I)?Sd*()Pq^5S;7b|RRTi%Nhn54 zM(fS^s{lwd!wP+`777w0%$62jBDd?n8$BTeu&4 zA}jGn-^=-7`W`B_@Iy%1@GJLY-v~r4lnffQcKmy@=Kf%qzaZe%P5v)9g8BamM{u%n zG5>Ek;!1tPZhI2R7o))606%6O0sAH!5p>^+Wg=C4{&2uC2#D=!f$=nf6=Ne=VAB0~ z%&DtuqOeB?MiT#%5MNTnZEaCiw>--S&|RI~Ojv)rk=Z$)vXrb|UzBF*yOT%5T0=YK zbpD=((A(!%eEuwa^m}koTV|rY<~pj2yPS(D+v#f!X==-;SchzFZ3?uje($bxtB3*1 zOaS(!#(MMWu7aWIaON1h?64+0r1G+=@FLDAcdO0GRR~!Vkm0I(&oKQr?jqBNu_m0F zp(?!LJ4Q*L%b8bZfo(`-)+HV0v$Df%2je%)n4=B76+P|PLq|{JJm21_G>rCAr;(0* z7+>{!b=z)3UG!y>UAHqCw@Io~BA)@d@@`{OjR2Kl=06pW>tfd>5e@@GAzyxn39b2C z`qJQ{Ub)yrLYMC3r^Y`A2nYCt*I<)^Qb#_tYXPa?KXQU z>%@~K#+ES0(=M-bpN6R7vZNO4@Qb!fvl<=FB?C-jSB(bwv#C>EM$AM)TYC|`ZM1SI zKPSro#ir*vY7gX4Dt(jVqFGHScL@~BOeJE-_xt{oj+sq})negGr}N5$-U*ndZ)-OL zpC#7|la+?cHa{5|K9+wl2=1P$$9CAs$9PJx(Ze_l1I_Z$xkmnX{Ankho#m6zidSf@ zM(c=(Qf@Xm^T}r1_J;l@v4CBTHD%tb1Ubg;)f~akaZzGWpZN+VSQK?vMruS`N5?^3 zGIjhV-(GB7#7F~Vikzmv_{I^)%;4~RPF{}yACoWONb#j6cqECcub97OgXAc*m|R4g zrmJ#>^rktk^54k3Qsly?UQD2@*b5AM!kxCocM-6+;wL4(mRJCr` zrm3xN8KNP?Cf#PMx*BxF2xiOY*Yi2P=MErQ1{sfFc=;>fUkz3@&y^?j`l1{HC^L#a zgJl*5^KYL6b&7S_DcdFvH)>a5ecu$!D)FJbQaAY)AzoamU09px%4q=rxoxZJHjD()ajYRk2ow_2lRPfmjC9i2N zW|nP~sz>}Cnm}zvA0-t=gqP}|0dw!Aqs*sVQ953dju8(=fH;8}a!JSwr?AQ*%MHP_ zqs$C{baydfOZ;B=bq25vA2M(Q&#<;)m1?bgJMgWm8Hwb=%ehtMDw>2^u(tnkliobn zEUxn??BruTx)4rVb8rp)d=|}|3OnrqsB#%Rq_^Q`nYM=JuN}HV#u!Fa$jy=S|c3NDQU1t4LbEtgQG|G7= zk`(=BH|^m+KDubT_*BZh*)_An@=~4rlG<0Qkbzo6+Li78c75tG-D60PimV3@Y4Nqvm&i#-*C0?T0hnvM}`d4k?~c{FAeraSvBORljOTlV`RE8E15)R;>B0)%SgB zvOd%5B8%DfK$mM7WgT+QwCy;u7kP;7u35>8&ZZaPNvHr};2w2?pb=Ge5m1;JP`VLN z+!;{*CRh+E2s*3e8uPujP z7*r2f%R$z<8G+fC)i~3Bi#G*_2^`G&P9b$I$5f zHqiMU=^z|{5bgqhK?Xub1cFM|rj!qrA==Vr@U(RT`Eh|_+}Yed4L=74AOa&Hq97BV zI3#F5R60>Q;0K6I5sDl-LE#7Ra`PkpA|fIl4grJY!eiL;2^A>HN%TOh5d(Q2Vj}Dn zQa@8LXTS?2ee^WUL7|~P@C`l7vCj`i8~3uyau)-Gp3nPrKQ1fMw3rwJ8xh-RQY25? 
z5)lZ`G*59OHr$=UX^yz3*i%k1%rV%i*d*&9+b~6}7h3}zp^zjqT%0Jv?g-HX6z_95 zGoJbHZIWRm73bjhnuL*PBFfD)m;#H~daJY&ofU6VTHf?W%vQUpNQ^8R8{Ty1ckd+k z2|&s$TC zHeB{ONUZ~qO2vsvD9Em|!T%-m!F03x_7k zQKo-0D*noVA|HS{fk!L(^dw;|B1N(Xe}j%<2xjKxk)EPx?1g;!3>@B1WHLSec=c-d zG`UrwcMN9O1h|9!f!~CY zSU~6h?yyxB*oAb&@yc>*0q{E7z4c$E??fGUV1>=$yyL%_d{52g^Zz`Z zoTU4V`oG|kIZ-6~NIUXMIMOloq(kN%axCzS-i$#&?bL$Wktlg5WL%ROFd)|&*Q_F!dq*zF zA3l-}a!KmMDRm}$OtVST;*-ISA>xugw;{X4f^G>J)uaz}Nz$q!a~Wq`UEqPl8jvO` zl&qN^V1sO7b*mPjfZ@y7U@-y4Oz_eWqYPD%`YMZjgt~)w3#$#FJJRW%UQ}NhFx?z@ zV$ghmXzbVwH&;TtSZQ57DK%t<)kuF0>Qj%vcL}!#Zxq%mLPPL$(BOZ(E2$?KBWX@a zj(Rm5Hm0bw9xKY)sV29^96LGnM%4g_LCH-{?>)&!nYhxRm}Qg9ic~Dp24ZG-ZRV!A z%;uPb!&X@P%hSUCYIi(GdgFAy2<6GoHUIT!qf_WJv`4pdH`14Q_BEjQDl0Rt(kiPn zq{hIX0W}xzU>PXHr+0Oz3e}F_Msqt*l5emJ@dcs1GBk#u(O%!^_@! zcKwTB+Jsr*!?|Wziz<1iw2ByNr?gpuX!DB5#Y6KfuNO^$`>vk1SmB^yhGP+XyKEPp zq3d#-cZ;8;MyXA>3B%)a$qyJ0qIqox5{yx)w9TEK7tF2h)6JY3$=T*p+=_=QPeA|W zu&VEQc$VmGyq2FyT>4zRjGJkW<7nGCYEF~ka(o)Xp+d9hygDjg&;P#5eL8br9OGH= z&Q}Ppn!hPPIEQCs(^!x+>Imr#I)K&zrs<-=-23GZrN+ypZP$4ehYpj} zdaMQSv28BVCT&~pAub|4RALwwU)h63N=F9!mNXGZ>ZF(SF+^yec@#FEh$$YeoDJL7 zk=UjpL`2oXeI5s&;CS!IP<~!Na3>h;#XO8>e|8|M{OtCRMljl#KE@9MjoP#oRslYH z?43oR5S_^KPzF+&Xc#5E_ygqRqcXofOkYQut@J5|h%^#$E+q;Wv)&~WJXidP0RU9RzLF!KI$ zaNS{cJzS+e{iEk|gtp>65LMsvGsF+q_odiA5XG_APe7p8$QOjhFzXB3U;p|Mwe=w| z%$_}2#Qiin+M=tFY6}rwe^=yQdȳI}eKZ!~ujn{4L!qHbKQ(iunnwVdhN__YCW zo}+c5tQ>nJu&wOkvaQTHec0+M{Z`NxQj(B5w#%O+ONN*@CW}vJwOd2IAq$p#P8;5l zo+Wob_TiHxzfY1zpCFS?lA@?wmA=tgnrneH6XNb6+^o{@i-BjdksVHo+-F9zCe0he zMr6pIKL*hlz-?DVzcX54;vD{E#Md~A;^RXBB!C{dN!6ED*PrN$R*jr0DdL;+Hkw(s z&_if^F$+zkwI;zo+}JvICHT@(yx6NBYy2Y)zUl6~c2oI5lvU}RRe9Ti^_5+?(XQ7R z!|b^x#00Yfo3f;+;0kBz8W@MtEFi3q#4UQ=ny6M)*Z?etGXUIa-hsv?oaE;d@WiyU z1ey@|&nEb+MHdl{JWeZd|0~*HHiOxT-+1+?{|NpDI&6#5oepM9W)-mqrRXJL7y7Lj zQj{q|A|(tJA`U72I&bno5`djrWWiKdMx;Bg4li7NkDDGE4$Xe zoAX^&eS6}VGj#nXTTYI?knz%5NR9l;+a6h1C z$ky9`@)$p#^YA=`7hiipjAqLghPJv39F%EbGb9iD4y>WlEFyY50lS#MH1G!_cW72} zW_A~t3?WhjQe~H|^Y#VD;xqEK*gl+G4ljlqnr|FDPOm#!bQEPu8w?J5KN~W^sdzK} zRil5~21pzE$s%(`8XKiTKI5+kv(?Gk1Ycdf7i+ER&8>=Vyd8~bPgoBgD(QtRPLzr# zBcrrk`V9rr=lp4CXpZ?u=x?@>3K^0!q ztn}(w$*R0|zgfZ@vCM%Z2*wtKXtW!fcN$Ub`1v{0-#kZWHUGrccV=d8kefj>q-Uka zBm9{fo~CDEiIbd>$@k=fr13U1Juty79v>%+2}zDeSg3-}Vf`~PJkHMKG&VLo>NqB@ zybkFOLom0&sb_eY7+!UUSVm{9CeQnXgTR3NXpI}<&v*?Dp$@M1Fn9IZZcgy+H_?ap zatd>k%T#}er`%(_o2}uV>{ye*^+MG51ua{x(kuJB_F8ObO4b&8&+OjXt$D<6(x((k zCMy4wR&JBs+Hy1Mp*&MYIsMcwmsgFJUNNWOIp_tz2$&juCu+t9CyZAyI%@WRGb7X-y7LU6~<5O zpN;qNvji3&=4-59OLl?T`Q0oGKVMUG{XRcWa|%D6Zx3Hf{r=xi4@c=4>2vbU9dw>= za}NndVv9C4hM2U$N0ySI7m$5_OCT6>A^r^R2lCG)NGAQ|e>qq}8CHIcDxGluu z@Cg3TlI`%{3*J8X!mbF*Kf7kFVi{;^L48p-v@?{NZ2db;gE~`AFq$W+I+I>#Ke$>> zol#%Z3AIO7A@}iyn)0-99v-*p0u9IsE2t7_Zb075d6vW;F1NW7Z}5X9s$!y#-Lf?M zW2=@YWhzE!Q(dwJ+GNVI6-}~OsdS<&bm|jTrSf7~kt{vNm$ao|gW%76_AjYlC_Vya=3IF~z#B}=lKVo4m|^|Q#BBI!iJ4J_ETUtSNux>>Ta8ZzK6rx1zUzXz3iUs{ zlg4i1?24~^JhpG6!rPya5wz|}T)+JH;g7_oz4}*SMwxL_;+O_d9JYf>Ad0`?&_vU&_IDJY|IVad4Qw?# z=x7xJ2dtW^(VU?w%`@(5^ZKOmI&|m+bGj-Z z>n!NXzFOD}PIwL4N8F}7dMga~_^i4|e5Re(-{ z20i=NABLTSc!VumHeWL@K?ILqCl3R9Y*+gZUR%C7T3qemK7i6c46l0KTpw{P-FMUT ziE_FZ-40K*UBd-CLuSZ!6}3Doej)rjY2wv>z3aR01&se{V)Gkz5nebh@;UER4@9>D zHQRXQ_e1a(coF%=W(d}s$)lK?i4~|eQ zn;+W3PQe7i;sblZsJrhNDtQX!Uu*U={JQHxrl0=T>K4GGm>3_3o0-XEh0)A%RZ*U= z4Be=~F~DE`L?i#y0{K2F+<_qM7W0ejY4Vd2sllWkVSUJI6A34H_MPl&cf zg;bZqz(&Y+P3l33DuSVDN=6AFggBtYXIFr@f!NfiY*w(5s^0yFNzI``Z`~R5rG0z$ z3+#c{JJFr|Q7io=ZOuVuG|EzBCeY#DsO-ExCL?A_ABW}oeNQS*E149BRAown%LjET zEE1WvPF1IDd|=Fv!LGpC(b=gg1D~r(*H37zeOb_w92^v&m*tg*230nbEqVrk7=Ru>VwRNMU#Y5%cc8B9&o7Bk&S8 
zW(4atZM+6xLSPOvE7d!osH=q8Y@<|Vavqcg|AB30Gl`@fUNDvl>@vUM{V4%x{h-fV z0}@O?HkMh)*sF^N=C!6R+2kpK=QR%TL1*WyPXy?pzJ-w9dW+X}{?@2XJJQKD zC1sj~4A?YnJUWwEn)f`BL`-*_s)L73ZH%mfUjtmDb2y6BA=+}qusRpD?A&eme4ub- zPbg2wnhX546bS*LV&FRD`r#+8N=#h|FYR1rBr2NADmx+Rt_ke*@CSlXqA@H9dtr%v9Fz$i z`}Q!DsK1i!`{_}6LVq;ujDi9sJ*~Vcr@g4MPFCrhfg$0OqVB46!HtW@x^#hsCztQ* zl&zh_Xih+y#m68SRp6p2i=?UyGj{Gx^h*i)_s*CryF8;J&RZ78KQjQ5_ z<+hFqJ){nAKU(TFZ&HsD94`#M-D;s_`Z)G4u07qoFmD!LCw&C6&7ddOht}z z8K6$B>v(;=dEbPr5<`CRd?$+Kk^DrG5NA(*`?A%;T)c#RtzO z3OvHu0Rhzr5{hezoDNq<2LUe~qeJ{e?a%;`DYXE$HTWmldM64m*E^@K{crJ;;7beV zOkqk;$2m(`>uFiv<~-haG)*HBF7~j{)KLIe$Vx+E4)ud&4#q~o<*k*beJQM)wZZen zftlvbqCiNf(BG-4yx!oUDPD_ZS>NKB3j91lF|Ufl$j5c@4pj}#lIbT4@}L`q*TpM* zs&9-AF*07GjzNYd9LGwo=gTa@m4c*}HCEYkGeNH>7kpha$@25C1}Tf<_Up5^T|hle z(Wssop}mXm(?vzj@&}dhrAvf74*C*t; zGqY&{6Zo|R)#*Hkcgb6Ej0zvPy0_H^xjctqnW3LS#;DoJJhgW$!^Z}*9O0m&eD3-k zd1qogjh{~nRsXzLEtzSB2175Ife=eU^n}@59Y*@kn>ZT7`@Gm0xtw(ZQ8Gkop6X83 ze!Ql3Mp`RttAV#wU0UUSup|_z{Bd?ZFsPFL-+}TuSUi2d_W7a!!^MBR3jV@Nzt>8L zj7u$02c-O6@C*1m@jhe05$W!6s^Kn?x;t|=4F{oUmKu#TIAXqwfRC$= z&7)_K1zhX`!L^JL*T99Ix|&pD(WC%H6H;aN&0js3*r14H6dYIiHrJt?2EV*nd~IQ& z*u|Y4&v!(TJIONGF+R^lm=#P08Bk%5q-s@B-rxvZj%DJ5N{}m$XO>pesltJ^q2YjE zs$n_3)^m_j5oVtD4C>111!RbCp`Zft)c!&I@*9WWPqjh(!E}ebo;z!gt%QmmohLOy z0=5Nj@>}J+aO_x|Y+*g@$$T~^%{u;9xvPnMYRjZ91ChUNt=2Y61KGK_eK8b}og1Lr z<{)I#!UO?{2rW-D6<|+v>YX?EqY?EXWduupbcHCkk7$Wim{m3UHLBHgu?<9=#4otF z2$5Kd!(5iS((pUaav*h{sbX_UHjuER++?LFv3M=%4&VfcBFCrAR}GC~Df^z)xg2xd zV7gGd15G3T!+$fb2!Ouv-1q3N+kHnN z-FvD3+Sy+*#RK^8yC>?a?1Vzsd<8g3$Qx6xm@>}PJeNTkjyr){EoC|QRh+7`LXnc_ zK>}~wOasEV*)v#UpB5|h%qpCtR?8tKt;8TJ?7V;gSx9`)%at*zYJy0_H|CHVF@lNN-a1O?v1a3 zIW};&a=Sp!TM^aHrPb7Ox4nWAsy9KOP!S!T?=SX?v(fL`*cDiyu!Kcm!*ydhXSj_9 z!HTCBZM`PJq8vKD0p)S{Of5J>Ua@QZ+O#5*AH{>f*HpXRJPTz?o4mJ9$_fu>x;sXM zw?AS89vucBRW>w|VHGmo{eX76;KaxaV1Buzi;8pva=iQ%r1MF%vPs*J!;%ZxOg-xw zF`xq)>=h*}CcG^yaRHeYdc7@R3njQino09f38E4nJG^_#9H6#Sw@i@nbG?ykGTQIG zo%!XhDhtH01!kT6xwAuM|Hkt2kG0oxC90N?=jOg$QOF9_XEXT0L3`7w9q(VV@Hg8N zVo!GclnXz`dI^Uo5)OIm=_d)s6<&BO|MdMJhn8pgp^v-6A`!(2hKr_wMT~`12-muV zebLGK!LVFe2q_w)E?+|ljA23GNz&wBBMh{iJB!Ir=cB2YVJ}U;GL9GxC*X6uYa&NK zp86JW5+LD^)3DXRPRuf$HFV+nj?rZg@+SK*elszrC<T1Z`iksv0YkmWok?u$^){p+76K%Abm zl?2wo;h|h&uvw7wKGp<%@{=DiM-sVkKtpAqxCBqS!b2T5!JFAmMW?)RN)(>qp8PujEUX(eDoJ6-& z$8zbvnH}?_QZUjoYr?$Ds=iV$Rfk&5ow~P9zSWShk86&BhH2kai~J#Wzb_BpU~UvQ zkW7NwY24B%^VE*Jco4+Wjqmbsve>Hf+MPyhG^r@w@n6ua_$)k!dlYFFa)@(_`;7ykCAc7!ise79~BXmfLii zo9|=W8?3taKC~zj{ke;@YRF{hsWT`xaI1vqkj)C~$R1akoKiKgKl34&Pm#9@7#cmI zQOhk8$5$S{6@#hT)LKl^zDFujEc&jNuPy2BZjhvH7!1`V6rPuN1DJuc&zaopLbh@C zcI15SUsT3G73t&<3Lx_&o=zM8TCaa+E}7MRq%FPqBR&WtnrZ>_DRjL7svqUB>lTuD zjuH)(PMf2SPPfnBfHw*-J84YduKMC6q*_}YQzb*BO4aQJSL0<4-XOu?@uKr*A+eaZ zThmU5w#hm)Ju~Z784N%nBD`~p{vGc@>NDth`)J=6eHCX|>PLB4(;~vH`OLTXC1DFI zlZlaOCX&A(OwJ0dwKMw_jn11Iq8- zXaihTtv~8Tdk!zHIr{1|CnTk2>VZhBojoq_*L%9+fMp)!vi^iX=<7d9vPTAG8+9Ma zIqM=v7)4>uB6SOvb-nAWPjl96M5$S(%0dIxbItdXG?UFr+hBj~l1A|i&6*EL@FC%G0 zICBwk?a|snc$}xIRc0zJ>8YCYs)GQUPHvp`{BOGX+!ks$o~XD|`C7`susOEq_YzN( zs1;?>yCOgI8bRcauPOXXICfM=m2#%7Gkb5@zOMAT^x;*ZJt&IOV zVpqmC#5=GBNSRC_qth;FS(bYm<9&G3plejfAg)@<{^OP8l@uG;V{(7^s9BILR74yR zpjt{U^l`Thc|gUo_&a6G6$BHz%Z&M4Ym|Zb5C#8WGoa zyw^NH>)JtFM<7a(-)b(A!eu@?)ozg+8fQtR)4{_<78x-GnqaEwDEG0+@!HStQ7)6d z{$*%?>_hA`xOfPg(^1Re0I8DWS;01WBBE2xbA@p}zrAPw$YUiRlV8}AJUA=tJ&%HG?d|KP>2Ri#(RJGr;TVSGN|Q=7!tlOVxB!43=^6&#S5se2=5-G2p<@u`i`AC}a29ktq=1v1evhsXq}Y652(zg&PJ zg9TE3s9n))7!_K0&gx81ZlY6!u%od{AsU#?W1?bl(2)^jpeI@JS{8Jo-=GMpSr*$f zo9}HSn~xhnYa3hXVc7eAWrH4YKHUqwtYMIHG%8Qy0r~riv=C^h3PJ0Y&)>!=z%vIY 
zZ{+3pYs($fBn2gt_ha9pksGW-zL{<5U`xmpjljK7@XvO6Kc}@6K4xx4tQ_^P3D*sH z%xN2ZZwEPdFz6V%TyBg`t&lUJ z%r!*DQH9!!MCYq^;w9^1ZZP;0f5(Em*yoc|6W$aLqh|O--F_wK0(x_bR7nLxl{6Z; zHq>}M2xX2J7`vVQl&TNUc7}PWW&1XV=&uqAH_mCnW`;XZRcI$1W$!~pR6lZwli zhRM1M^l;tWOWP*=%AQ0@fHKZfGHvafB%-G$19HNyugsdpYu!TS5sdy-jWK1s&8rsv zvO5O@plDb8AL%l$_$r1*V|~hbQvfX5qqX*WU)6R;5zM~9%dIL4X1U3Nzf`7J_AWN> zE1BPqK1xTuatC{k!?V87#WB;e!$Ih2xL-W*4kD+yWB7*Us>{0L$^#^GpLuIW%`pf2 zIsb20y(hOvzH&!4IA{5W4}`##M1AP+DX=6WVGN^zJ^9I2NXJ^2 zQEz|<9qrv3r92OgP4CL}xy}cP!&4pIa*8oMj=#;qj$-xP0(euNt;;KH` z;`6YVmmkm@=s=w(1^Q=HGeNY7{zPokv8A|qCVq!#*Pvq29Ann`sz%9*{F^in6NdcW zN&lj{h_fS6&>zY$8@nc-%R$+#F1Bs+91w%Q<(JfDmmZ{z<=nvKLj%7Jc#_nDSg{dV zJJ>vEUEd@g#DYIZe^7(BEBLy3aH_nLK}i4@c}-`Ot|;Ph0$VPbGL3R_x%K5}NFDSn zQ$(%9VT8ukg}nQg<}J1LUJ_oIq2AF;S^C*b0jD))C)%|w>HYFcaZpNV`TKo=L-?_8;l^?y>jh^4v{O40jx9|IOH!a3qY-q(GH?f@`nXe&9fZck_KV3g5e}NId zZ^1fwYgWNQVbr0^H?6P?ZFBH4r zky8FDK5n~MBl-EpeAy`jTpJ(dN?gz}yie8ikq zjqbpZ%=jj#77=*MsKHRR@Hv(~2US`)4q9+X=oD!{VMWG%SI}4IuEb@iqJc=ed=ZBI zQG+T>;N2(jkuiyqQxOm=XypiFoFnGzia$9WI-f%Y+Tdy}D7m~bNL@VXYc5S@{Y%Eu z-x2hzz^C02q7<%!D4mh-j+@M%GwnqtG|3?=c8BU3XT43rEpIU@PMvr+$EL1*ZGHI? z`iHkVdqid=@+@0PRhkbAN$|5y?50U?30*ORbf4fllcj%}@rS z35JSj-@aodXX#^FI$U4!Suw{iDP{?81Gzrk1Xe&@(q9;481ehX=Ik00Om*8>m1a=O zIx;3GOc(iUB$zeLn(Rfu-tEVb^}fnkg&3o~%3z;G=xSa&)6jCS{xh*?_TIcOif&9a zV9zStbtru_Oi02$Jw%ls(q^k+8s-nLh<>lAVvgXIb+EMDe9J&I;-ueJ^&@FgH#t-u zf<_1Yk`o;3oBk!*CC18K*2?b0&DcUl!`EA|+JDOWIZW#nP)t6C@{{u6Yz<&!cIIP$ z=#bz+|9xZ4nLUXzCn0*(s4siJ=tv=-@jcniDZi!P^pR9@6dx7QK_1yy?6FIb-5{1q zgFW%uBIG3naZOQRx894SxP61f%>8KIATYGKGy1};DMNfo6>KQ$$nv96c;lDLC2ZE= zcDn%^Sac3F-O=dhsdS}VWn|V1YMHOF@I{7A1yFdaS{fbvc6K~XXCzLVoB(-}ylXa# zIcB;OAUUP(+ty%nkAkR&kZAA4HnwSfl%=N+(0R>mt#@X4<~WptG#*aYX-89Es0}*Z z?eqLi3@SFV&&2wV(pvkZ&bkWv&lWjFj83{YN630$W25KX_r`PEpgg}r#~5BJn*_sZ zitPf(^@0AmI%5S3vwncx%P2`Vo;9T3QIKsTxh%$z`a&)XYy4yq_;q4C7&zly(7txc zfP$**ebQ`BI-8@o^BhDxrtN_Tzp1xsVJ^^MdaDK!lLUcU7n z2^+eF1`i>US9be23=FyhGL~#MU8XlrBskrnG<`6}VGpxqc3Essfzya_)|4ZTK;lSa zdYL4eEVUV9CzF40M=}tH(`|pjsaG@B!O_*V=05%t&

EGE?c!p29T0b)wf0J1yq?{6koScySA$qJex%6G7dQsQz4%s0UfQPn;j;Y+^f!=Uj78zK8*ShM_N_;qxzhHjV=tjs~-@;U~nH(pA@K^J$9G zm?DiDxZ+h7k~sXGg7pjcI=Pn*@V-T7SKm5pE-ayso+E$sa`{^~3mHGt?>Mn%`z=_D z>n-4-J@ZP}fhwB*eIp44ok)?ch}vFdQ@?G<76vw=E^d0xy}|ot5NpO1Gi6UZjmI6U z2vZiq%(ypike8S)PU`1>uw71L!q?RGN!|Z>$NxR8WzM&k1ZUf7@h6~_prfa=@D`}L zZL;jCq7Y~8EWpT2m`!h-D2Lell^%L#4#pk~k)K|=?p$p=gvXK3@K*)>?nxJNvQ0#XZ9CVfu7R<#LFYX+mbBmr&m^8dk>P6$QEH889up! z(kVHrI^aDsD@b*lih?v>=AOR~J5Pp!+`*2cZD0QV2w?A^*IDV2?!u+68|42?(kZh_ zR$Yk-i>? zGTrZ-zIb@245w62l9$X8H%B5MwbOfq(`UII#yhN}kl%Ota=keYE5L5ckzzJ8>hzC>R%n{dS{roi0O zl#w4!B>j#n^~!M7;r%JccsLHT+JpK;{h*C}sB(XN^gDkV5%E3vQe~ zx(cp2XKoLRDn@@w`uby`D?QEz&N~&zo`>~rkoN&2$*4|R)>(RxNP7llsy`X{Y%7)+ zra8|>Z}p<&;_Q%wFQ%l4Ve}wuN4r zF4L6YXN+zw^=msBb5ZIgm&5ujqa*yvQ*dh-V}ahL$v5`0&Wf&aAZNJyoeeM^@KQj6jiV$OpR^584^XZoiS{)|FCE!jvKBGjuJQ=zmM=t@I!GlY9nY&g!0la? zSDC=creZvA!}5b*VoIU^I&E-_rcY=-{fk`J>K~fCjUT|sr5U4-bXD*g_ArREvg}om%|W-!o{`0|+ORnt zqMR%>wK9yi*4p19sTx>7RC4GW9FgjQLG{B;UdNY(7&S8yeo(3~D#(@1*;I3}lR$r1 z!g$Q&frVUQdZjIuudJ|An(R_Y^6v8~gx4w>n(F#PUy=dQ8t0^9r%*ZuM|+lym=vg8 zAMu^xbMDy9R{1`PWow&;6q-9b;bLq4a zYK$Q7LtD|NfNUp0Eoa(f)nf)>xn;ik0ke%0J7jc!NsI{P@3A|A;mT=mevbj5 zn{R=FpKTp$ScqMslF=Ne@G|!~MQxtN243x30JN>96$_2OVH(4Vys%4uD3DuHVloHS zBuwm8V>JJde}RY8M2iuNDib6d@}W=^NRIJjo?&5CPhJrXiejn_7FtLK{<>I=(?(7-Emv(xGJFr7=U`;5wW>?P?cU zU35RMh~>}?3ZTiu8$_5sgABe5aHqQK7EbTTht!KSt)v(hQ9uQOx%gJEIFhP$`ownI z6mfI$7G}C=9LXZ^2jeIX`~g+$a)HHS?e8%j@bO0ZyZ<|tFzcYy%-svGw@{N=;lwhy z@DL2rU(s@cwS#V4u2Snw$QxD7P1=8hS!5@&v*PyLhw=!EhboMGX1-#a;<)(Q6DgDP zd5*Pt7GzG3a){VfHX(A_$%c?8-VD005PXk&pUYHZQ2E3FpZTVlQ8$rGE1q6@`{8PG zZ@Db}a2T11Kk>m2pK64paTeeqwPh+YA>5nIKM%+>2PqnI*cD_RyU#~m{vO|b_u{Z0 ztiQDJ3_r^uS~A6$73My?JS1Y^76VL^!HyKxg~~XdZ~nR{Dr;d$t{j_v8A+GjHK}<_ z>0x_HKy1baGh}r^g#TIEgP)-9tq^UPP9M&m)K~_aBJe{es7o^hV6&tyw&_PJesb%W zafqR~ABo_q$S8VPXH(Hw!EQjbz6`gk*SX7=12&mIMnSZ;`MrC@(W5ExvWvXvH3fdU zNnw_!WlhDMBnKU#ED2=`;J7OOdB4O~V%&s`XFwjH>N)$Rc=4pXX;6R=C^HLMxHPlY zUrR^t;V*97-LH}+h}WP+S>+se$?gp&!|TImXFg(F9!8}BTi5FzLwUnz^WGBeaNY$vHK0;GMXj&X|aWJ>ao=O z>w5cJ((jXU$*Q{Gh2pUEv%uOQ`yLB%!vY1(WH|0tLq^n5zGNl>A^l0M%V&>S1=l4O z#w;TKvqunwQA-4oln-LFb78-&?0C!P%Z51wqV{POFU-3yz?}M*eyAT<%mBk~fjd|0 zNrDV_eV8>TWC|Fl5U-0YS5<@qzTj>Y9X=c>v4Vc#LjG}-MAzeMaAF&4j3P-`j{OCm z{nUM4VJ_}ey6gQ*N(C=6!bml5*(7GjK4CycGdt<(AJXZ3P47i#LHg3Y-*LMc(;9-d zT)sTRfr(zmdI2Xae$}!TFb@Q>#gDeH^tgnS3hJSi*~)Ati-Om!;pA=*A}pdCSiZKv z)(Q0MWI_x4c2ycJAMc2ax@Pk0MPD8+5ZD!~ek_ehP;dTo{gv}jLpoH*!NuLNx_ktm zPlM+ZGw4r86NAS!zieKS4{>ci{&ov|ntltw=dnq8_`U7b$jJ*$-lBfJJ!E{oKg78F zbdQbf76@=*J9>lm^Ld;6@@V(BT;hcG+o_SeSQ+vg(98C1@yiK)TekrS`rM{ainm#P z{%20Vt_q#e7<{m=i0&S8V9!BH{<;c0yCl|<=XX0&p>)c)MWN9Bf>(^NsL}BRjtrm+uMr^5f8c(Nj{*CsAv9%IBTQwRHyXhK ze=dsxsD<9K=bFW4G(MPAHE*Q>E-F%sKJW>~=o2tU+9$)G2VTf3%gk@ILk|0j$`y!h zJJ-C`8$gk-UBe!IZkdA5^77N@32)-?Ak}f&Vx?x@L)?;^FY!1$dAT7m3)Tpi;m{Bijkv8S8?@BxEO`%F z&mm?3_uEMhSSHBsaFu&_CFrt$-#G16l9hW+stu!df4*?+=i{qp zRgh?EfiIv{wK|+&lSE)cW)ty5A@}Bpj(~<`Z9v9Bf?gHTkI zi7!h!GM81H7J7MT7f-(yANk;+XAcI+YgQZvswKAj;uU!~s18JFdQ&z1np#t|7-LYy zG>opYwI85q-C4C>kI1wt4=CA-uNWA%7s7ZgeRYwgwGI2-mvwOp`yr6j1G#paI_9^7 zd@LSKKBVYsc~xG9*`)~Evgdw$6-2GG7#G+&_q5+2?oX4f`@F~dwS2E}fZ9dN9U%tZ zI(-uV~y;W*wt~KyZmBAa-gfOj~PJn(T`uaT_Wa3CzV>{A{&|QQx`t zxsQgUEK@*CZ~-kcb6|?)gR^dXY7ig(Z$okS$$=D=5Eu#uL8YSBI+j*k@zoq$0U#M$ zq7uz%Lp;HV+o)wjCRXNDJ_g*vm^JtkfqJVNFiPVBez(vd8v(sL-8Y!WWJ{+0t9L@9JM;8*V|9QSPeYSyL!-=_^PW{g3(Tb@)fGb!SaNj0j)F*Z`F1}TSX z&{3Z4Od-;o&&vK zQ=J)_5brLLLY^v42fLn%Ws7oZ(ObK`6pNMb0T3BMph~%Sqqs?`h~|M%yodZaK)Y(- zVB1#a_;~r);~1D5iz?lSTWg1NMXT3LSU@^%Msnr8HdN>ahu2k_IewvSGT{jIs=w&TAY+4!|12 zvr^;gk6Kez^BF2L7%p?l{?86GC+bZ+6;D$HdgZ|^Nbm&;s0ROJdUEf-Dvk+DXA!AM 
zt5m#KFZxhx{3sf7F&m~z1db*oR~VXm_7ZZ?7j$7tbY3VM;!=EFA1*U9A1NfglJ2>w zqqP{72v&;xae)Hn6+4TECKRsMq^YfMB0B&4U>C~P5qG^g%#Z2z72+?p0Sn~J^hI|` zdnu&MgDXsdft<=$iQ1rQQx&DA&g&cN&c=~_fHT>*k1epXdDUaw)f$wOMi(cx>l*?g zSKZ)fomqv<-^qa;p#6k)=6)Tr-{j9#4!vr(1+f^QYmCVy799I*7!l|Y*p@_JyVICM z8)nu?_aXdu@5K9$AwE92vI@-Ix>&TlT>g8^@zEdqAZ3kuSYg&4-hf)m#m+gG_gnVZ z;R>4oe$~CI{AEs7PzJfA=`>(L-HDutc)-%8DJR1f8JX*Ef+YfiXLs%4?G!~v+AduK z5sMVz(G_qjPK8QzhT={G%{prVUP*9(cZ%bXkKln;k$;^vc}2wl>o610Fo#(`DXmZ? zPD?QfyU@sQ$m3)m@L7ZYad1DsJxMDKo0GnN78hJ|P!9;TS_(DOQtS;&{gef_A!$N0 z55Y_ad_t7`1cto~9~qjwP~K@a^E*JGvHs`kf+$bw3OqfUFd*ctacdpiZ`)z|#kcf8 z(^##AOGHL>`wtDPt#^o|%>*W}%TK<7Ci(BEP+*)Bk{|+TV#aij(=C+teqjrXZxzg~ zbkd9CN_T)7^&tT4LLx@0KcM7Eyn#7%vxkfVJkm-7Tq7%0&3&+Dxs~=O2$FZakQ@SZ zqk1-CJ)!N16bhquIawED1Bwo8^{oJ&LgZTa=a)tQ_V@FwHhV1-57_|(mg^>{5od?i zIuPIyoC(TxVW$7GMtj;2;46zfXbl&Y$U66ydHGmGnP-Zy{vO?tw>2tU7hNUsgMM&0Ql*4=d}-r({*FhQM|;oPRnJmyCln6VCs6yTbc zF2|KSDr5#j1{@?4KDj^^2p1;&eYq8Jwsxb1UrWGhG(Gu(!;^vs=STFqIZ@$TZO()t zWTjr4(#zMZh1p#jc-E393Gk=NAASVeRcl~`^L?HA&^@?RTMjRVp6r+f9{!Vs!GT zH-Ayb8X}gH%~a_RbzyParuHKP*`04ic&VYmaT7LF)_D)_Ht+8ycPI^}aufVuVM-g1 zS5dxUnSdhZu#^5T(h=+bk95Ss%*@XIzez{lDO!=)ZScd_psxtmGI{h5y$L3(T^snG zvn)kLwMbM&bdyiwj&+-tb9UR^bfdnt*?wzS+xH-#on#;S6O=H&f+Dh+GBk8qP^KtS+N1uDTYm>zA3PGM03ef6*QyAK4 zBs%z8gIz5S4FEbO6GhcKyf%}A$qFY=dV-b9J9$&*R8II;m~{gNRct6@sR+Eg7zRue z3#%U_WSt@HG}`#deXRO<1nv3{($J2h=KXe@FqIA~!$jTSG70`nJ}*gOij%Zz8`ZT^ zsdTnXdO?~);tMvpk&VNPLs*+ONWoU-@Ij--*la0{1cu=jy$-tx2OJMaEiW0cy{{~! z(gl5^ISHH&QV(Z+)Br@7UCq3q5X^*DaifUTEvMhcwOa%^R^E|e56^hHte zqGx}|*jpVtxGQ}KZD~=B4|k@>g#iSLeY?*?8$ZU3QZe!9Q@ii)zLL7_Pbv8bYGkP6 zyVS?@z6aLdGnIFWO(qtEj4Fg*%HR<-@IFb?Y)vY+#{P=CTpOZ^>q=4!Qg6HPu$wP> zzV~1FUtbp$+fm&gZ}-DB-QTx@-!J_wf*&`V&pAQ9pPSp?udmeKuber8e($Fpu`O?r z&xcQo?{0@ryCu)pzS@4{(Y7X^uQ!74+jh6}-95hTqnB}u>(hue%hgL_pW}}RS z%v8hX=bYHPq#`V#EEy=2MkaQGU)aQu<|&sGr&+?frN^!9k+7Kw0;Y(o>n7XT`ku4Q zV2LY^1)-r5s-*aF$Uj22R4VXB6}>i!7V*{XlQ1@5=1X8}-T86w~&|rq>if)?dnd!AEz(m`BJMY!q%1wJS^&i9E%Gm+hSrVtJm(D$j#af?!+f|%tYA+BE5v1*SRDLE>;G)D-$x2=TMGkBp#nEo3}ChSnYh* zv|Ya%>BG)9>^oknczix@vQ|L#emkd?xESMbf(6vQcuNXw;17MKLr^wxZJb+zz#1wz zw+7ztpjgK~?Ls0H80KRW;s~QH+B>;Vlxhd%RV1?3C)a}SLmO7*8z@H555rEygXkNj zhnAtG#D#jVqby}#gxOqsO-QNbF|}2N*!^mvZeBPo#IAi~0!53Q>Q83+m>Uw48pUE# zOd&5d4!*bF6P{XqYZKus=qs2^d`c3!#Bwz@nV7iZVleE8s^aNKDa8&&eSd z=KdT8R{-2Ul^+x=Yyla$r|ktK3OLC2dU0*!{(eT9&GKc~(8vBHi*<&Fe#-XYTd7!Z zV9930-mD`{;wqXu=>YMzCbd+Bt=Y2HTjS}v2D=xe9I3Clhu$<_@+q6Do91 zZ9(vI&tT*OUQc(G3Z(3l!j+X?`Ggu@ZJ;&`T%dGAeLGq8>7_oxU5jRC3R@8Xsczn@ zt%sbU5oPgqPJDfW|0ZB)^l*Y8{5hRGhc-VCV=ez>abj{*nZ zy$dJDG^NKMwK~=XV4h7&_nS>vFq%raxvSo}aO;o*yWDUL8f13`0BIV*sQaijMy#d5 z<*X1@Zv1yVWe7 z>NH!nYa_>#Id~#Wj_bdErJ4jE5mq!V?H{!}0RakFO~_^t{nj1<(u5DA;CfUCWS+tI z!jma4%=#FV>{@+Z{B;#mC1KsIx7CwkhO>1~fxbkK-0{udEO47O0jIhvc54AukdC50 zyF6*nl8-H`_EjwhM_p;HhgI%VS&+m~u-0d1`|Abw@6{N50<-O-XT^f?2eA7dzb4%KJ__jYSZ7wu`kru2PZ`KJUPx zgji3b>x+`{x_tXFD%Uhd*k}eE(Vu#phhzU!o+{2FRe;9XKBw+n`MH_TzFZ=`1z@5lq72u`qAKM$AHkCL|0Ze3sD}0kdL_cxeco ztPQg9PcFIRl|BA>rkJ-=%6G9TR3Wq-9dKEOJcKIphVCXlIyL}^h)*5n{udVJ@j`Hg zps9keNZ~%-S66LI%2Q>7IJj4VO{=D#xb2ObXI{J|UUmQSr|mv~@}bSzP>v;>Jsl1r zUXHFu<+XZ>01lyJ3d2CCgcoIx+@ku|WOY9~q16ZzB_I+^c85QyyB5Q#6!o9`vlg21 zdV0G5Y6)+t?N)IjvBH|NXjV>5*fK~@CuU20!HcGHh2hf6eeS;&^P$K(DutwU*T&ZF8ncu%!qr!P-S3;MInRAOx7R#0 zuM1i4PD4w$S+W9gf{K#;{NdMR+j2=ZwTIO;qj&}x@&;8=*;3UjGNE&*7z|wwCj>R}41>PC0DJ7E_I~ z=sUm6)b@*f#gZY!m6%?^s2YPxlx6qE$+Mds*%8XNNv~Ua*v3$+qODA72whzu|HiZ3 z)M8!hVsLH5PkywAZQf|%u)?26*`_sv;3H`3T>qR6hxq99s6>Q~DG?R4Z3S9TuN6n| zBJvm3yG!uHXBxKp^NDj$DXE}!+7P{=-^WFI0N?4J^Ce}|qakHyEyeD?Rz+dOPiBg!+L_bK4}~zq01VGm!wnRp`)0G)G?Jvm8%@!6eXEEIip#aC 
z78mx~c{yX@*iu{Of`*u7S6=dUs~)QhaeLmojLW)dh|!Ij6W$V!P_)q>f^gcH3j=;$ zjTi=I`-N;^McVbpeUzB$!0!9?T<>gHFc7@SJyI*>N@f^l z9w|_HJDRzbn}C&3Ux)Pu-@z@K;nX&ryl3&(5v@$M>(VIp#h7D$P+_={Jn@SGg%7gH0eKf@{Y)s{j+mU5eb+RaO@`ZqvVXY0o)OqT&te(Q6 zj5x7oDZD*!j+7YOMZf7G(TUY<^}uK3S#E7_s@FWU$tI=_mkQj6rwkz&Y{W*(Vd09i z@#K54FC0k^l;GI=GC4NE*9D>~cO7BO^5!CC)bN&g=W%d3zxc{BJyYZdPx+gaz{Px| z;CFTER$V`?qm94+A|VS?&Y&7dLf9=D+V?RJ65YAz-})D-^_)0hxE~{1`X)Hu;)fyn(AgO0frlw zeGxz5nhh^udn`zI4!hTpZ>*+N@zFYHwWNjZ{o;9b7Q|b`SFo;$AWDnp z2^>$4!3A&2WP{{W}fZy4_{1cRCMUJ zYo8XU*lzTlc-TKPt}||`plsH>B~;yL8nn|98s25%%cG@<3ogfMiJO-a^HwI{P{P7)sO)X2>)UwBL>c3ol zzZ1_;0)|wu*Gdph-@q9osW!Vc`ZbG?X^%1v4!77TEy4dg^c_9$(`5zKFvn8Lmt0M}UI( zDwG4P_ZIo{@eniH&9|t02qk{>tVi*#%)EA4>;M3)!{hBa`s2l$y0TJ*8*Pj;8DKYZ~klTnd5Zj5!48``Bz zM)6vRhY2=Ny>1OqmNTz`y&r;ObtYq$$mP&+Tt~x(r}PM$VPA5{hb-;yJ$$h8mk0k& z9JNvsv6U~t+62P0+N=PvUMjffq%%;wgt{s|&2s=;mC`k1e|yj2KBaoS5=)@OqWevJ z&QR&DdV;%~e+rk=V@(Wj$i_)G*`FCvoC=;8u??0cvwI!b=XHkI|?-+w!!1N2Ba@0{7||sIL?&pEgOpxR`~g z=smNEsGfSt$i&t9;4EiaJKC1C088vd)_SF$8=%EH;LZ)Kpee+*W#oKzXN2CsSZnom zBh=_GXY9kz^}?Aby)Xg^S;OYy;P}~aT+>%G&ubruhOZS!K{mjdLh4_W0WoO{`98_C z*2=JSlv=s59udwdxUt)_c%In?Il7>bR;Ed-Yt)b=wS>mD1fl!GXAY`-d`tZ|-lxj* z7TOw%JR*~vYr@g_L>vaI)C4$YZ^*3%Sr}N`y17-@8tooa!Y_VN<)uge<>QNW9h-7& z`9IX$U?CNs;1OFaLr!gu=Op-Z2A<=gO7DZM1Zl*YR9m&i6M!I#KSAr~wnC;Y}*)HHFM zWb<x zC5NBYbA}t^=|+Fxt>z^QZw@!{>&@SO|2RArtItLCErYxCEF}VgRQl%ho%o?L>lW+5 ztId9cn<&N_|Kj1cI`N>GpNDJ92rr+znH-9dO`1vE_aE@D#FhAguNv_c?33W=gWwRd zgIZf|Rlh|}54g8Cv6$=8H%5qV1p=fX!oe( z2Cnn*YYsnJ>gPD!mO9R-MlrHEPPK6$osR(ShS628)IR>r2=KE5V#2U}Fzf~2MjB74 zUUNqfU82--^C`_%`rc#s^jjJhT4orA)Ic==0gkdZUrh175O~yGGeY!KiTxE|tpxjM zH^NJ5`R297p5p&<=k!f{2Y7T4K7iC1G6k1Zf+2jPhLQQ?&*^bBZ0TP9TGgyZ`D=E8 zf)o8%f^85%506S5&eFn;i3_Tj41lVfVEbKi@lYI*6+a_*OiNLk>!5-<9|Ev0&sM>i zmcxsazh8W$iUBO~SAh4nz4cpmzPnq%fb<}~w8K3Zs8mY1GC9QW)#NR=`F7Z}^DE<4 z%G*A5Ui|%;fzM979Tq~L8y69^VFcSm3tErgLg+?^3)Y@@fu+;uY;1>@Zu)#Q+DoV0 zMmw=pRU--Z8cddgwFEU7cp5=9uQ$0<+_V}#c6RGrjS+lr`n_AheOnuy=g*y9#WfB- zdQdw9!!Q~MzEE(*s-WN=x-9*h5mvmS;bzkiVf2g;^z@!Xrq^vt-SW8mcjC4bI_@kV zpzdV&;+%UBJOrB*KJ7dHMciFTe9^ZK!=#PDO2@ss`Bh$}-tl`+nWXby!mAz0k{pNm z6ESyadyNi#C{eO|&3B8n*WCLLC<4l?nvPko3Zci8_yIs{Ge29G6*F)?{aoN%lYAOD z!Jcsr*fv3f&=VZ;E(h<%k)d}vAeT6RxQ@UfaMTnU;&pTCho@2gaFd!F<>i85vnV~w>$iLPLcm3H1MWJsj>Y{O@6{|M=}4@_PUJ-=BZfAOF^j>@AVo zA#xmRpPnBBfLIetmKAumo(6}rxx@(H32>b zGVtBS@su!tPr8l>C5XP$$-Q288>_6p9Uipn@wv5c*v&S>uJVqsEAQq$bqBa`z5>?t z-oYC$=Jt-TU9y8TWQH+4mhx3G=4%n$Q=rc+b+>Y@KG=GsGFZ%w{P|fE6I?wh7GSTh z2q+P)c(K}fxMR~&*y;qWOZvR{E5aI`hfRm;-G<6#1Ax9d-`lCoiTB|y0m0R?Pb^4D z>q84iesOd?&##>h3_%tbkebTKFBYTNnm285h+wv^%GFF!EC?>=JznER*kv;U zK5-7?p`|ZIkhu!CoLg(0tapKkzDvE=S6`$);Td%@)(F%u+ z;0{Dm4vj!QvH8trEn)JUQB}@f^@O zxuiBc4-Z^th#;&xL99(m`MPQ}=g;oepUNeMR5NAAl>~^*;S$=jj~#r=1fWwo5BH@- zbd<9w2FT6rYbEBojE%$0W2l*|sXAoFZZ}0y=qCku*Zs_0f^945t5!v5KGdi6`TcPp zdDQFV(Ru4oEZqXYUF8-~8#x|_=nNUG!?bzWyNXuU5Qxs#Vm7sy!CT>T*H^5}g7>6n z`nK#QM=vy6FTb0t4!rN%yLkm*ujcQgUr?6&JolFZJZs=#xR3B%*1K*2ju+9sz)5xi z*|(-P4SZ+`>LqIda8EK8@;F@aVXd7lL#{d3So&qEd zk<+_^dbQ@%GGy`MTWRq?d&kz^^GMH1ypEweSYP7El`|aR5}_XZ`AZS3G~%7?5U(Jz zT}a)>XvBv)K6!sW{Olv)eCW&p&N37MJ0(F5WnAEKwGp{bMzAva&H15#02>QoXn|7* zfHhM8G)Xo!@UqkDMu*HJ#Swv;5FnF)s=P`GJ)ejC+|VOM?4d-qnqWual9GYjZVPsYH>!L~#F*X4c>az7Fn6*92xM_k0s z1?L@>RBV=t8CI<#HI=h4v#{HB7E^Rz_KnJ(Y z{Mhb_KvIqTtegRSt65R??RqmD=BgDq77OKP#9NonXICJ|DNdFNHlLuSr#Neyzx3Sq z?f7MrcN<%ySb8!Yz;fI57>ygDpfz}SQ2ddKU37=IqVk}Td&@EAbEqrM?(?}gAA;)l z+==Ej=SI7k;D8|WZjYy(J?ZqR-ADLS5xCwA0?@K}Sg`brg7*t-uejU;@%w77OVr*K zG)z~5{mygUvp;W!qhYTscQp{bC&1lw1uV3l7=BS-0aE$wC{^bZjQ$y5Oj?5U&j9T; 
z8eK~VzxH`NqH*SmRk_UVm5K0JY`<8~6QJCls^LzVh#I@vM|jPJ=43q$@C14Eq0iB{ zJx(_!CKj~ofGvHKogHLy6>t|gO41L&-j6@B5mIrtKy)6bd0^~J3ytS`YEq@0IEfv~sL1Rl4P7wh(la12F$uH8En%R7v)|G8h_&#HzPNZu1+fr|OL zk_3jsG|?xMRd;~@WO}QQn@Jo-u87ZU)>v>!5`IS+AkABJY9H+tVbv)-TVi1DZRr(@ zd)ByfjN631-UyXO0xV?dWDo(0!os05G7mAYsh_cG*o^?ZC(Yn<+F3iMsB$vU`IVqm zfvrCqXOFd(5e|cLVQk3!RF&%*46>B-s&C0S&9r5|^mDlL$qRF#2`d_{kr^w@Sj{2J z_XbrkLWt_%+@r$I!}yo|P*(bgSPa@7`uW@e<7w)LN}q;{AtR!bG6eRqHJJ7J{l2tZ zsFLw}9(3-HuS*5!uiqOr016j6{Sl6j^_MgF=*6o|GO*8b9tSr$nX|Zroi83^<1QL- zy*S9f7N)1SB$#*-HFX4+9D<1^u%)cmJDM?Ow;Ec^;&-?E7QI&#fQIbLXhysxQe15* zf?P1#WQ;1XIl;OweV)Frdav&V32O7r83F}lChblhhZ4>HEZ2>4_8q4Fl8LmNCx^vi~1Z}EM z3Z0^rpSoGa3itfJPVr>#Lr1@VzT`6ZbYA|vV=9`HlN*HfM}mbK6otyvbhuvYOu<>! z7-8Lh3evxajRy8fF2H)L?)&;a;E?O8eo7VVFmNt91!YfyupRfL7Q28~t`kv(B6D_* zukq~`Ut+A3#{<^=o50%x>#887YXb(LD>8j^|__z4n z9%?CHA>MTfh;hyGNKv{9&DZVHR~){gBZ6lZ21iTILwq>7-waShx`vU?EsUoJZ&^nK z%{l(Pt^{;7WQaYs6<*;62i1eoE5u1l{o$=U0S{o>w%2>zRgw{RLe2PKr@-VexS#Vi z7|D?#fa(r+;R8Em0fMuI+PEtDjd4ACBIngjPe?I`rnZ%H8H-t3q7>q*$0Au6D+ak; zmS|62HOjuVdoGYWB_1n(HJJ@V`!ltO@NF*Gnp}5-^i=SJroNusOcoYDN zIo@$Bb6h=Mw`RK$k030nX(SFIspcMGs9ZH9q7^pCGc0X0;xc!RfZ1rY7YdeN>?0t| zVX;lVy7zX3!Q=r9JYe^s`-Q~c*K3~=;r*5TASKdWA!G(~aa>tsadm?kn4Gk$zIj_} zEQ@h{4AV817jyG@#?Rhdka{~SUOzj%`7n&y<1i_t`Fi7 zpS$RA4>RZ)ri!_0m?b|+ar|q|{jhD$w)L$Ha!?4u80l%f^JJ^_ygSH{;>pd#YNBPA zf}vu4hB0^%#6tF$q1YhQRI%Dg3~Ud<5bTBcv)$U|c2v$b;w?8+`_){)B>(NIw645z zLm@;L@75BlfD^QhHL#6<*E_MN%uoi7xqkUze4)s%;Zh?ArsFNHRUxsOUW}pq z@XS0>ct{da(nXPYFTJR_%+*u_-17h9)1cZC_5oyq_Y>L;!Ak9hH`oJEmsXo`xi|!+ zMF8Uz-3Z#zm_e1Sm!anPb#9zteV1?*{c8*kB6D+YYu^kSlGS)&D9~dp+1a`>kA?%mB-m*hMvv}caSCnT9_a-VH}%x200EeFy9?kLP3?r!RJ+vt{%?Jdv&o1vCFSfjKOUu zgDJ{txB7ZiY8ld0RTVzT4yCtz-4=75T|ZqG2>We_8zV-BhmV3XwD?aQ`(*%<@@{Jf zsQSg4QDl#$Chtt}72fWPhT<(JLh$KX^@hHWgz#t9EmoT9EgN+AY>G!XzzxJ$&mSGq z+;_6Yy}lIyfY>t8))P+kv*2e&lY#3^IV#y6L2K*FoSE3Vq~+MGdUewvuhFQct7|l- z9p~*{N2_Sz>IyRJv z2Z_v(QhqWsbZj`|1Q`U|seg342bTm-eVhEjAI0ab|6KLc1uLc${z;-31(M9`9s% z8Kdsc>a`TUYp5~Z^4gS~IN5lQDGt-t1tGdMGDok{>Q@LP0&dXpXoibLS zt{+|!&YSYTxsE-guM3XMFx?($BOUG|Xsa<;N*xSVf?)gdWgFf#_OF}}PzXwlV40|i zJZ&#BF)eM7$B(S-)kKkDHL+zqN5Q$fW@S8hbtP2!x@-O-*!!mVAC1e@(gvZDa30Fw z9-~U}xi(~v@z*(NO>7h8OQL)W5=5|`v=!(@%`PKeik7Ku)3hOe%~EVb2`!1>!yB+< zWl*Tu!{lY=z2sKM40^@gtW;*$RK(2FI@E(nK+ezCwfERQqNXj0c3<5UyNc_K)eGla3UAH7r z`v?ci*1Kx#orlFe2==A4t8bmO^{o!+C(Xq@+nFE%=_LR}e>nR!Zo=92%J*(!@B|>$ zrP&)X(GJn|GiG9O0Iyn%p-v^a%2Zl#&e6%TKV!BeFFT<1?GWgJqIzL^9xh6)nAnQ4WpDSYun{Q!ma1=WXN@;^(SKBWG{9t8##)di-IO0UGpA1qpB4}&A z42_I59cSWm5RUC|0BS3h!8+t4KCaH=b2S|fbgv8Zu1234;DaCto$rN4%{xF}l&B;! 
zQ``Arr?FTsh#dyk8C=iSvlqC-RjoPpiA>70q59jGT|V)kODO`OOKO z*7l3fQ8|I#XFVDXp`Hz0qROjR<77+7bzW^UM5PwBsTcc$6MlyzlMT#$8Tu3gO?L=C zs>UgT;&Vc6dlCnO$n40^YMf)jux~xrP?bYg`%zQy(mYGt2ZJAf;Qd%fFj{g1jn)VB zXJO#{TD^R=!Mnj0rQM~B_8)Gz!++6Cz&jB1*Iu8g8s(Q}V0D0swV3X6mrE>Ksx7^}a)}jt+8cBd zFowadZ%wzenPH60!_X{xU#y4smhDGzSjpkrx8DDywxe&A+1QM0C`C~M@< z7g5TsaJB=0FCwL{yQ3+5-6fn)?H`@79BH#1crqJ3DQ6L^Gz{*Y>SnM!dH5MNEXCQ> zn44;}LRhs*-!eD%8A79Vj%9HJ#0)Neixi*R$REW^+5=N5AZbhITv-i&f*>G7@KI`3 z9>QTZS(L?btuE1JMX1AWh9USkskLiK3UZ~qvUec)t`W?waSblMLcer3^iu%j{G4~w z#P>%C)m@Cds5NDPDwCP)TDprx$8JeC?lN_FV0n3LLDL^m4RIay2Jn6m%pyGo8M zyJ1)=4nZ5WoPF)H^AE+K2vDzWmoHOY7=1p83kXOu(lU5*%A~&ReSd0kH*!cQD|z8A zeYq`Uhv#7}Dv6H&f3G;X-M4kkT<0A|ig<6mB%bdpfcMFyO?*VnUein9yt97dy(D33 zCDl~4;GB8X*H)_3kNeadFX<{&r1cjV>uHLf*@FeI)vc$Is*0NwmJa3m1TL?n_toWK zj5>qfZOe0Avf&O~{=T2WcAcI2skqUQvDAADHkA6LBCjvP^`2yo@nH_AHBGJS)WH3E zPdvDZNxa%QTG}nIO?^LnBOgAnndHDn8UcSSSk_eeifcT*@!s6Q1sf`^xNFow=%uB% zOzX-r@MYQ_QosOT7cGpmB5XA+6yhfWmb$j~;^(mLMrEF*XaPd8a~3c`+CRWgTO3K5 zLx6o0hHWiq(7%-gg08n0q0-a#ut$*`@R?~3hf8sAirMzr?Pa*`BZ;^}A{N)>688nI z%Np&ei}msH06YarN}7^>Z}?810^uZ-2m9P%pTi8FX8&`C4Q6Y6TK7`@seZUO3Y`1a zn`9cL$FQM1jD1Xcq^}o2Q)jRAC>-0*Jy_WU&6-H;*ryhDs_x*6;YWB$ZFbiOdzF$8 zpM%_Fm9gEENcXqXsK>O986=EY=$%STM{zT#*&s4ftvGqs|f+GvKe* zqBAY&(OgVcg6-eQw679`E_z{4672Pd?{nK^mloR?ccTwPzxCjMzeAS>ziunPc)A!; z%CUI+=dx&3uPD?5_H{bt3TF{_?Rn>s+s${=u!lol@{L;4#Ti>WYuXlA!90xIR()Tz z*7@q;x98_9ZHjXRlbrnj@ifK7chz1PRS~rET&~12EgnoJ<;p@vaF^925;Ztrf_da< z7D)_5J5ufVJiZ81ySq`77A*8O*eZC$Rvy-t^RQH6j+-fy7COo=Z0h;CW`cFFEnDB8 z+SSa1&8)@B9&S5?jaR>`Y9IZ8!|)OsJyDQfvn`HTs~MJ(AJPb}>k|Pkj}b(q=v>a7 z^HNnNPci1L7SZ(_xKRbv3~x-xbCPFduy}OW;*S=-kC7>k56~t+JJkF<*p{ z9MXrxhM(U}`e9iZM;tE6kZAeN$b;*~U14LtU7VG2&3xCcgS|?M&vIMno~*ll_=*uW zCRf5~@3rpXmEB@eJFp{|_`}nR2o-sJG6UE z=;-HD)HBXY0ZJ_uAJ)Z>9?Q>E->lz~a==ZjMi(~f?f@}90C9bZ!m>;aA@%pJ1 ziPT<}4$nH9M&$}Y2hSSPB0g-402@h0173Ob?vKzirFMZ;tNuVxy&F6!dh^P|%hbHB z=wXFC1ePspy&LS*hIcdYVVU9_6g-S93-I*zA{3?!^0id^Ltz(-aYURooh80?$|no0 zbS9lV?clYqQMNi1q2}H@b!J-^h|^F#GsG!D^dc`dY!DnrI~n{pUIe26oM9_}gjbFt zx~-5qu|rNyQRpVyCz2fzT<1jOebW3*x2~0k$J9FQ<0@7MTQ{C8y?i35r7T=0!#4Sw zTAD7VSD0(C`d&Au{gY(?RxM-{%LtyD25at5hNY%03Yz%=lO1W9OtLPFhwLLz`q3HT zTm5JC?wr5KTP#kce``9m;#wyIy4+aWy{>=q&N8wi+yo11i!8iKaE&Lk%2n*X&!=`Y zWWJzUlxh1);FHDU0M#d7t>^fwX{+%HfNmoRHnzl0t-+HS;nHmM_MVDRYm%K_0b_ck z{xH?5{$43>U89Kr0q?dY3b{}Lo@)K~EP8+?(!LFtfqNUeMOf1dS`qX2&EQg#C$LlGtyr~;a-HeZ45EC<5j>8HJ?N$>hA)KYQoP-c4f zv-8fckzd|t{MfE*Xe!7sxWU>QmA!SXnH%-a@%3y5syRjTdWCmfbtmg!aW0;GB5gl| zfh%mwfju&2e%><~eAeR~0B3K~8wHf`e6}fxf33jcrbO?#lX#VH*1v7Jkx%K6SD zBZ3W1r-9Z8Qt|@qyL{L0)O=fQ-~5Ej|6@O$fYB zb*3`hvACKx1lKP>aLBh^%VY!$#7?mo69cd{=|A&SUGT$RE*6_EY=z0hj?a(f6x9&j z{`==r9WQ0;x=S}u6Z^xqZZLc2!{*s90No(qO*QYg4Q*{B$Xr*g1S@v%i(sVB@!ZZ% zMs%Yatn6IE1vP%jXx9^mS35iP?irm`yWB`{nMw)A`iYPQx;luA7@8ZMbKY8};8u2D z1a1r@@}&1DBj^Zum|MKAjNuh-CX+s^MVk#@@%61=wb?-8i>!{ZbDmLgtJAMO@(A{| zI%;5i(HGZ$evWtJxFM4**NaEsfCI^eCmKwq(| zQKnzgz{*ywt4X=6HmXt~?3xZ>7LQA1P{H4Qh9QWUJS4)yzyA9ilH;|Nm)MuOw zdjlE2s?2)-(|{sCUAJEbuCifW9*QCXkmGp#ufZL-#bRUPBWwy>xh2*4VLDO!=Wxpw z;4IM;4${31Y)_SM)cct)!lXgL`Za>}@wpm%lpj`NEcOMbGbke#4`U$J7L_+TY=QG-_Kn z6O{mL&#|v?H4$OjV9j6{*>{FcASf|Lx3~6@Lr)66Wc($(9;xZaGJ7B!SaT?-w%Z5Z zjk5(2t*V9{PT@z9RiZ2EImaiP8j;=Y{R3M{NfYbpc|9|16fW3g3wmc$JAP=VD5EY=X5Mz*E^)B;rsrv8>G$B|qvu3}cOD-347DspfN>l$gX_FIJd?O! 
zEaLF;F}u+ zPMRFR?d`n0r%jU3KY{uyp^IY>gl(HFMTS>n`mj1Ze z@U1qxRVU_1LmY{!a<%L(d`GLtMz4qSi0NIQ8~(Gp-J6KZ;>NKfH@u=hrp@3%zJ-^3 z%l?>yLKhK_QW}T$sj5wk$oJ(*`%4{Kq^kshIOqy^my&KV_o~H|sJWg+eywICPb5>r zo@s(^MGs8|5;0v3ZH84O(P3rA^<(=@A+TD|QAv-w{Q4WlWHc5$sZSQ8G%=j#g$fCq zhvFj^3(-uFws5SZ-RfLqpazIx7AQh?j=JnHL{-1h_N;ZN>q8hD>8t_~7}>No3md%V zf=xG?bcGa-v1=D2DkDLLW|%Ps7l~EzIU2^8~SYxum)(!6}oiSOcKXp2Vg$1ahL$fn} zcDj6p6jKFr1CBjzw1T&Q_X~&mW6+G6azW- zYLPM1_MVd$a$D_@q6^BN9^)?Ssq*#;3cI%^)st%lVRva#Q(>*+?;-BqKZ!{E4y`B8 zbnqF46A5+dkr(-NvAa;aFp=C(VGY^Lf4D}>TD31o2sNO}HQ1NA`a|^d^z|0g-=Nen z*>mr5tKNm-x^-)~us6iyrgG?Bxzlnc<7T7E%(%-Y5&QU#nNSNfuEiqO$>jp;x&)oL z57Y+=$%fYs$1#zd)-iqHNM4pOXxbd#1-1L;)ogn7_l1^Eau8gErF4PP`bjX(4&@5QqD1sTjMX?^ctwQc5 zHFz;NFPz>}9TR?B7+r%n#PK;;UJHxmX>9eft9CFf??#sPK22vh#+=BZm@_RWXVvfV+NpDxH819!P?6LEy38!ax(@xp+RcwZ32hc^6BzhaRt^AC;caV-c3<*9_clT3X{AzklYFk)u`(3{J z<5QE;;OZsBoLAO@AV*9|U@&IY*0~HVII25;<)klSu)4ZMyu&upk~mI!Sur83MOiil zNkQ(3XbR*6S3nZl9IaZXbQ{FF+>!IqfvZKW<}cL#U7uM*3Wks%HqayFD%D2bY12Kz z4xGx4o8{%rdMB3kNzw%NbGdEz(_Ht54&~zNhlwP%AM>~ zc&ZF_uZXGTT8%^IQW*qd-cLaR0r$QT1j!l&%aQoC86EX#EX&_}3 z6N*LDC^C>9U1a4kx#>s?ksU_yVdrV%%z$S zf?Rn{O^pLZK+iDc#p6Tym1sK?*3pe?PSc*nh!bm!pDB8)dY~8sHfxeJ0Y>N#TmE@% z5DW0y#Fw*?Y0B@+qp)yN_Y*oG7FTDaapbmxQ(kWV1Lv}wpg+KIj*jGR*kDLg8`icN zs|;^=fD|4BFG^>VMhpciOwv%?7h}O05HsNu2NxOE1^NxRgTs3?Y%%ac8m>0Pd46>&6sYS9Blg4Ulg zr!?5|s9%=wn8_&kYek!!HZ0iDZhHgiPq0@TM%wPQ@~}_R=Y}3P%NY*+`Fwl|{y_uV zA_hU5&#-Pc>*RAJD_X)_@o~D`DD?~3$2VvJ<4ZCYc`-jYBYUXHd5%H7CtO!1Nlj+e z1rRz1cn)0z?QK;NAE-Wj8wqnAOAvNza^vpc1N}V=oTk=tja;Q8f7rTgRj|!7?JV@= zNB8;58S#+MPYa1diTGDyZVxC0s@F^0c^v`x6h5nb<~rQy&fptNs6oN7{NVwjbS|OtY_DhYVcy(Frv{$>k&(Zfe)5@)cqv;ZTMJL zlf?(B-ab?y0(*0~Rx!1b$xb~U)L`qwe$&p>Ppo=;pFZ#y^aK*=q-&{de^|&|#iy3~Tp! z8k#ap4bS9ZHKA=+w5B>4VSJ<>{>{MUwkV|a$MBuS_5A}iXYd@J1nuN-;_4=-_^^YB zx0y^T*>T4CXIW&iOb({DI4wPJzYlSx=PYC|77@3Wc;397BU||$<82Yvaa8gEW1Jat z7WG!h5$+-mk??nOu3T?)&OnJ}`=4@e90M@o)PZkTObL55y~!0qz`sluF9P)trSmHu z>h}%N^Julkj0A{)htdYCwVbTHf&&QG57^qT3Vfc#!-t!Z3 z>wIL;@T!gf_UP}CYwSwgL0ejLjX`7jzQ6IkS>V#w6DkUO!bvm7N&a zVhF_@#5mm-%#3P-=23nD@nxc%9_(A4+`0&Zf6}-$$52=v z=2|5V#wnXwg8$>(ZW){_8d@4GJqxsv}Awh8D z&oGr>t$AV#L~7Ku)r!$}8Tw@~YtO=KlYzV&QGTayT62)F7(at}!QHj$|E0i(7c#!c=x4CG}-17GDM#BlW&31jE1 zHTgj#@+&Xv+z1?$E5)m7WjOO6R6`mp6mfPrOA_-rz&$n-YhNh~((Mr?Qo6)Ea=<<0 z^sd%jAL(J&25vM%BJEs^(w!V`RB~y_z z?s%<$vV=7l3Ch-}k-9{tqfIb7EZyl!c!FismHAyudQ!djnM(>uE729Anq6>3b&r`( zn(kwHFEp`rn3TeTwz81PIape%D#Td6;olcr z)p{34dT94byd?{|$c6^?!rkGjXu8GX4MX`{*=X zd(~B>&|A89fvYS@h2_o5&!QfXz(UqyvG{UH)sXB%l%tik1SaUOPyX8}e)RuHFW_)z z^;oRUR;GAvt1Ds~n%i5o9vqRKv<9zyEDd8?Vvon~=cQzQ@K{<(Uf7cLI8z@sJkw>S zFKHc!Bb?yqR!Tn-B? z=BX04(~9OF8#EgaWvc+jGAOVuHXc^+NDcZRcicl{pZK~`M6OD zL#WBB$HSslUibFykJI~k^*WBe(%Libua#nyStUoyb3g~SXciqD$#@&41N}^GbD3nR zTPc?EKwshMx$5}>%FJ9+F5{}ocZqen2!rlvG{m&*8WDYr&PXATjD`q#1RP9@jKcOh zJ1?{H*XO;W4nA*ZXRp?W4gQ)J*eAmGqqx0Rhe8LnDIOg>SVX+=L}Ym_3$=a2LkC4w z7^u-JT64(S_6vf z)29azRWj6jzOS0$svm9nRxualh5t1me^9h}i|mvZYQ$+)@h^(}SQu<3Z$W){L)6da z7tz5N3>*x~>FF-`+O*oRUFL?)PFQUWy{ZDUjfS$^lQN}H3GH?_jP|JI#6|>H4oSr? 
zr+FV@#2}=9EZU{q?1KxGAY59QCEZnOG|HTTAvevIxH+r^l74xSM83o-X2=dG0I$ly zsZm7L%Rj9_yAJqUE}n@c6&Z@@NEv`C_J+&BVUd+9l^$G4kiL4p6y*IL5 znr>eIFB(7CkSV(mWqRf0|0XiVs%`<-75Ze8P6ay_`u@mhFy;Yo#X+P^uYE10+CsSe zi}If!a()CM(BhT^6R6CdMOHB}Q=w;>iP8)zf^8D{sC7#4LtFs^m%_+;6lFm3zaw7H z?bKwv2F6#Q0CGn`X;IX5|T~ZMox=zuyF4=N|r#23e#R1*BG7HnAU6s<@$4 zN(+0@9@ggq0B9!XH7>`}gHmg3it+$CI zUpF0Bk$Ki($5Aer7>u2uhpLbczM_=Nf0y*8g^X#alyVc9d_uUWxkCF|sb^C@CKa23 zd>ybj2dQa#6?tOR;yX^~(dRr(f$b7&Mi*6OP^M)+S zfM^sd$^60YKz0R}Jj#!_`mJ%xOeUNO9|__m!jNnCN+#*`B9U(W6Y+7L={~BAhBvsI zs9?Q|yiDP-#Emo&swiau@f0|Qv$+&BJl;#|iuhjg~+_ zU&df>PhY!3i-0fS+q~K1_j&N~A{$0P@3%7Fcae8k@D;s+JbpQNZf>$%kz<ACk|+_$P}oQ+tDw9!km@LYdqKmDa*Ia|E>Jcp5pVG~xkNXyO|tM~Vu* zz7IRTX(WT-;9}acQUn{7)4HYQJzv9cMu@GLKYLoprM$0*aM^W$N<6{#zA^n~yV(h1 zXgyjtG6q~t+vBeUI3{Q{q6vt8qAi)$Q8f1YPCiQ{H%1;AH+iBTF|>~8V3nd=AfhRg z#Lael39+@?;tg0bQ>YKlvV|18851VhqGURZqz$-1sF30ixSZ(2fpb4cfe%?m=J!I= zyt9!A1$@%f6iE)`#X;*luw{juk7=+fFh65P7`WH`;v_1fCX7PmAPFl&OCc8=xiph_ zK+fDe{lKeUq~C{aIKC%oarQT%%D@gG^tb8E2xhPxkWmyW39YJKoOIqjIQaZi2Ci5+ z8aqb+IT(jFa3&$$YwZT_+(}_LB`waeJwQ!WanF{n{|lMuKV?Pb)R@0-JYXTJ(;~)^ zi7slW@M78$MyP2_lL}OlV?9+{f7-*y(qgC{MBrHgI|YisB>rfYhtnh2<9~Rdx@+Ed zM~)hbh~bowxGfG5OK3k(z{h{{wi|}=L!Df_W-bhXc!VUTN2>jYMpxSIJ-71-!%Uw@962lo`OIX~r&6z);LA)+bIGXa&jX(266c1mC0ueom zbE9j8YDf(x5X>>PHf>@hLclx}I@ak0Lv0%^7BO4bkkZf@o0nKu#EVHSQ@}8L2=lkW zHK}a&AdDJWr6X{n6BM9^%c&oI(64#6bnb+rC9Sie3&CV)B?5`a;cS>UnUC=>pb{M_ z+mW{jI}=S;c4GKs|#bB)vHL*%+6Xdi`z!fEDp!a9(?#5%7 zu!>lt(}?-W)q0vsg?6tMn%i3Tve|rvFBO>0{DhXb1Yk9-0_c?Ub)*^;D;fKWo`q2Q z^%7NAP63L#(SDllxOSCau&5Jhsa~0mxL9+HjGP-TRQ!F~f$wM&T6UTAn&(yb-pqJ~ z@BQG9f|_J%wHz4G5KecoS!4UkYjKTbbfeaK8nBKIETJ?_=i&(q+!ilMK`HEl!%c`o z>6nIr$bUArnsq)+$H-_kidLTtCnf5Bq<@p7b4aR9M@n1p-RN0nP+gm2-lvdh=-^J( zN&0Hi%}n=;$I6Oy48bth^~TJFa2K&{gMC_x2{!G|TlTXHJ||guqY!A9Q`9rnjj)Ds zI(5Xzqp%B5VJT(`ZJcW1bM4#As`>5B>(M!1}6;>1hgU$FX;a3Ez01U@T zP@h?D1^@mBg)vJ^UllT#VVxPwVC67g_2vUNjYYb~fm0h$a9GI)_L`DCaFvf&TA^zA z1~d%cj4Vv1p6i?dx2!M<)ae`ZdECBRJPnCrtD@%eL)TbMx|KKK z0z^Yia|S~>yB|+VxKeLS;+n3RIbw-a&@~U5?=sy!c4avB@n#U0*XXX5*^k^$%Y~_d z7e0Vzlh=`hYs191b^223Y5Rn_$cOB*$TLoj8bi41v5uL7p^_I-jbIQ{O%h6-g2m=s z(G;+CCr*VbgBwgKI~j=r&5~VTp+V~26DKHryGSUBVC5jC7oRSLFn_6qBxHFwnpUo$Fy*&3GL<)df6Q!nz!~t}rOaE&tZ`@-RrO=v@abkk9o;_V&lBF|QQhIzr zEm&d^1=-Xp`OleQAIMKvLBch%kUJ8n+xZNQ(=op=Wx~4i#UqfZn;jGWTyZySlJ-?D z#_pjPoFaOx9Br*Tc&-hBq<_AnQQA z$V5klVu&0%mIYXNTGq)CmGb&TbsBw5fl|^jZyRUYymE$&W>8&@{yc5yH?^H!L*{CC<} z(TkJ%Iiw>(tR>d$P_CDR9%0r$OAs%r;d3omI(VoGw;JkZ27RJs(@_%=OZ|9DtlVOF zgaxWTOXWvvH3~uG>B*MtjnID&yShgxMFXUjONQWg^TA3hIF&PXvE}a*F{D^0(aDe$ zCw$b$gz=h)DKZ->y$bt`tXMpy9wdK=hwE|f5s#xc<>5*#fc{JB)#{ItLmfUDx7hm$ zMFtf(!96zzM@l#-YZCl>F+emBFhZj%?UT4b^CBfX&u)&c6*#`94t!1(;-0Q5lP@Zi zs4lw+iw#0%nAe#C7GySG5Rxz8L$mH#o9=PDV8C!~r9d*a%O=1n)zSx7n7iH6Ue%ov z3;$5lyh0UhpCDmqSc;!_ppWhW;K} z^vBofcA(@UZenv~g1gf)t-S4@(@Q5Narpz{QwB^_%~kXu_ul(vWAVTLeYNrRwOZ;> zl>Qvv130@H!%32qKiFYM>V8B@yE@B1{Qrls*%x$mu_V|KnDuDW&2_uDGoiBoGFnb~ z9N|qRC0U`9a;bB|QEn+$$P5V3q`8ZNSP@f%%;m*k$~mRW$^|_Mys!pjJ?)qfl!Xja zxeo*+j2_UNN`5E}$-1N=U}D>lt&sPUB-MXI$geiVvR-f$z0Sg?{ULD| zgn)w#ZUA6l9zYiqwFNQjH93DVz`4zeyhzL3{0CxS`&e+4=$-CN6a1-~e(2ieTpCe; zAqh%mFELa{PNuH;JDLFXM=ESh8m)PWks=%&r7XfXQk0n0@?h18VX&F&tx~0@*<@SW z8$tY8<(w39$55+Ot+9=mxrLtW2gO@{jfaP)my+>{jx z?`+|5WV8uI4ZIDmL2O_?TYl#cyMG4_!7(#DeV+3Ut^YeJzxHYJYae^gU^Wg6HP z1u?kyB}leGBSA~G59r2!uB;mj;K+V3Svf-s$GMeMq|QUN0)PQE#14NpQasmD+U8nG zs|r^m6;G;#^39%UCEPDcG&`X>q=%?4+3Lr$bptcSfG1R-4~;vhTvalb^t(@Xlcv>?;hTO~)mL6+M^5muHH`v&-Up;$Dn;a90O zS-KlG8^>&&b=G(YwgSQ@XfQ)E;&Za$K?#HiHfexfU6J62Cj8X0h*7v{itM2ZhS{&D zIREJ#!Fgv^o6`=r8Ezy+ac>S9O{XUriN9sVl_qHjAo!sOh#OE!mfXB-w 
zV!;X2iBZVBcE^SoXeb!;`}7`&HX#JP6`6q!bB5t5+QVzGh{S%RJ})-b_kDRN(f58b z7^f%T?f7}M|MGM5@w|EXT3vEDID3m9`^x{pTXwkqJ*oa`f0IkF`+NDZJbnN0^FIAK zdh33T@a6t>_KUyh$6xyM{;|J*^1Qv6b!*q>=k7D@<>&Ev;fTE4Qj`KocNh$vlDz21 zSrw7=Hkn z%9u@4;F2O$#b9ddPlpvse7n=v#6oo9pyS3kQck1o)VCa6f)9?r2m-gJ0k<2a>au z*&Hx*2L9tdUlmia8=S+Io3x&V0L0Ls#=Tbg!cDGU1^+M?Bg-C}W|MeuWRo#~L z^OoIm{`>i}-8DMB_yp(^N=k$8_kv%iL>rP5Z3d=i1Ie+NfnG(oZ%__Lq{Zz5nf=qe zZ8JA3_qwjb3q7e4SAjH(LBNW16&9j=A<=60my4wjF>U@s6*{+yB=J<;Lg;owmlw5K zV8H{$!gxSKP4w92kDH>NcQ|!1^oW&8`up4*iE;)?Sc-xvOHh=Ae+Si4L|QVYi$N5M z=|M0z&ax)92!-@Wzln;yoxKOEVW9sVTIm?(!UX3YDDd-Nkf}j7^P2dkF<#af#GrTb zaG_z9IRG6!J_xc-cpVG~Sy9qBE8?w-tzF5-F_Hn{(d$ zae;|*3h4CU*~$s2o`Mnspdk0AV1e3y;4ZSB5%WF{N_!m~W8zzq5v|vyW(5h++NoW4*-m<=O|Qthe)bI&O(sGB-5TbV=4BZ5cyjPaNQqHF(H68sAIvdM|4tqSL! zAd?31MbxN*C@LMmR1q8;a>@$eY#eQzp7on0$?cN{1o7h|^zKi-wb~OU9@4eI(q0sf z$+-Mv5+8F;#=j6}{{kvCv>@^xRdO2g2cnodG2_(5O&`M&e++$$v)g&cN>9)s5UiLD z9uIZ^n)z{N=^%GF9^=cACcLHy{GhzR3?=)dMiD02?y6Zh+8Z9=%pbT2bW;XGv5Erw z0Sy1C3y@g(e~bD}p^=%)ouJgKB)d(?(#7G#vyK-3=kyRF-48Z5Mal1;W;^Hv?1jjy ztQ4&g&v&1~RbA1av98SxBwKe#3xj8-(ZANr8|=)Awtzo$jel>^^=i6{TJ>kL$jcF< z5e!zfaEt9-NAza0nAGD>Zk=6mVo9nZsQdc`#F@hLYS2CFbT&<3pd0NIx}u5IcqW!E zH@xNTmdFs2c>o6`&Ao2L*omLHqYdhJsnjRkl(Ii6(GNx$Z zUoi!q%uy;Lo)E5TEg_$3-X(W#vD+^u6U>L}MIurUzb3RvdTA z*5Zm2{G!Rx087N8SvWrCf#C#55LGwq!V{JiwpCRe_wE94{JQ zZGpSbHjK|hW5@u;cc>MO#bef~u1v~egfb8eWT({PARj+}3j6iAB96`L4_Bv-PEQSe zJ$uA-7=|kQ>B*PZql;m6jo#QO4(b6G@Mc202dpCL+p**#J)fD!Ic`>b)`ARQCehI& z;JjKt$9u~c`P_!QKYrYE3l-mPUt1IuQ3I&KoMuN*!JMWzR=+#@+FmH1)5-V2g;A@q z)3b7fn_=@G{q%*dw6lmF1|6IzDn~|ggbJ>>1&rztQ?a(-OP$<(w`J)99wXjE6p-&* zUja`UA>}WVQiP*Z~ML5 zDbOr$-HzbK8>1;r2^qW8K_zL0Jkjot+aKrqA8!22Ofq%$wG8Za3@aR~n>4O3by)~} z(MerhSrH#q*B{r@C-S#-3>EU<7q6dPVPy$vYZg7G9MgPo;Q!|M%(bf@%5rA?zhMZr8(WSY_!&4) zo=<)Y-bx1^>2d-xXPdc&tIzyHZ`>>FbC4Q{FXeLR$Ta5+1HPYL%iZ$CtWuUAa-KCF zuhLHa$F*qbNP$oEq0i?}fjNm}Zytmj%=E0nePiy6n0WF;IC!O5x5#pXs|d<5=E=gN z%XN*di^3c@vU1jzqFU%iAcVw!U8IUUSjJLLVsQyIBT6X-+%5v4Mda#1JImUtG)huQ zyTB%9oMeTFmr#1e8hhIQ0ZZjYOFgj3Y*weBqQuhFjU@!O#MnK6G;vG(1CKfFnyQt; znpp$5_TjxMw-01Q_ThBZc~mPQWm#0VK^Gnq$~A?-<2YCJ3WX?ocXc$~ z1us0oMNu(=MbrA8eVcG>`bLdgu(WDoql2MBWSU0dJ3hU}3)nf8=W;%8*2rpqcki^b zaUh999eHMI4%D*5_?pmTk7p|hbxUKq2A6b%9qkdY6DI+2GQX?xG8+$cjj#^9sOQ(SmRdXVm;!AQm*q z`K} z08xE#uL1BZRkCcg56)*DCLX48mgVp7mWHyd&Q8`!4Oq^Gs!5dzx0-2Zr6%057PZnD zZuCW?C(|af8cU|f2C@WGF&@@HF=^}t`irlW9Tr6wPOZaR2Tx2*WzAlw!;XYzV8E$e zvQ;)vsr9&lr%SY13s;j~;e}g{-V-Pj$k@V1{$RMlAi6*^78Zd&ASYJ`5D}3$wNajW zVy5I*6|-ze%-@=NpUd1a8N1MXd(P@LfL$V1Cit*zf8^kNU_2Y$&EOBy^4_7#lcW>Q zpa6Ra9^ZGx@7te6*CZR6iXD}7WEMH2h zn&31&1pL#I`8W$*xbq5IjW>sz{?wNPQs}Wxz2gu1*o+uedsdE8SI-iRfi0Iz|2PIQG!2HBe?El@cj!U8avX4KhMfBqr_D~haKVMX4rC9u zEltmn+iv6z`ZL;rIuH%+4Im9^y368+QJ5`|0ya)X(i7Ys|Mv9#$RusKb#dThhN90v z=mJAZZm}c!Pmc8~s*Ib-;45MC!Mn(W^UC&~?ZSbsl350#f|L}!Y zM>bG5KVQLHykU*a04*|m5WKXH+r^wm$`Cw~gaR(#lZY2wn2a3DH=kP$?P_+UQ}28q zgvhGJ(A22!h{t@FD$EOH)z2nWHCtCW%5-0pA0Dalx57MSq!8b0GGM9bg4Q zoK_6ZOWRg5BE{x%_i9aD1Tr^0DAXb0NuxeD0rKJwcSrKV4 z1+-l{eNC5_HTnzGwTSfJDemh;iQaiyokv+zu^+IvB{h>IlEVR$Zk?|ytESwUz0yr= z360hI5&vDg^~%u8`|LP);HDk+ZJCeTv*V3A9q)ck*ZuYAvrExGdl;H^a4!ElGQdeL zPnV%A<{~GyY|BJPMJ1$}XMv7~BP>oDNFB~E`Jxw=%Jr>#+?4X2-JQD4P)=go+ZSk3 z$j6Npc)p-l0mdyQ@;;ap8sDcXn&@MRB;M4Qwa+1!04mdE_fjimE>nKu2rbROWd0aF zQAv=Hww>aui&V66-YHoT77#$6;rzDv$Fg~;tu-uvqGUt>9!#+Up%W)S*%ow@75YK1 zVOY99e<{8d3awM;viXy7L1dd!Mq_ThC&Sj%NOZZYuT@p(iegrwzM$DQ+?9TlpyjcW zY`wdPV5i^af~a5hj0Ux2S}(~6cyrdWh!NN(ib}}8b6TT^Pxy9H?&_LH0Uj`iB0^Y5 zvCysc-Hlv5SC^$6<+yEEMW*Y{YWg8Pp)$9$rS8*x_Vg4k|2_XMWKOX9VN^9K?PW42 z?G-nW{Kdl4l+RQ*X5yUjBO;%qrE<-Fd3WA=0g0>%wJLDSBNuiji^!vY4`7IZ^(n@n 
z!JK}PD#6%xYK?u?>@d7d)jc;{s1!m`@RKPynxK>*KZ7cmMR(w@(!+3UJ$+qSS>$>v zuc_l`Qv#~pqF!qjQaZ(X&##!|gT`#sZ#$z{KlLFHa@|kag%b&WwEsC#AN#4(KE{~f zx1K;8w~iVP(I@b8yGEL%K&*m->_b;jd^CrY$@C$kb~b@IVk1h{@k*U#zi|9c9t|rh z($={Du77*k>T6rJvN>{2rAlmbgSES9l`&gN>x2E%pdZ^Hr!62-h5Xdis+ingXu@oX z6>?E2t;~Tt+agFaOm!ApP0##<_-yfXkhVu|8q!}5Wi~O#f2G0de4M_?&m4KFdHEhs zJ9Od2ko+`t{ir3ZS z*)1i?6(Z-?O^eWd0s({Ehv6m6-TDY@v;Bt1HPt!>DpsTENRsL~^3X#Y6Z}JcI~RY zFpq!TA{LAHLBG(;?|6%|<$e*LyLHXt5vr;gy&dC<#Hhe_7uuAtJOGIBd6Zmdg53aY z7ldYK1F!A8vTP(2!C3PUo~bAoYaY2tqwr!GJ6v}g)8@aZ)>x^-35uh`7Yi6KJv2O{ zd1&>QU%(lilC)k79rA|4R8@+OZJVeiKQ5{G8wt}6O(8a%OFXD%x!HigC@~N(a%Jw} zYKbPcz{_M?zLqE=it3PtvoI((y_OeqaQ0T-ysUL+<%##BY z&y}yEro}hFmr^bo2S0}+={OoQ-Slb8>h&aHHWwM#BcTgDea0+s(1gm0X7{lZ_@yENH^&0+}C6! zqMN#9OHNG0NrD#`88pSF165haHHT_aDxz|EYX!v4Y`BbyTv*31!LLG4CpY{4O5>&u ztTY#Une?#O45qp?@8#0AyzIAan`_zfqN;w+g0Up76`_ZmmTR$IDm&kp%k;IP;o&@K zb0iUMtM|&uSI=6#O-oQ2+dntnvY3i4{!mEP8E3{i(V1#+6hQqR0lX}*C8spseE#pv zTF!nG8h^l##4nmLEtcbHJ$C5| zgGOSYReoKPXp=y#G&CAgTZ;b-iv==2l{6t(vD{RN*^=zZ>K>;ocqD)HB_Mb);=)^W z0m6yGyzD%W_^pp|YhOw{qU zE29_ex2y({K=w^`M1B}GCC@UznESYCvXaH;BJ$~-jafI@Yhj$GsKlui5jj(4=eLoR z4uY%qSOq7dc4;Hbf!bmJ%uIBLW$E--=Fm-ggIxRnu=|5F(L%L{RIj70{b`?jI<|2V zSU`tx1J>TXJTv9q6<_MTUx41hfxP^U!kU;?`}gVj_wl~*zIU9Q8t?a?tlcGskJn8B zpWj1)pT}FjpW}b>rFS>pZ-U=`0`JYfugk1o^OU^h@f3!>UjmwM+TSFXwXl_%G0F9t zj+y!(`UYD0ljUnTDRM z4%*H`$~Sg31k_moq`Ppveww$3`rv-8R37SZ5Vs2Pw(8iqvj!d3O=`QdiB%4Yv&GY@ zNGF^?Uyw=B-oX=F9{BOm+~U3}bf z*iIH$*92KyV%0vKUDWU{`QWIt)VsmKfs_W*2 z-4a|}dsFXj-Z0s)U02y$YxO)SLbQ7EkIzyWiX9*SfZKkR8=gFBo56@xECYx}jz*{boTLX>E7wGK*a66{aX%skL66r!R=VPJba6 zGu!|u%DT(liR3UtV@)nS$(gZWIjNc~RYtu>Oa2o8FKCLF#K^?F8yN4=ocxmYXem8) zr<_4IhT)S3ZAw-uNo|M_?#Y4>1sTYKHxLa4Ra0G#jo77Jl*n(J{AC!qdwqYRTz}WP$QkSr@zJLC1RCKQ3Juc6uE#EryWn@l2&N(kLTE?=(smQS%=KA zR=Y04iK^!iWcF!p0uf*7yLy;TXeGit?jftiwu`S&ah6~pP-G5oQ zu&ttqC-{vM&+PG?ou=zE2m_Swbp*`orz;2p8-qUgS{&)sY>Q&`+nM9(MOc8-01|Sm zW|q`QEYa0BlA$DcYe97b51&T7maRNB<{pbVt|?gij!*`zUK zN9Ca-K)4J6B}R#TfQ%@^7x&HC|DO=Fg}s(=l!h72%GRDds@-gfCkfl@R_F3eL;iXU zazyq~jXRSfv3+&(j`AmY-WjiE8GqeH_&ft8M(wt&BQH)n&K>u}V-GV^{nJ8D3xC|D zeJ=rTu2hNp0yl}{tZ0#M#2|M3LrL%G_qGuhnxTT5)NQn}>WCw$bGG;73y$A;auR|| z?$_t@F-+d?>+vSU|U0*fvOpl zkFcA)AQK@_%*tzpF^d{I13o~V&naYoRZv<*bID4N{S#L2uMmugCN~V2+|=bpp-Bm(p`7X z`OZ1(9=?0`AA9Zfyw866*=zstuHRNdn`i`2+(O@RC?!|wy2sq5?$jRTFD9uchQEJI z0ZM9rEKD#ULd|=-hpYYQp|xVZ6ewvQ*A_iFb8pF=09)22lv&f@VN z-0nubV=Q_Z-crP#U(0qYb($1sbR+D%F50ZzefCmm@RRPZ?9PM{r06TR3{oETp{K^w zJ<`?n-4(BCe)|-+;%nrQ@kk@mDD2fbEGpP^5r;4$sT#*sVny;wwEI+_WF({D$RH?e z2Beno`z8;OgpT+6^YZAK`kTZfYB8ZHai(+3GrM&BV+o%q5|gw zZb+8BPueZfWz)KoPE2I)D(Lj{jtNIr@%U~$uZpGsnQd=@-@VR-?r3|Rg!Q5$UrGIA z0QI0x_g6tezom_38MJLV>EVC)nkt_-+!)A!4~nT zv9ZeC30gC+25Th9kr)8vax12qI=)qczY$uJdlkBY7(i`0J7Reaou>57n1DZiK&AD| zXJ3$h&|&IU;J2-$ud5mQLwYx<-#q%4`M@mO0w5Kf0>0d-BP`0FQ2hp#Px~eM)?vY; zIawb|!@Ye^oLjuSCPe3h{EJMnXEt%ceX1*FEsHrbKjgzT4RXfLL+m?-9@M(Av$)=n zw5bXfsd~&og+G8^fx{;9-&!uwgE*mIO@%>L6rN%2f1Sf*Sp;rO?XLrv}e15*E{ zC$4K;3?;N!HoY5qvOET+X5SSH4&(>ar9io!8yb3_kGMp9cb?#GZC{nRo9mz>9sfLI z6x(#3Uq-MgPMvYL|0Hb4eDT2k_d{wg<>Ji;X%kd3u6*A{tzTh=E55ilRwRvmA&J1; z?O59LapPWEz@~&eyvt`_!hmMlidC1T3PYtWd;WGrUV(ddBGp4R+J5BfGv~~|Th`e} zhHIl_$5#G0gB1s8NmXkWw9f@442*7LhRSGFhth1Td}IeM{APVRmjFYA-NQ{YDt+?) zJd+DAR6NRIx+&#`J1qMnSa!?pGp2YUyS~1yGT(O-e>m$y*GD&Zeb+P%UCx*3eI9$C z{0tvY!USG!V`Bm@FZU*Y$Xx8Q-Mt)(Y5G~+dVVl)70|8EhFw_+53FA;_KP?6xQ%8e zQl)5NddE4{$po>XpJ|X2GC7IRdTD2Trp}%_r>2!8lYTgB)j7qd$T8?CP{R->6BCMf zTszitOL~%m^lx8wUMOJN~`%Lx%mVmpU)$<(Y;DpLdcBJhz! 
z6_qUS%*Zj2Sir1k*XUMF!gY7K~PDP{UoiHa69S&^E!cz81 zSA__}_YvoJIOJzGWmy**Gkdx)5@ukXew?A}wx z{sJ2XL*imna(|g;58cINgv~wndP<@L+U?03Id7_XJCzKN@sB=%usgwFhP@1S6H@>?O?Y;SnoI3{gFQYQ#Te~HM4$e z^0V#T!T?q?$c*c^016jQ!ZTSeJ~P8Ir}gu$&>rjWqaCN&33}_EH$5YJ?GMW$MKy^c}oj-EJ7om2d&mtfc0_7spF(E_G!N!N^7SDxM0Pg9b~)wTWVnNO3~!=a*+ zX*n0`#d(B7D%8Fgk!X4`gR(>o`rXuEu|dqXjG%9EP%iCBbpo=Uy;xZynY)qWS-cgm zCG@pfo68YDQpifg75C^S8GE#BNa_Q_OvfxLVlqL-9z`(;qeKrn@qt(xXJKVZK{lOr zt%K6uSt6a+0&F(Q{mmkzbl3^3!Lj)03|~6NrU%3;YTm>4$R~mynJ>n}#z0YO-z|70 zmF2@1m@Rqv!Zr^CN>ZG=VoKO2Jx8E}g2$eas)w!a-iHkD@)9Y-0W+_>ILfZM zE`sXk9ooM&F!p^qS>Nf}mZ{l{$UE@!L`ud=mh&OO2~duq8h5!CxX)$9GXuFj#1oje zq})79E zQ878cX@Q3EUfu+Y1ns@yex;&czx(y?k`Hy}%`X(;MRvZHV7?FKysN->XHdwp+*sY; zQ#P{u&zg{WS<2QGs&&R=sV;7<5$}S~lp!|AphK;4y4m|p6%Nb@&74^2z(Exfm*=P{ zl|qBGlsrxKAHbjk(636k46XV;RT;8u^D&t=XvQmlS}$MMv+yoDgdK=KgKY>!h(FKK zrbQ;@H?1T=CwmOGADPaAf}#X5??R<_lDq5Q6RC(ZX-)6Dx75^=B^Kl;Fgf6rymlAV zC)?U^($m3$nc${AjUA5s{6fywiQGu z#!94*mY)7r60qpyA|IA*`bb@fldhP!aiztCNmwK-EDYj<^kkc}`N4wxCSc+E(n14L z7z3%O-N&}Y-gb0TEDd*srHA+ONNLqBm&VRkf0bnZB=M>^>P^kwuWTjF?7ES;wOzs@ zb?=h}rgTfsJDgJr{wMD+Q!A6c&92;`)CG_I&FNU@tF;$YT69|tmx<(3Tlz_5408R@ z;c3fShmT$OL^p1Trd2bLsH{$CsI&ofnk0`T6rn8zQ+V9Y8{(XiD|)E6 zWtM5Lf|zn ze=FZ93?P zqB~+uye~VmVC98Ar{NXB&sq0misbv?Rnz=|=+Yy=5BB8zaKq=+=h%+%v9~h?;bF;f z`9)(=L+7bz*9c{w$tflP_`@zFgui1aOD;oY=hjRp(dyG)@+=~5>!$2P`8XI)cSq>k z7Se60UjMuyjyLZ3y(jj(inO;jdDV{$DXp1if*_5RpEEFoIJi9a8#8#aG7rDp7HCE$ zRdkQms024mN>^Jvqw6crZI{s8B;q$=?0ss7-)r4`D}tf(J&Tr*Ls#quo`+Gbb3upW zq2qV@`zuiqpju_u>3Wk`mmC2%WTC{?|kZ+pY4f;)4& zzRN=s%3Hhz+wuyJaygv^mK~M*iZ~1u<_B-=jFfTZO6qpUE#H=R7JX0}Ndmm<-s~__ z!dqA&L_%n<7;cc$jPAC@s2l$#=V(HCAK_)~Kes%6%~G!kBmpW+!t1}xu{i>%;#_`!gIBADwXW5$rv%==PMbh80f zF}>T74|Dq4KcQKp@5D5b$u~gBC`nb!GybBV@2B6EVNuzzMf^|8k_)u{o;UtE8cBNz5F&@wH>Qp~8SgJGsc$kPa%Cpf>|BP11({tk<8x*49ei2V^JS}Y zNxs1B6ErPzi~6x_qe-OGjT)SJXhvVI2i5wg=nQnqLvzy2)6O(4Uxk5k(BV_krE%Vl zLJIF!1|zN&1SMN3+9qHu$?EN!$#%u6*msjVr?{tvXC{g!PN{mvNoK+(;JFKU_RvyF zagMtM?go`SCD@3SNMZkk%ue-BUDL^^gc&Z=zyB!+}b880;%EjZ9LB|$j_#Zvg{%< zu~4d@iry5d_EzEUuXXs&-j((o_hQsX!vmwCf}hO9D7Gg#R@;=1bw`hm7vI@B zo+#zCk1?;G6)?BgCezqTwT#k}iFr%;f<}b;WI~B~k;0#kJBA;%B_(dMiKVH<$v29y z^}m&$q?C2{s_@MtyA~Qui*R2z;`drrYe6WhELylK>r-Bg@hhA->UD?))dAN=Q6O zf%VJITGF?lC4!`nK8b9`I4SWwm~!gV>^spJ36(62vS!8`eDQ{Z*Y{0k2ZFsD)FXAt z3ss%Zy_h?lx#Dd1C^o)?b{$eNrmmJ*Wp5|~^&JY{PD9;O&EW#=Al zg@{){@fQZ12<_XRJe8Va%J0m$6ADNp4-HENUgtLK50087Ac0GQmVKNWE6I63A8^SJ z!RITZ`9YmD&&id~$#I|&lpdvaH4BH7m6;x~NJwaCo=d+Yv^~>}X>8#Tw0IQ(_I$*+ zloQijqJc%3KmhetKx5)yBvCOFBHQ+}*+c3g(TJ%-Pcw#4;6Ez&uz#74SX zjy{qRsRLxIc733kCKKJo{h+0Kk0~zKVkE+vnEWJTD_8rH;+V~)PI)!yW9u%n$gsu3RdNFjI#u2Ulp!frX8J(=mTj zo0+#uptxjWtWV2qo70~;?sL4sre)Kb+KeY57`6c42Co}e?CF`iW9yHZb>DgCK3k|T1C z#=1E6)wmDT=6YTYSV>vxx3zedrP^T9^as5flAVS})AKezP8P4AMf=@dhv)gN8@`ty zC53H@9AIR*GDZZ#^K=euW%+yZv$SpSs{QqGd#s}KUe)T}WUyDYLFwmr&dU@e@1y%% zwu4U=-3;=hZ-GBn_>>zBby7IC;rFXKj!*agDA$iSsZfz3bV|H;MiG_1P_z3>iWkdz zsU3-I8^~)Vxs>SH-L~kw%!v<%@`QEWvJRA$4droh@uDDXjK9(LAzo2xq4D5sp)X^d z`w68d5V-o|s^O+e$TF(quHX*FEB?6A`$wxQH6P}4o0wD3h0MmTsMyxeqYsJsA&HEW zmy=|ZJd?#kO^8>`^fJ5INsCX(OTLVc+ZfH+O!+u(t;w9%za6`p)L&_+T7`G-qWTlq zCoH%Ef@=57`TCVQO`F7M?Gt~4F!;y&o`*@ryalQ^+m=Z?$6hslm9d>TQoI8ef0AxmGB0AI zRrj;bd}bjP>nHf^W_x3g)}6g*#+mrQez8>!(clq>C{|3K4Q9(~*~~{=R`_0tJ|?^| zd3a%L(N;sZ`H|@8XUgJ%O8jhDLYK3T2ppuh0ySrs;*}=F+$-VM-X9BCo2;XTngbIe zy~FOs?#$d|$Bu|$ueK|2>+H6wpKwiW=V0K}$JF#M<{Wr!A4i25*IO&37nQWB(-=tC zGPo^X+BYiO->Hfj;-O?|Om)K#?G3LV*8nl|#}8!)>~~#(wdZR5oUdF>{$y^MuFvUu zvXzP}e>Zn4mEYrsf*CZVkX0iFhq512-WOZcIvG8O9d{H>eao_6dg^B532zlOrS?Ah zTGn2cZQ5P0b@!L^cG$ihQ}~WSj_kuVJ~Wm-?30FoYADrZLyJjBcHUOyJnNgnKanOo 
zjrJ}+Tb^asn|o>6#h}!&g$py%xnT>Fk4JZlGsdz~+E4YwGyUckn+<(S+IOMoz=V|r ze0VLk4=AcILZ6kpoAU@>D!YTZxQK>aHa#5jsv!OD(jYkW-ODuD7?Lm3`SrmFv`==S z(CLjhD_Sv%ppdDjFo=gey`SKI*6m=r_o~fA?oY*Zj$_i7=Mc)*Q?|Po7s% zEJNga)l~X47-a5L!47ts&Q!{KUvCS9hI^%D^h$wZym!`;y9>RDp2c=wYTtbIGD%f_ z{R8{qMq^tQ-}DgKK4Lu|x`U{Hb7Cd4w>SpQ9NR7w3w57g{+yp!cDI!jfXCp1ul|2P zjllnfG-3?HEp$vMQBGDE3wufk;9qnpDKSMCl)0N7#!1QC4JDu?0RjMF05AXugh8P| zkT3wm4*>8B{>6oH5s=62M{ztrqupKIT+H1tE*y%wP(d*bjMe|MANbdPEfz zA>cp|;+j014%l^Dpb!`Uc#RJZfWvWUF#j0;u^A2(hhNhHfI}gO>pI|21oUt6Kmf$w zX%Hazx()yWaynqDA49CSUuKrlcYd`$-sg7f2Atbi~O z5Pn@A0t^9M%Oy}82*PFizx)M?LvbSin+C!~4|I(W1c8FYuh{~EiNmhv_a8O+ziR{6 zs&;6U3ndw@-*xSLP`E}C)4^ceIB>PXp(FOd$r{7)&ocqHRx*}Sh5;c!kdh)?K^Y2A zgeWK=an8v@5J;dhLLR7~BqIe-MuOxOzz_ro00H1~fPerH@^GLcR2+nag5mNqCb;G$ bb9FO!ar>)xaTb9=a7r>>US&-rCE5P~0p|_* diff --git a/logo/usage_guidelines.md b/logo/usage_guidelines.md deleted file mode 100644 index 9a081235e85..00000000000 --- a/logo/usage_guidelines.md +++ /dev/null @@ -1,16 +0,0 @@ -# Kubernetes Branding Guidelines - -These guidelines provide you with guidance for using the Kubespray logo. -All artwork is made available under the Linux Foundation trademark usage -[guidelines](https://www.linuxfoundation.org/trademark-usage/). This text from -those guidelines, and the correct and incorrect usage examples, are particularly -helpful: ->Certain marks of The Linux Foundation have been created to enable you to ->communicate compatibility or interoperability of software or products. In ->addition to the requirement that any use of a mark to make an assertion of ->compatibility must, of course, be accurate, the use of these marks must ->avoid confusion regarding The Linux Foundation’s association with the ->product. The use of the mark cannot imply that The Linux Foundation or ->its projects are sponsoring or endorsing the product. - -Additionally, permission is granted to modify the Kubespray mark for non-commercial uses such as t-shirts and stickers. 
diff --git a/meta/runtime.yml b/meta/runtime.yml index b1198d77732..f3791fb8514 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -1,2 +1,2 @@ --- -requires_ansible: ">=2.17.3" +requires_ansible: ">=2.16.14" diff --git a/pipeline.Dockerfile b/pipeline.Dockerfile deleted file mode 100644 index d6d6ebcd03e..00000000000 --- a/pipeline.Dockerfile +++ /dev/null @@ -1,60 +0,0 @@ -# Use imutable image tags rather than mutable tags (like ubuntu:22.04) -FROM ubuntu:jammy-20230308 -# Some tools like yamllint need this -# Pip needs this as well at the moment to install ansible -# (and potentially other packages) -# See: https://github.com/pypa/pip/issues/10219 -ENV VAGRANT_VERSION=2.4.1 \ - VAGRANT_DEFAULT_PROVIDER=libvirt \ - VAGRANT_ANSIBLE_TAGS=facts \ - LANG=C.UTF-8 \ - DEBIAN_FRONTEND=noninteractive \ - PYTHONDONTWRITEBYTECODE=1 - -RUN apt update -q \ - && apt install -yq \ - libssl-dev \ - python3-dev \ - python3-pip \ - sshpass \ - apt-transport-https \ - jq \ - moreutils \ - libvirt-dev \ - openssh-client \ - rsync \ - git \ - ca-certificates \ - curl \ - gnupg2 \ - software-properties-common \ - unzip \ - libvirt-clients \ - qemu-utils \ - qemu-kvm \ - dnsmasq \ - && curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \ - && add-apt-repository "deb [arch=$(dpkg --print-architecture)] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" \ - && apt update -q \ - && apt install --no-install-recommends -yq docker-ce \ - && apt autoremove -yqq --purge && apt clean && rm -rf /var/lib/apt/lists/* /var/log/* - -WORKDIR /kubespray -ADD ./requirements.txt /kubespray/requirements.txt -ADD ./tests/requirements.txt /kubespray/tests/requirements.txt - -RUN update-alternatives --install /usr/bin/python python /usr/bin/python3 1 \ - && pip install --no-compile --no-cache-dir pip -U \ - && pip install --no-compile --no-cache-dir -r tests/requirements.txt \ - && pip install --no-compile --no-cache-dir -r requirements.txt \ - && curl -L https://dl.k8s.io/release/v1.33.4/bin/linux/$(dpkg --print-architecture)/kubectl -o /usr/local/bin/kubectl \ - && echo $(curl -L https://dl.k8s.io/release/v1.33.4/bin/linux/$(dpkg --print-architecture)/kubectl.sha256) /usr/local/bin/kubectl | sha256sum --check \ - && chmod a+x /usr/local/bin/kubectl \ - # Install Vagrant - && curl -LO https://releases.hashicorp.com/vagrant/${VAGRANT_VERSION}/vagrant_${VAGRANT_VERSION}-1_$(dpkg --print-architecture).deb \ - && dpkg -i vagrant_${VAGRANT_VERSION}-1_$(dpkg --print-architecture).deb \ - && rm vagrant_${VAGRANT_VERSION}-1_$(dpkg --print-architecture).deb \ - && vagrant plugin install vagrant-libvirt \ - # Install Kubernetes collections - && pip install --no-compile --no-cache-dir kubernetes \ - && ansible-galaxy collection install kubernetes.core diff --git a/playbooks/ansible_version.yml b/playbooks/ansible_version.yml index a07b401e6d1..e2db9f79363 100644 --- a/playbooks/ansible_version.yml +++ b/playbooks/ansible_version.yml @@ -5,7 +5,7 @@ become: false run_once: true vars: - minimal_ansible_version: 2.17.3 + minimal_ansible_version: 2.16.14 maximal_ansible_version: 2.18.0 tags: always tasks: diff --git a/scripts/gitlab-runner.sh b/scripts/gitlab-runner.sh deleted file mode 100644 index c05ee7ea188..00000000000 --- a/scripts/gitlab-runner.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/sh - -docker run -d --name gitlab-runner --restart always -v /srv/gitlab-runner/cache:/srv/gitlab-runner/cache -v /srv/gitlab-runner/config:/etc/gitlab-runner -v /var/run/docker.sock:/var/run/docker.sock 
gitlab/gitlab-runner:v1.10.0 - -# -#/srv/gitlab-runner/config# cat config.toml -#concurrent = 10 -#check_interval = 1 - -#[[runners]] -# name = "2edf3d71fe19" -# url = "https://gitlab.com" -# token = "THE TOKEN-CHANGEME" -# executor = "docker" -# [runners.docker] -# tls_verify = false -# image = "docker:latest" -# privileged = true -# disable_cache = false -# cache_dir = "/srv/gitlab-runner/cache" -# volumes = ["/var/run/docker.sock:/var/run/docker.sock", "/srv/gitlab-runner/cache:/cache:rw"] -# [runners.cache] diff --git a/test-infra/vagrant-docker/Dockerfile b/test-infra/vagrant-docker/Dockerfile deleted file mode 100644 index 7a0f0e08da4..00000000000 --- a/test-infra/vagrant-docker/Dockerfile +++ /dev/null @@ -1,16 +0,0 @@ -# Docker image published at quay.io/kubespray/vagrant - -ARG KUBESPRAY_VERSION -FROM quay.io/kubespray/kubespray:${KUBESPRAY_VERSION} - -ENV VAGRANT_VERSION=2.3.7 -ENV VAGRANT_DEFAULT_PROVIDER=libvirt -ENV VAGRANT_ANSIBLE_TAGS=facts - -RUN apt-get update && apt-get install -y wget libvirt-dev openssh-client rsync git build-essential - -# Install Vagrant -RUN wget https://releases.hashicorp.com/vagrant/${VAGRANT_VERSION}/vagrant_${VAGRANT_VERSION}-1_amd64.deb && \ - dpkg -i vagrant_${VAGRANT_VERSION}-1_amd64.deb && \ - rm vagrant_${VAGRANT_VERSION}-1_amd64.deb && \ - vagrant plugin install vagrant-libvirt diff --git a/test-infra/vagrant-docker/README.md b/test-infra/vagrant-docker/README.md deleted file mode 100644 index 36dcb9e9622..00000000000 --- a/test-infra/vagrant-docker/README.md +++ /dev/null @@ -1,24 +0,0 @@ -# vagrant docker image - -This image is used for the vagrant CI jobs. It is using the libvirt driver. - -## Usage - -```console -$ docker run --net host --rm -it -v /var/run/libvirt/libvirt-sock:/var/run/libvirt/libvirt-sock quay.io/kubespray/vagrant -$ vagrant up -Bringing machine 'k8s-1' up with 'libvirt' provider... -Bringing machine 'k8s-2' up with 'libvirt' provider... -Bringing machine 'k8s-3' up with 'libvirt' provider... -[...] -``` - -## Cache - -You can set `/root/kubespray_cache` as a volume to keep cache between runs. - -## Building - -```shell -./build.sh v2.12.5 -``` diff --git a/test-infra/vagrant-docker/build.sh b/test-infra/vagrant-docker/build.sh deleted file mode 100755 index dcf54456b91..00000000000 --- a/test-infra/vagrant-docker/build.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/sh -set -euo pipefail - -if [ "$#" -ne 1 ]; then - echo "Usage: $0 tag" >&2 - exit 1 -fi - -VERSION="$1" -IMG="quay.io/kubespray/vagrant:${VERSION}" - -docker build . 
--build-arg "KUBESPRAY_VERSION=${VERSION}" --tag "$IMG" -docker push "$IMG" From 173e61a81a9ac86355fbee88920e199903844e34 Mon Sep 17 00:00:00 2001 From: Brian Vo Date: Sat, 6 Sep 2025 22:15:40 +0700 Subject: [PATCH 02/10] update setting --- README.md | 162 +- docs/cloud_controllers/openstack.md | 134 - docs/cloud_controllers/vsphere.md | 134 - docs/cloud_providers/aws.md | 95 - docs/cloud_providers/azure.md | 125 - docs/cloud_providers/cloud.md | 15 - .../group_vars/k8s_cluster/addons.yml | 30 +- .../group_vars/k8s_cluster/k8s-cluster.yml | 10 +- .../group_vars/k8s_cluster/k8s-net-calico.yml | 126 - .../group_vars/k8s_cluster/k8s-net-cilium.yml | 24 +- .../k8s_cluster/k8s-net-custom-cni.yml | 51 - .../k8s_cluster/k8s-net-flannel.yml | 18 - .../k8s_cluster/k8s-net-macvlan.yml | 6 - playbooks/cluster.yml | 9 - .../container-engine/containerd/meta/main.yml | 1 - roles/container-engine/cri-o/meta/main.yml | 1 - .../kata-containers/defaults/main.yml | 10 - .../molecule/default/converge.yml | 11 - .../molecule/default/files/10-mynet.conf | 17 - .../molecule/default/files/container.json | 10 - .../molecule/default/files/sandbox.json | 10 - .../molecule/default/molecule.yml | 28 - .../molecule/default/verify.yml | 23 - .../kata-containers/tasks/main.yml | 54 - .../templates/configuration-qemu.toml.j2 | 706 ----- .../templates/containerd-shim-kata-v2.j2 | 2 - roles/container-engine/meta/main.yml | 36 - .../nerdctl/handlers/main.yml | 12 - roles/container-engine/nerdctl/tasks/main.yml | 36 - .../nerdctl/templates/nerdctl.toml.j2 | 9 - roles/container-engine/skopeo/tasks/main.yml | 32 - .../container-engine/youki/defaults/main.yml | 3 - .../youki/molecule/default/converge.yml | 11 - .../molecule/default/files/10-mynet.conf | 17 - .../molecule/default/files/container.json | 10 - .../youki/molecule/default/files/sandbox.json | 10 - .../youki/molecule/default/molecule.yml | 28 - .../youki/molecule/default/verify.yml | 19 - roles/container-engine/youki/tasks/main.yml | 12 - roles/kubernetes/control-plane/meta/main.yml | 1 - .../network_plugin/calico/files/openssl.conf | 27 - roles/network_plugin/calico/handlers/main.yml | 31 - roles/network_plugin/calico/meta/main.yml | 3 - .../calico/rr/defaults/main.yml | 5 - roles/network_plugin/calico/rr/tasks/main.yml | 16 - roles/network_plugin/calico/rr/tasks/pre.yml | 15 - .../calico/rr/tasks/update-node.yml | 50 - .../calico/tasks/calico_apiserver_certs.yml | 60 - roles/network_plugin/calico/tasks/check.yml | 235 -- roles/network_plugin/calico/tasks/install.yml | 510 ---- roles/network_plugin/calico/tasks/main.yml | 9 - .../calico/tasks/peer_with_calico_rr.yml | 86 - .../calico/tasks/peer_with_router.yml | 116 - roles/network_plugin/calico/tasks/pre.yml | 36 - roles/network_plugin/calico/tasks/repos.yml | 21 - roles/network_plugin/calico/tasks/reset.yml | 30 - .../calico/tasks/typha_certs.yml | 52 - .../templates/calico-apiserver-ns.yml.j2 | 10 - .../calico/templates/calico-apiserver.yml.j2 | 301 -- .../calico/templates/calico-config.yml.j2 | 106 - .../calico/templates/calico-cr.yml.j2 | 213 -- .../calico/templates/calico-crb.yml.j2 | 28 - .../calico/templates/calico-ipamconfig.yml.j2 | 8 - .../calico/templates/calico-node-sa.yml.j2 | 13 - .../calico/templates/calico-node.yml.j2 | 513 ---- .../calico/templates/calico-typha.yml.j2 | 186 -- .../calico/templates/calicoctl.etcd.sh.j2 | 6 - .../calico/templates/calicoctl.kdd.sh.j2 | 8 - .../kubernetes-services-endpoint.yml.j2 | 11 - .../calico/templates/make-ssl-calico.sh.j2 | 102 - 
roles/network_plugin/calico/vars/amazon.yml | 5 - roles/network_plugin/calico/vars/centos-9.yml | 3 - roles/network_plugin/calico/vars/debian.yml | 3 - roles/network_plugin/calico/vars/fedora.yml | 3 - roles/network_plugin/calico/vars/opensuse.yml | 3 - roles/network_plugin/calico/vars/redhat-9.yml | 3 - roles/network_plugin/calico/vars/redhat.yml | 4 - roles/network_plugin/calico/vars/rocky-9.yml | 3 - .../calico_defaults/defaults/main.yml | 177 -- roles/network_plugin/cni/defaults/main.yml | 2 - roles/network_plugin/cni/tasks/main.yml | 16 - .../custom_cni/defaults/main.yml | 11 - roles/network_plugin/custom_cni/meta/main.yml | 20 - .../network_plugin/custom_cni/tasks/main.yml | 29 - .../network_plugin/flannel/defaults/main.yml | 28 - roles/network_plugin/flannel/meta/main.yml | 3 - roles/network_plugin/flannel/tasks/main.yml | 38 - roles/network_plugin/flannel/tasks/reset.yml | 24 - .../flannel/templates/cni-flannel-rbac.yml.j2 | 52 - .../flannel/templates/cni-flannel.yml.j2 | 172 -- .../network_plugin/kube-ovn/defaults/main.yml | 135 - roles/network_plugin/kube-ovn/tasks/main.yml | 26 - .../templates/cni-kube-ovn-crd.yml.j2 | 2587 ----------------- .../kube-ovn/templates/cni-kube-ovn.yml.j2 | 912 ------ .../kube-ovn/templates/cni-ovn.yml.j2 | 674 ----- .../kube-router/defaults/main.yml | 69 - .../kube-router/handlers/main.yml | 20 - .../network_plugin/kube-router/meta/main.yml | 3 - .../kube-router/tasks/annotate.yml | 21 - .../network_plugin/kube-router/tasks/main.yml | 84 - .../kube-router/tasks/reset.yml | 28 - .../kube-router/templates/cni-conf.json.j2 | 27 - .../kube-router/templates/kube-router.yml.j2 | 228 -- .../kube-router/templates/kubeconfig.yml.j2 | 18 - .../network_plugin/macvlan/defaults/main.yml | 6 - .../network_plugin/macvlan/files/ifdown-local | 6 - .../macvlan/files/ifdown-macvlan | 40 - roles/network_plugin/macvlan/files/ifup-local | 6 - .../network_plugin/macvlan/files/ifup-macvlan | 43 - .../network_plugin/macvlan/handlers/main.yml | 15 - roles/network_plugin/macvlan/meta/main.yml | 3 - roles/network_plugin/macvlan/tasks/main.yml | 110 - .../macvlan/templates/10-macvlan.conf.j2 | 15 - .../macvlan/templates/99-loopback.conf.j2 | 5 - .../templates/centos-network-macvlan.cfg.j2 | 13 - .../templates/centos-postdown-macvlan.cfg.j2 | 3 - .../templates/centos-postup-macvlan.cfg.j2 | 3 - .../templates/centos-routes-macvlan.cfg.j2 | 7 - .../templates/coreos-device-macvlan.cfg.j2 | 6 - .../templates/coreos-interface-macvlan.cfg.j2 | 6 - .../templates/coreos-network-macvlan.cfg.j2 | 17 - .../templates/coreos-service-nat_ouside.j2 | 6 - .../templates/debian-network-macvlan.cfg.j2 | 26 - roles/network_plugin/meta/main.yml | 38 - roles/network_plugin/multus/defaults/main.yml | 10 - .../multus/files/multus-clusterrole.yml | 28 - .../files/multus-clusterrolebinding.yml | 13 - .../multus/files/multus-crd.yml | 45 - .../multus/files/multus-serviceaccount.yml | 6 - roles/network_plugin/multus/meta/main.yml | 3 - roles/network_plugin/multus/tasks/main.yml | 54 - .../multus/templates/multus-daemonset.yml.j2 | 100 - roles/network_plugin/ovn4nfv/tasks/main.yml | 16 - 133 files changed, 45 insertions(+), 10890 deletions(-) delete mode 100644 docs/cloud_controllers/openstack.md delete mode 100644 docs/cloud_controllers/vsphere.md delete mode 100644 docs/cloud_providers/aws.md delete mode 100644 docs/cloud_providers/azure.md delete mode 100644 docs/cloud_providers/cloud.md delete mode 100644 inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-calico.yml delete mode 100644 
inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-custom-cni.yml delete mode 100644 inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-flannel.yml delete mode 100644 inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-macvlan.yml delete mode 100644 roles/container-engine/kata-containers/defaults/main.yml delete mode 100644 roles/container-engine/kata-containers/molecule/default/converge.yml delete mode 100644 roles/container-engine/kata-containers/molecule/default/files/10-mynet.conf delete mode 100644 roles/container-engine/kata-containers/molecule/default/files/container.json delete mode 100644 roles/container-engine/kata-containers/molecule/default/files/sandbox.json delete mode 100644 roles/container-engine/kata-containers/molecule/default/molecule.yml delete mode 100644 roles/container-engine/kata-containers/molecule/default/verify.yml delete mode 100644 roles/container-engine/kata-containers/tasks/main.yml delete mode 100644 roles/container-engine/kata-containers/templates/configuration-qemu.toml.j2 delete mode 100644 roles/container-engine/kata-containers/templates/containerd-shim-kata-v2.j2 delete mode 100644 roles/container-engine/nerdctl/handlers/main.yml delete mode 100644 roles/container-engine/nerdctl/tasks/main.yml delete mode 100644 roles/container-engine/nerdctl/templates/nerdctl.toml.j2 delete mode 100644 roles/container-engine/skopeo/tasks/main.yml delete mode 100644 roles/container-engine/youki/defaults/main.yml delete mode 100644 roles/container-engine/youki/molecule/default/converge.yml delete mode 100644 roles/container-engine/youki/molecule/default/files/10-mynet.conf delete mode 100644 roles/container-engine/youki/molecule/default/files/container.json delete mode 100644 roles/container-engine/youki/molecule/default/files/sandbox.json delete mode 100644 roles/container-engine/youki/molecule/default/molecule.yml delete mode 100644 roles/container-engine/youki/molecule/default/verify.yml delete mode 100644 roles/container-engine/youki/tasks/main.yml delete mode 100644 roles/network_plugin/calico/files/openssl.conf delete mode 100644 roles/network_plugin/calico/handlers/main.yml delete mode 100644 roles/network_plugin/calico/meta/main.yml delete mode 100644 roles/network_plugin/calico/rr/defaults/main.yml delete mode 100644 roles/network_plugin/calico/rr/tasks/main.yml delete mode 100644 roles/network_plugin/calico/rr/tasks/pre.yml delete mode 100644 roles/network_plugin/calico/rr/tasks/update-node.yml delete mode 100644 roles/network_plugin/calico/tasks/calico_apiserver_certs.yml delete mode 100644 roles/network_plugin/calico/tasks/check.yml delete mode 100644 roles/network_plugin/calico/tasks/install.yml delete mode 100644 roles/network_plugin/calico/tasks/main.yml delete mode 100644 roles/network_plugin/calico/tasks/peer_with_calico_rr.yml delete mode 100644 roles/network_plugin/calico/tasks/peer_with_router.yml delete mode 100644 roles/network_plugin/calico/tasks/pre.yml delete mode 100644 roles/network_plugin/calico/tasks/repos.yml delete mode 100644 roles/network_plugin/calico/tasks/reset.yml delete mode 100644 roles/network_plugin/calico/tasks/typha_certs.yml delete mode 100644 roles/network_plugin/calico/templates/calico-apiserver-ns.yml.j2 delete mode 100644 roles/network_plugin/calico/templates/calico-apiserver.yml.j2 delete mode 100644 roles/network_plugin/calico/templates/calico-config.yml.j2 delete mode 100644 roles/network_plugin/calico/templates/calico-cr.yml.j2 delete mode 100644 roles/network_plugin/calico/templates/calico-crb.yml.j2 delete mode 100644 
roles/network_plugin/calico/templates/calico-ipamconfig.yml.j2 delete mode 100644 roles/network_plugin/calico/templates/calico-node-sa.yml.j2 delete mode 100644 roles/network_plugin/calico/templates/calico-node.yml.j2 delete mode 100644 roles/network_plugin/calico/templates/calico-typha.yml.j2 delete mode 100644 roles/network_plugin/calico/templates/calicoctl.etcd.sh.j2 delete mode 100644 roles/network_plugin/calico/templates/calicoctl.kdd.sh.j2 delete mode 100644 roles/network_plugin/calico/templates/kubernetes-services-endpoint.yml.j2 delete mode 100644 roles/network_plugin/calico/templates/make-ssl-calico.sh.j2 delete mode 100644 roles/network_plugin/calico/vars/amazon.yml delete mode 100644 roles/network_plugin/calico/vars/centos-9.yml delete mode 100644 roles/network_plugin/calico/vars/debian.yml delete mode 100644 roles/network_plugin/calico/vars/fedora.yml delete mode 100644 roles/network_plugin/calico/vars/opensuse.yml delete mode 100644 roles/network_plugin/calico/vars/redhat-9.yml delete mode 100644 roles/network_plugin/calico/vars/redhat.yml delete mode 100644 roles/network_plugin/calico/vars/rocky-9.yml delete mode 100644 roles/network_plugin/calico_defaults/defaults/main.yml delete mode 100644 roles/network_plugin/cni/defaults/main.yml delete mode 100644 roles/network_plugin/cni/tasks/main.yml delete mode 100644 roles/network_plugin/custom_cni/defaults/main.yml delete mode 100644 roles/network_plugin/custom_cni/meta/main.yml delete mode 100644 roles/network_plugin/custom_cni/tasks/main.yml delete mode 100644 roles/network_plugin/flannel/defaults/main.yml delete mode 100644 roles/network_plugin/flannel/meta/main.yml delete mode 100644 roles/network_plugin/flannel/tasks/main.yml delete mode 100644 roles/network_plugin/flannel/tasks/reset.yml delete mode 100644 roles/network_plugin/flannel/templates/cni-flannel-rbac.yml.j2 delete mode 100644 roles/network_plugin/flannel/templates/cni-flannel.yml.j2 delete mode 100644 roles/network_plugin/kube-ovn/defaults/main.yml delete mode 100644 roles/network_plugin/kube-ovn/tasks/main.yml delete mode 100644 roles/network_plugin/kube-ovn/templates/cni-kube-ovn-crd.yml.j2 delete mode 100644 roles/network_plugin/kube-ovn/templates/cni-kube-ovn.yml.j2 delete mode 100644 roles/network_plugin/kube-ovn/templates/cni-ovn.yml.j2 delete mode 100644 roles/network_plugin/kube-router/defaults/main.yml delete mode 100644 roles/network_plugin/kube-router/handlers/main.yml delete mode 100644 roles/network_plugin/kube-router/meta/main.yml delete mode 100644 roles/network_plugin/kube-router/tasks/annotate.yml delete mode 100644 roles/network_plugin/kube-router/tasks/main.yml delete mode 100644 roles/network_plugin/kube-router/tasks/reset.yml delete mode 100644 roles/network_plugin/kube-router/templates/cni-conf.json.j2 delete mode 100644 roles/network_plugin/kube-router/templates/kube-router.yml.j2 delete mode 100644 roles/network_plugin/kube-router/templates/kubeconfig.yml.j2 delete mode 100644 roles/network_plugin/macvlan/defaults/main.yml delete mode 100644 roles/network_plugin/macvlan/files/ifdown-local delete mode 100755 roles/network_plugin/macvlan/files/ifdown-macvlan delete mode 100755 roles/network_plugin/macvlan/files/ifup-local delete mode 100755 roles/network_plugin/macvlan/files/ifup-macvlan delete mode 100644 roles/network_plugin/macvlan/handlers/main.yml delete mode 100644 roles/network_plugin/macvlan/meta/main.yml delete mode 100644 roles/network_plugin/macvlan/tasks/main.yml delete mode 100644 
roles/network_plugin/macvlan/templates/10-macvlan.conf.j2 delete mode 100644 roles/network_plugin/macvlan/templates/99-loopback.conf.j2 delete mode 100644 roles/network_plugin/macvlan/templates/centos-network-macvlan.cfg.j2 delete mode 100644 roles/network_plugin/macvlan/templates/centos-postdown-macvlan.cfg.j2 delete mode 100644 roles/network_plugin/macvlan/templates/centos-postup-macvlan.cfg.j2 delete mode 100644 roles/network_plugin/macvlan/templates/centos-routes-macvlan.cfg.j2 delete mode 100644 roles/network_plugin/macvlan/templates/coreos-device-macvlan.cfg.j2 delete mode 100644 roles/network_plugin/macvlan/templates/coreos-interface-macvlan.cfg.j2 delete mode 100644 roles/network_plugin/macvlan/templates/coreos-network-macvlan.cfg.j2 delete mode 100644 roles/network_plugin/macvlan/templates/coreos-service-nat_ouside.j2 delete mode 100644 roles/network_plugin/macvlan/templates/debian-network-macvlan.cfg.j2 delete mode 100644 roles/network_plugin/multus/defaults/main.yml delete mode 100644 roles/network_plugin/multus/files/multus-clusterrole.yml delete mode 100644 roles/network_plugin/multus/files/multus-clusterrolebinding.yml delete mode 100644 roles/network_plugin/multus/files/multus-crd.yml delete mode 100644 roles/network_plugin/multus/files/multus-serviceaccount.yml delete mode 100644 roles/network_plugin/multus/meta/main.yml delete mode 100644 roles/network_plugin/multus/tasks/main.yml delete mode 100644 roles/network_plugin/multus/templates/multus-daemonset.yml.j2 delete mode 100644 roles/network_plugin/ovn4nfv/tasks/main.yml diff --git a/README.md b/README.md index 1257ec79d5d..0fe3c982e85 100644 --- a/README.md +++ b/README.md @@ -20,153 +20,17 @@ docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inve # Inside the container you may now run the kubespray playbooks: ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml ``` -## Documents -- [Requirements](#requirements) -- [Kubespray vs ...](docs/getting_started/comparisons.md) -- [Getting started](docs/getting_started/getting-started.md) -- [Setting up your first cluster](docs/getting_started/setting-up-your-first-cluster.md) -- [Ansible inventory and tags](docs/ansible/ansible.md) -- [Integration with existing ansible repo](docs/operations/integration.md) -- [Deployment data variables](docs/ansible/vars.md) -- [DNS stack](docs/advanced/dns-stack.md) -- [HA mode](docs/operations/ha-mode.md) -- [Network plugins](#network-plugins) -- [Vagrant install](docs/developers/vagrant.md) -- [Flatcar Container Linux bootstrap](docs/operating_systems/flatcar.md) -- [Fedora CoreOS bootstrap](docs/operating_systems/fcos.md) -- [openSUSE setup](docs/operating_systems/opensuse.md) -- [Downloaded artifacts](docs/advanced/downloads.md) -- [Equinix Metal](docs/cloud_providers/equinix-metal.md) -- [OpenStack](docs/cloud_controllers/openstack.md) -- [vSphere](docs/cloud_controllers/vsphere.md) -- [Large deployments](docs/operations/large-deployments.md) -- [Adding/replacing a node](docs/operations/nodes.md) -- [Upgrades basics](docs/operations/upgrades.md) -- [Air-Gap installation](docs/operations/offline-environment.md) -- [NTP](docs/advanced/ntp.md) -- [Hardening](docs/operations/hardening.md) -- [Mirror](docs/operations/mirror.md) -- [Roadmap](docs/roadmap/roadmap.md) - -## Supported Linux Distributions - -- **Flatcar Container Linux by Kinvolk** -- **Debian** Bookworm, Bullseye, Trixie -- **Ubuntu** 22.04, 24.04 -- **CentOS/RHEL** [8, 9](docs/operating_systems/rhel.md#rhel-8) -- 
**Fedora** 39, 40 -- **Fedora CoreOS** (see [fcos Note](docs/operating_systems/fcos.md)) -- **openSUSE** Leap 15.x/Tumbleweed -- **Oracle Linux** [8, 9](docs/operating_systems/rhel.md#rhel-8) -- **Alma Linux** [8, 9](docs/operating_systems/rhel.md#rhel-8) -- **Rocky Linux** [8, 9](docs/operating_systems/rhel.md#rhel-8) -- **Kylin Linux Advanced Server V10** (experimental: see [kylin linux notes](docs/operating_systems/kylinlinux.md)) -- **Amazon Linux 2** (experimental: see [amazon linux notes](docs/operating_systems/amazonlinux.md)) -- **UOS Linux** (experimental: see [uos linux notes](docs/operating_systems/uoslinux.md)) -- **openEuler** (experimental: see [openEuler notes](docs/operating_systems/openeuler.md)) - -Note: - -- Upstart/SysV init based OS types are not supported. -- [Kernel requirements](docs/operations/kernel-requirements.md) (please read if the OS kernel version is < 4.19). - -## Supported Components - - - -- Core - - [kubernetes](https://github.com/kubernetes/kubernetes) 1.33.4 - - [etcd](https://github.com/etcd-io/etcd) 3.5.22 - - [docker](https://www.docker.com/) 28.3 - - [containerd](https://containerd.io/) 2.1.4 - - [cri-o](http://cri-o.io/) 1.33.3 (experimental: see [CRI-O Note](docs/CRI/cri-o.md). Only on fedora, ubuntu and centos based OS) -- Network Plugin - - [cni-plugins](https://github.com/containernetworking/plugins) 1.4.1 - - [calico](https://github.com/projectcalico/calico) 3.29.5 - - [cilium](https://github.com/cilium/cilium) 1.17.7 - - [flannel](https://github.com/flannel-io/flannel) 0.26.7 - - [kube-ovn](https://github.com/alauda/kube-ovn) 1.12.21 - - [kube-router](https://github.com/cloudnativelabs/kube-router) 2.1.1 - - [multus](https://github.com/k8snetworkplumbingwg/multus-cni) 4.2.2 - - [kube-vip](https://github.com/kube-vip/kube-vip) 0.8.0 -- Application - - [cert-manager](https://github.com/jetstack/cert-manager) 1.15.3 - - [coredns](https://github.com/coredns/coredns) 1.12.0 - - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) 1.12.1 - - [argocd](https://argoproj.github.io/) 2.14.5 - - [helm](https://helm.sh/) 3.18.4 - - [metallb](https://metallb.universe.tf/) 0.13.9 - - [registry](https://github.com/distribution/distribution) 2.8.1 -- Storage Plugin - - [aws-ebs-csi-plugin](https://github.com/kubernetes-sigs/aws-ebs-csi-driver) 0.5.0 - - [azure-csi-plugin](https://github.com/kubernetes-sigs/azuredisk-csi-driver) 1.10.0 - - [cinder-csi-plugin](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md) 1.30.0 - - [gcp-pd-csi-plugin](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver) 1.9.2 - - [local-path-provisioner](https://github.com/rancher/local-path-provisioner) 0.0.24 - - [local-volume-provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner) 2.5.0 - - [node-feature-discovery](https://github.com/kubernetes-sigs/node-feature-discovery) 0.16.4 - - - -## Container Runtime Notes - -- The cri-o version should be aligned with the respective kubernetes version (i.e. kube_version=1.20.x, crio_version=1.20) - -## Requirements - -- **Minimum required version of Kubernetes is v1.30** -- **Ansible v2.14+, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands** -- The target servers must have **access to the Internet** in order to pull docker images. 
Otherwise, additional configuration is required (See [Offline Environment](docs/operations/offline-environment.md))
-- The target servers are configured to allow **IPv4 forwarding**.
-- If using IPv6 for pods and services, the target servers are configured to allow **IPv6 forwarding**.
-- The **firewalls are not managed**, you'll need to implement your own rules the way you used to.
-  in order to avoid any issue during deployment you should disable your firewall.
-- If kubespray is run from non-root user account, correct privilege escalation method
-  should be configured in the target servers. Then the `ansible_become` flag
-  or command parameters `--become or -b` should be specified.
-
-Hardware:
-These limits are safeguarded by Kubespray. Actual requirements for your workload can differ. For a sizing guide go to the [Building Large Clusters](https://kubernetes.io/docs/setup/cluster-large/#size-of-master-and-master-components) guide.
-
-- Control Plane
-  - Memory: 2 GB
-- Worker Node
-  - Memory: 1 GB
-
-## Network Plugins
-
-You can choose among ten network plugins. (default: `calico`, except Vagrant uses `flannel`)
-
-- [flannel](docs/CNI/flannel.md): gre/vxlan (layer 2) networking.
-
-- [Calico](https://docs.tigera.io/calico/latest/about/) is a networking and network policy provider. Calico supports a flexible set of networking options
-  designed to give you the most efficient networking across a range of situations, including non-overlay
-  and overlay networks, with or without BGP. Calico uses the same engine to enforce network policy for hosts,
-  pods, and (if using Istio and Envoy) applications at the service mesh layer.
-
-- [cilium](http://docs.cilium.io/en/latest/): layer 3/4 networking (as well as layer 7 to protect and secure application protocols), supports dynamic insertion of BPF bytecode into the Linux kernel to implement security services, networking and visibility logic.
-
-- [kube-ovn](docs/CNI/kube-ovn.md): Kube-OVN integrates the OVN-based Network Virtualization with Kubernetes. It offers an advanced Container Network Fabric for Enterprises.
-
-- [kube-router](docs/CNI/kube-router.md): Kube-router is a L3 CNI for Kubernetes networking aiming to provide operational
-  simplicity and high performance: it uses IPVS to provide Kube Services Proxy (if setup to replace kube-proxy),
-  iptables for network policies, and BGP for pods L3 networking (with optionally BGP peering with out-of-cluster BGP peers).
-  It can also optionally advertise routes to Kubernetes cluster Pods CIDRs, ClusterIPs, ExternalIPs and LoadBalancerIPs.
-
-- [macvlan](docs/CNI/macvlan.md): Macvlan is a Linux network driver. Pods have their own unique Mac and Ip address, connected directly the physical (layer 2) network.
-
-- [multus](docs/CNI/multus.md): Multus is a meta CNI plugin that provides multiple network interface support to pods. For each interface Multus delegates CNI calls to secondary CNI plugins such as Calico, macvlan, etc.
-
-- [custom_cni](roles/network-plugin/custom_cni/) : You can specify some manifests that will be applied to the clusters to bring you own CNI and use non-supported ones by Kubespray.
-  See `tests/files/custom_cni/README.md` and `tests/files/custom_cni/values.yaml`for an example with a CNI provided by a Helm Chart.
-
-The network plugin to use is defined by the variable `kube_network_plugin`. There is also an
-option to leverage built-in cloud provider networking instead.
-See also [Network checker](docs/advanced/netcheck.md).
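For reference, the whole choice described above funnels into a single inventory variable; a minimal sketch of the one selection this series still supports (assuming the `2SpeedLab` inventory layout that appears in the diffstat):

```yaml
# inventory/2SpeedLab/group_vars/k8s_cluster/k8s-cluster.yml (sketch)
# kube_network_plugin picks which CNI role Kubespray deploys. cilium is
# assumed here because it is the only network plugin role this patch
# series keeps (calico, flannel, kube-ovn, kube-router, macvlan and
# multus are all removed).
kube_network_plugin: cilium
```

Any other value would now point at a role that no longer exists in this tree, which is why the plugin-specific group_vars files are dropped alongside the roles.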
-
-## Ingress Plugins
-
-- [nginx](https://kubernetes.github.io/ingress-nginx): the NGINX Ingress Controller.
-
-- [metallb](docs/ingress/metallb.md): the MetalLB bare-metal service LoadBalancer provider.
+## K8S 2SpeedLab includes:
+- kubernetes 1.33.4
+- etcd 3.5.22
+- containerd 2.1.4
+- cilium 1.17.7
+- [cert-manager](https://github.com/jetstack/cert-manager) 1.15.3
+- [coredns](https://github.com/coredns/coredns) 1.12.0
+- [ingress-nginx](https://github.com/kubernetes/ingress-nginx) 1.12.1
+- [helm](https://helm.sh/) 3.18.4
+- [local-path-provisioner](https://github.com/rancher/local-path-provisioner) 0.0.24
+- [local-volume-provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner) 2.5.0
+- [node-feature-discovery](https://github.com/kubernetes-sigs/node-feature-discovery) 0.16.4
+- kube-vip 0.8.0
\ No newline at end of file
diff --git a/docs/cloud_controllers/openstack.md b/docs/cloud_controllers/openstack.md
deleted file mode 100644
index 7a80ff713bd..00000000000
--- a/docs/cloud_controllers/openstack.md
+++ /dev/null
@@ -1,134 +0,0 @@
-# OpenStack
-
-## Known compatible public clouds
-
-Kubespray has been tested on a number of OpenStack Public Clouds including (in alphabetical order):
-
-- [Auro](https://auro.io/)
-- [Betacloud](https://www.betacloud.io/)
-- [CityCloud](https://www.citycloud.com/)
-- [DreamHost](https://www.dreamhost.com/cloud/computing/)
-- [ELASTX](https://elastx.se/)
-- [EnterCloudSuite](https://www.entercloudsuite.com/)
-- [FugaCloud](https://fuga.cloud/)
-- [Infomaniak](https://infomaniak.com)
-- [Open Telekom Cloud](https://cloud.telekom.de/) : requires to set the variable `wait_for_floatingip = "true"` in your cluster.tfvars
-- [OVHcloud](https://www.ovhcloud.com/)
-- [Rackspace](https://www.rackspace.com/)
-- [Ultimum](https://ultimum.io/)
-- [VexxHost](https://vexxhost.com/)
-- [Zetta](https://www.zetta.io/)
-
-## The OpenStack cloud provider
-
-The cloud provider is configured to have Octavia by default in Kubespray.
-
-- Enable the external OpenStack cloud provider in `group_vars/all/all.yml`:
-
-  ```yaml
-  cloud_provider: external
-  external_cloud_provider: openstack
-  ```
-
-- Enable Cinder CSI in `group_vars/all/openstack.yml`:
-
-  ```yaml
-  cinder_csi_enabled: true
-  ```
-
-- Enable topology support (optional), if your openstack provider has custom Zone names you can override the default "nova" zone by setting the variable `cinder_topology_zones`
-
-  ```yaml
-  cinder_topology: true
-  ```
-
-- Enabling `cinder_csi_ignore_volume_az: true`, ignores volumeAZ and schedules on any of the available node AZ.
-
-  ```yaml
-  cinder_csi_ignore_volume_az: true
-  ```
-
-- If you are using OpenStack loadbalancer(s) replace the `openstack_lbaas_subnet_id` with the new `external_openstack_lbaas_subnet_id`. **Note** The new cloud provider is using Octavia instead of Neutron LBaaS by default!
-
-- If you are in the case of multi-NIC OpenStack VMs (see [kubernetes/cloud-provider-openstack#407](https://github.com/kubernetes/cloud-provider-openstack/issues/407) and [#6083](https://github.com/kubernetes-sigs/kubespray/issues/6083) for an explanation), you should override the default OpenStack networking configuration:
-
-  ```yaml
-  external_openstack_network_ipv6_disabled: false
-  external_openstack_network_internal_networks: []
-  external_openstack_network_public_networks: []
-  ```
-
-- You can override the default OpenStack metadata configuration (see [#6338](https://github.com/kubernetes-sigs/kubespray/issues/6338) for an explanation):
-
-  ```yaml
-  external_openstack_metadata_search_order: "configDrive,metadataService"
-  ```
-
-- Available variables for configuring lbaas:
-
-  ```yaml
-  external_openstack_lbaas_enabled: true
-  external_openstack_lbaas_floating_network_id: "Neutron network ID to get floating IP from"
-  external_openstack_lbaas_floating_subnet_id: "Neutron subnet ID to get floating IP from"
-  external_openstack_lbaas_method: ROUND_ROBIN
-  external_openstack_lbaas_provider: amphora
-  external_openstack_lbaas_subnet_id: "Neutron subnet ID to create LBaaS VIP"
-  external_openstack_lbaas_member_subnet_id: "Neutron subnet ID on which to create the members of the load balancer"
-  external_openstack_lbaas_network_id: "Neutron network ID to create LBaaS VIP"
-  external_openstack_lbaas_manage_security_groups: false
-  external_openstack_lbaas_create_monitor: false
-  external_openstack_lbaas_monitor_delay: 5s
-  external_openstack_lbaas_monitor_max_retries: 1
-  external_openstack_lbaas_monitor_timeout: 3s
-  external_openstack_lbaas_internal_lb: false
-  ```
-
-- Run `source path/to/your/openstack-rc` to read your OpenStack credentials, like `OS_AUTH_URL`, `OS_USERNAME`, `OS_PASSWORD`, etc. Those variables are used for accessing OpenStack from the external cloud provider.
-- Run the `cluster.yml` playbook.
-
-## Additional step needed when using calico or kube-router
-
-Being L3 CNIs, calico and kube-router do not encapsulate packets with the hosts' IP addresses. Instead, the packets are routed with the pods' IP addresses directly.
-
-OpenStack will filter and drop all packets from IPs it does not know, to prevent spoofing.
-
-In order to make L3 CNIs work on OpenStack, you will need to tell OpenStack to allow pod packets by allowing the networks they use.
-
-First, you will need the IDs of your OpenStack instances that will run Kubernetes:
-
-  ```bash
-  openstack server list --project YOUR_PROJECT
-  +--------------------------------------+--------+----------------------------------+--------+-------------+
-  | ID                                   | Name   | Tenant ID                        | Status | Power State |
-  +--------------------------------------+--------+----------------------------------+--------+-------------+
-  | e1f48aad-df96-4bce-bf61-62ae12bf3f95 | k8s-1  | fba478440cb2444a9e5cf03717eb5d6f | ACTIVE | Running     |
-  | 725cd548-6ea3-426b-baaa-e7306d3c8052 | k8s-2  | fba478440cb2444a9e5cf03717eb5d6f | ACTIVE | Running     |
-  ```
-
-Then you can use the instance IDs to find the connected [neutron](https://wiki.openstack.org/wiki/Neutron) ports (though they are now configured through OpenStack):
-
-  ```bash
-  openstack port list -c id -c device_id --project YOUR_PROJECT
-  +--------------------------------------+--------------------------------------+
-  | id                                   | device_id                            |
-  +--------------------------------------+--------------------------------------+
-  | 5662a4e0-e646-47f0-bf88-d80fbd2d99ef | e1f48aad-df96-4bce-bf61-62ae12bf3f95 |
-  | e5ae2045-a1e1-4e99-9aac-4353889449a7 | 725cd548-6ea3-426b-baaa-e7306d3c8052 |
-  ```
-
-Given the port IDs on the left, you can set the two `allowed-address`(es) in OpenStack. Note that you have to allow both `kube_service_addresses` (default `10.233.0.0/18`) and `kube_pods_subnet` (default `10.233.64.0/18`).
-
-  ```bash
-  # allow kube_service_addresses and kube_pods_subnet networks
-  openstack port set 5662a4e0-e646-47f0-bf88-d80fbd2d99ef --allowed-address ip-address=10.233.0.0/18 --allowed-address ip-address=10.233.64.0/18
-  openstack port set e5ae2045-a1e1-4e99-9aac-4353889449a7 --allowed-address ip-address=10.233.0.0/18 --allowed-address ip-address=10.233.64.0/18
-  ```
-
-If all the VMs in the tenant correspond to the Kubespray deployment, you can "sweep run" the above with:
-
-  ```bash
-  openstack port list --device-owner=compute:nova -c ID -f value | xargs -tI@ openstack port set @ --allowed-address ip-address=10.233.0.0/18 --allowed-address ip-address=10.233.64.0/18
-  ```
-
-Now you can finally run the playbook.
diff --git a/docs/cloud_controllers/vsphere.md b/docs/cloud_controllers/vsphere.md
deleted file mode 100644
index 72a2c1dbad0..00000000000
--- a/docs/cloud_controllers/vsphere.md
+++ /dev/null
@@ -1,134 +0,0 @@
-# vSphere
-
-Kubespray can be deployed with vSphere as a cloud provider. This feature supports:
-
-- Volumes
-- Persistent Volumes
-- Storage Classes and provisioning of volumes
-- vSphere Storage Policy Based Management for Containers orchestrated by Kubernetes
-
-## Out-of-tree vSphere cloud provider
-
-### Prerequisites
-
-You first need to configure your vSphere environment by following the [official documentation](https://github.com/kubernetes/cloud-provider-vsphere/blob/master/docs/book/tutorials/kubernetes-on-vsphere-with-kubeadm.md#prerequisites).
-
-After this step you should have:
-
-- vSphere upgraded to 6.7 U3 or later
-- VM hardware upgraded to version 15 or higher
-- UUID activated for each VM where Kubernetes will be deployed
-
-### Kubespray configuration
-
-First, in `inventory/sample/group_vars/all/all.yml`, you must set `cloud_provider` to `external` and `external_cloud_provider` to `vsphere`.
-
-```yml
-cloud_provider: "external"
-external_cloud_provider: "vsphere"
-```
-
-Then, in `inventory/sample/group_vars/all/vsphere.yml`, you need to declare your vCenter credentials and enable the vSphere CSI as described below.
-
-| Variable                               | Required | Type    | Choices         | Default                 | Comment                                                                                                              |
-|----------------------------------------|----------|---------|-----------------|-------------------------|----------------------------------------------------------------------------------------------------------------------|
-| external_vsphere_vcenter_ip            | TRUE     | string  |                 |                         | IP/URL of the vCenter                                                                                                |
-| external_vsphere_vcenter_port          | TRUE     | string  |                 | "443"                   | Port of the vCenter API                                                                                              |
-| external_vsphere_insecure              | TRUE     | string  | "true", "false" | "true"                  | set to "true" if the host above uses a self-signed cert                                                              |
-| external_vsphere_user                  | TRUE     | string  |                 |                         | User name for vCenter with required privileges (Can also be specified with the `VSPHERE_USER` environment variable)  |
-| external_vsphere_password              | TRUE     | string  |                 |                         | Password for vCenter (Can also be specified with the `VSPHERE_PASSWORD` environment variable)                        |
-| external_vsphere_datacenter            | TRUE     | string  |                 |                         | Datacenter name to use                                                                                               |
-| external_vsphere_kubernetes_cluster_id | TRUE     | string  |                 | "kubernetes-cluster-id" | Kubernetes cluster ID to use                                                                                         |
-| vsphere_csi_enabled                    | TRUE     | boolean |                 | false                   | Enable vSphere CSI                                                                                                   |
-
-Example configuration:
-
-```yml
-external_vsphere_vcenter_ip: "myvcenter.domain.com"
-external_vsphere_vcenter_port: "443"
-external_vsphere_insecure: "true"
-external_vsphere_user: "administrator@vsphere.local"
-external_vsphere_password: "K8s_admin"
-external_vsphere_datacenter: "DATACENTER_name"
-external_vsphere_kubernetes_cluster_id: "kubernetes-cluster-id"
-vsphere_csi_enabled: true
-```
-
-For a more fine-grained CSI setup, refer to the [vsphere-csi](/docs/CSI/vsphere-csi.md) documentation.
-
-### Deployment
-
-Once the configuration is set, you can execute the playbook again to apply the new configuration:
-
-```ShellSession
-cd kubespray
-ansible-playbook -i inventory/sample/hosts.ini -b -v cluster.yml
-```
-
-You'll find some useful examples [here](https://github.com/kubernetes/cloud-provider-vsphere/blob/master/docs/book/tutorials/kubernetes-on-vsphere-with-kubeadm.md#sample-manifests-to-test-csi-driver-functionality) to test your configuration.
-
-## In-tree vSphere cloud provider ([deprecated](https://cloud-provider-vsphere.sigs.k8s.io/concepts/in_tree_vs_out_of_tree.html))
-
-### Prerequisites (deprecated)
-
-You first need to configure your vSphere environment by following the [official documentation](https://kubernetes.io/docs/getting-started-guides/vsphere/#vsphere-cloud-provider).
-
-After this step you should have:
-
-- UUID activated for each VM where Kubernetes will be deployed
-- A vSphere account with required privileges
-
-If you intend to leverage the [zone and region node labeling](https://kubernetes.io/docs/reference/kubernetes-api/labels-annotations-taints/#failure-domain-beta-kubernetes-io-region), create a tag category for both the zone and region in vCenter. The tags can then be applied at the host, cluster, datacenter, or folder level, and the cloud provider will walk the hierarchy to extract and apply the labels to the Kubernetes nodes.
-
-### Kubespray configuration (deprecated)
-
-First, you must define the cloud provider in `inventory/sample/group_vars/all.yml` by setting it to `vsphere`.
-
-```yml
-cloud_provider: vsphere
-```
-
-Then, in the same file, you need to declare your vCenter credentials as described below.
-
-| Variable                     | Required | Type    | Choices                    | Default | Comment                                                                                                                                                                                                                                                  |
-|------------------------------|----------|---------|----------------------------|---------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| vsphere_vcenter_ip           | TRUE     | string  |                            |         | IP/URL of the vCenter                                                                                                                                                                                                                                    |
-| vsphere_vcenter_port         | TRUE     | integer |                            |         | Port of the vCenter API. Commonly 443                                                                                                                                                                                                                    |
-| vsphere_insecure             | TRUE     | integer | 1, 0                       |         | set to 1 if the host above uses a self-signed cert                                                                                                                                                                                                       |
-| vsphere_user                 | TRUE     | string  |                            |         | User name for vCenter with required privileges                                                                                                                                                                                                           |
-| vsphere_password             | TRUE     | string  |                            |         | Password for vCenter                                                                                                                                                                                                                                     |
-| vsphere_datacenter           | TRUE     | string  |                            |         | Datacenter name to use                                                                                                                                                                                                                                   |
-| vsphere_datastore            | TRUE     | string  |                            |         | Datastore name to use                                                                                                                                                                                                                                    |
-| vsphere_working_dir          | TRUE     | string  |                            |         | Working directory from the view "VMs and template" in the vCenter where VMs are placed                                                                                                                                                                   |
-| vsphere_scsi_controller_type | TRUE     | string  | buslogic, pvscsi, parallel | pvscsi  | SCSI controller name. Commonly "pvscsi".                                                                                                                                                                                                                 |
-| vsphere_vm_uuid              | FALSE    | string  |                            |         | VM Instance UUID of the virtual machine that hosts the K8s master. Can be retrieved from the instanceUuid property in VmConfigInfo, or as vc.uuid in the VMX file or in `/sys/class/dmi/id/product_serial` (Optional, only used for Kubernetes <= 1.9.2) |
-| vsphere_public_network       | FALSE    | string  |                            | Blank   | Name of the network the VMs are joined to                                                                                                                                                                                                                |
-| vsphere_resource_pool        | FALSE    | string  |                            | Blank   | Name of the Resource pool where the VMs are located (Optional, only used for Kubernetes >= 1.9.2)                                                                                                                                                        |
-| vsphere_zone_category        | FALSE    | string  |                            |         | Name of the tag category used to set the `failure-domain.beta.kubernetes.io/zone` label on nodes (Optional, only used for Kubernetes >= 1.12.0)                                                                                                          |
-| vsphere_region_category      | FALSE    | string  |                            |         | Name of the tag category used to set the `failure-domain.beta.kubernetes.io/region` label on nodes (Optional, only used for Kubernetes >= 1.12.0)                                                                                                        |
-
-Example configuration:
-
-```yml
-vsphere_vcenter_ip: "myvcenter.domain.com"
-vsphere_vcenter_port: 443
-vsphere_insecure: 1
-vsphere_user: "k8s@vsphere.local"
-vsphere_password: "K8s_admin"
-vsphere_datacenter: "DATACENTER_name"
-vsphere_datastore: "DATASTORE_name"
-vsphere_working_dir: "Docker_hosts"
-vsphere_scsi_controller_type: "pvscsi"
-vsphere_resource_pool: "K8s-Pool"
-```
-
-### Deployment (deprecated)
-
-Once the configuration is set, you can execute the playbook again to apply the new configuration:
-
-```ShellSession
-cd kubespray
-ansible-playbook -i inventory/sample/hosts.ini -b -v cluster.yml
-```
-
-You'll find some useful examples [here](https://github.com/kubernetes/examples/tree/master/staging/volumes/vsphere) to test your configuration.
diff --git a/docs/cloud_providers/aws.md b/docs/cloud_providers/aws.md
deleted file mode 100644
index 41706fdd568..00000000000
--- a/docs/cloud_providers/aws.md
+++ /dev/null
@@ -1,95 +0,0 @@
-# AWS
-
-> **Removed**: Since v1.31 (the Kubespray counterpart is v2.27), Kubernetes no longer supports `cloud_provider` (except the external cloud provider).
-
-To deploy Kubespray on [AWS](https://aws.amazon.com/), uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'aws'`.
Refer to the [Kubespray Configuration](#kubespray-configuration) for customizing the provider.
-
-Prior to creating your instances, you **must** ensure that you have created IAM roles and policies for both "kubernetes-master" and "kubernetes-node". You can find the IAM policies [here](https://github.com/kubernetes-sigs/kubespray/tree/master/contrib/aws_iam/). See the [IAM Documentation](https://aws.amazon.com/documentation/iam/) if guidance is needed on how to set these up. When you bring your instances online, associate them with the respective IAM role. Nodes that are only to be used for etcd do not need a role.
-
-You will also need to tag the resources in your VPC accordingly for the aws provider to utilize them. Tag the subnets, route tables and all instances that kubernetes will be run on with key `kubernetes.io/cluster/$cluster_name` (`$cluster_name` must be a unique identifier for the cluster). Tag the subnets that must be targeted by external ELBs with the key `kubernetes.io/role/elb` and internal ELBs with the key `kubernetes.io/role/internal-elb`.
-
-Make sure your VPC has both DNS Hostnames support and Private DNS enabled.
-
-The next step is to make sure the hostnames in your `inventory` file are identical to your internal hostnames in AWS. This may look something like `ip-111-222-333-444.us-west-2.compute.internal`. You can then specify how Ansible connects to these instances with `ansible_ssh_host` and `ansible_ssh_user`.
-
-You can now create your cluster!
-
-## Dynamic Inventory
-
-There is also a dynamic inventory script for AWS that can be used if desired. However, be aware that it makes certain assumptions about how you'll create your inventory. It also does not handle all use cases and groups that we may use as part of more advanced deployments. Additions welcome.
-
-This will produce an inventory that is passed into Ansible and looks like the following:
-
-```json
-{
-  "_meta": {
-    "hostvars": {
-      "ip-172-31-3-xxx.us-east-2.compute.internal": {
-        "ansible_ssh_host": "172.31.3.xxx"
-      },
-      "ip-172-31-8-xxx.us-east-2.compute.internal": {
-        "ansible_ssh_host": "172.31.8.xxx"
-      }
-    }
-  },
-  "etcd": [
-    "ip-172-31-3-xxx.us-east-2.compute.internal"
-  ],
-  "k8s_cluster": {
-    "children": [
-      "kube_control_plane",
-      "kube_node"
-    ]
-  },
-  "kube_control_plane": [
-    "ip-172-31-3-xxx.us-east-2.compute.internal"
-  ],
-  "kube_node": [
-    "ip-172-31-8-xxx.us-east-2.compute.internal"
-  ]
-}
-```
-
-Guide:
-
-- Create instances in AWS as needed.
-- Either during or after creation, add tags to the instances with a key of `kubespray-role` and a value of `kube_control_plane`, `etcd`, or `kube_node`. You can also share roles, like `kube_control_plane, etcd`.
-- Copy the `kubespray-aws-inventory.py` script from `kubespray/contrib/aws_inventory` to the `kubespray/inventory` directory.
-- Set the following AWS credentials and info as environment variables in your terminal:
-
-```ShellSession
-export AWS_ACCESS_KEY_ID="xxxxx"
-export AWS_SECRET_ACCESS_KEY="yyyyy"
-export AWS_REGION="us-east-2"
-```
-
-- We will now create our cluster. There will be either one or two small changes. The first is that we will specify `-i inventory/kubespray-aws-inventory.py` as our inventory script. The other is conditional: if your AWS instances are public facing, you can set the `VPC_VISIBILITY` variable to `public`, and that will result in public IP and DNS names being passed into the inventory. This causes your cluster.yml command to look like `VPC_VISIBILITY="public" ansible-playbook ... cluster.yml`.
-
-**Optional**: Using labels and taints
-
-To add labels to your kubernetes node, add the following tag to your instance:
-
-- Key: `kubespray-node-labels`
-- Value: `node-role.kubernetes.io/ingress=`
-
-To add taints to your kubernetes node, add the following tag to your instance:
-
-- Key: `kubespray-node-taints`
-- Value: `node-role.kubernetes.io/ingress=:NoSchedule`
-
-## Kubespray configuration
-
-Declare the cloud config variables for the `aws` provider as follows. Setting these variables is optional and depends on your use case.
-
-| Variable                           | Type   | Comment                                                                                                                                                                                                                                                                                                                                                                                                                                 |
-|------------------------------------|--------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| aws_zone                           | string | Force set the AWS zone. Recommended to leave blank.                                                                                                                                                                                                                                                                                                                                                                                    |
-| aws_vpc                            | string | The AWS VPC flag enables the possibility to run the master components on a different aws account, on a different cloud provider or on-premise. If the flag is set, the KubernetesClusterTag must also be provided                                                                                                                                                                                                                      |
-| aws_subnet_id                      | string | SubnetID enables using a specific subnet to use for ELB's                                                                                                                                                                                                                                                                                                                                                                              |
-| aws_route_table_id                 | string | RouteTableID enables using a specific RouteTable                                                                                                                                                                                                                                                                                                                                                                                       |
-| aws_role_arn                       | string | RoleARN is the IAM role to assume when interacting with AWS APIs                                                                                                                                                                                                                                                                                                                                                                       |
-| aws_kubernetes_cluster_tag         | string | KubernetesClusterTag is the legacy cluster id we'll use to identify our cluster resources                                                                                                                                                                                                                                                                                                                                              |
-| aws_kubernetes_cluster_id          | string | KubernetesClusterID is the cluster id we'll use to identify our cluster resources                                                                                                                                                                                                                                                                                                                                                      |
-| aws_disable_security_group_ingress | bool   | The aws provider creates an inbound rule per load balancer on the node security group. However, this can run into the AWS security group rule limit of 50 if many LoadBalancers are created. This flag disables the automatic ingress creation. It requires that the user has set up a rule that allows inbound traffic on kubelet ports from the local VPC subnet (so load balancers can access it). E.g. 10.82.0.0/16 30000-32000.   |
-| aws_elb_security_group             | string | Only in Kubelet version >= 1.7: AWS has a hard limit of 500 security groups. For large clusters, creating a security group for each ELB can cause the max number of security groups to be reached. If this is set, instead of creating a new security group for each ELB, this security group will be used instead.                                                                                                                    |
-| aws_disable_strict_zone_check      | bool   | During the instantiation of a new AWS cloud provider, the detected region is validated against a known set of regions. In a non-standard, AWS-like environment (e.g. Eucalyptus), this check may be undesirable. Setting this to true will disable the check and provide a warning that the check was skipped. Please note that this is an experimental feature and work-in-progress for the moment.                                   |
diff --git a/docs/cloud_providers/azure.md b/docs/cloud_providers/azure.md
deleted file mode 100644
index 50d2f1d1c2b..00000000000
--- a/docs/cloud_providers/azure.md
+++ /dev/null
@@ -1,125 +0,0 @@
-# Azure
-
-> **Removed**: Since v1.31 (the Kubespray counterpart is v2.27), Kubernetes no longer supports `cloud_provider` (except the external cloud provider).
-
-To deploy Kubernetes on [Azure](https://azure.microsoft.com), uncomment the `cloud_provider` option in `group_vars/all/all.yml` and set it to `'azure'`.
-
-All your instances are required to run in a resource group, and a routing table has to be attached to the subnet your instances are in.
-
-Not all features are supported yet; for the current status, have a look [here](https://github.com/Azure/AKS)
-
-## Parameters
-
-Before creating the instances, you must first set the `azure_` variables in the `group_vars/all/all.yml` file.
-
-All values can be retrieved using the Azure CLI tool, which can be downloaded here:
-After installation you have to run `az login` to get access to your account.
-
-### azure_cloud
-
-Azure Stack has different API endpoints, depending on the Azure Stack deployment. These need to be provided to the Azure SDK.
-Possible values are: `AzureChinaCloud`, `AzureGermanCloud`, `AzurePublicCloud` and `AzureUSGovernmentCloud`.
-The full list of existing settings for the AzureChinaCloud, AzureGermanCloud, AzurePublicCloud and AzureUSGovernmentCloud
-is available in the source code [here](https://github.com/kubernetes-sigs/cloud-provider-azure/blob/master/docs/cloud-provider-config.md)
-
-### azure\_tenant\_id + azure\_subscription\_id
-
-Run `az account show` to retrieve your subscription id and tenant id:
-`azure_tenant_id` -> Tenant ID field
-`azure_subscription_id` -> ID field
-
-### azure\_location
-
-The region your instances are located in; this can be something like `westeurope` or `westcentralus`. A full list of region names can be retrieved via `az account list-locations`
-
-### azure\_resource\_group
-
-The name of the resource group your instances are in, can be retrieved via `az group list`
-
-### azure\_vmtype
-
-The type of the VM. Supported values are `standard` or `vmss`. If the VM is of type `Virtual Machines`, the value is `standard`. If the VM is part of `Virtual Machine Scale Sets`, the value is `vmss`
-
-### azure\_vnet\_name
-
-The name of the virtual network your instances are in, can be retrieved via `az network vnet list`
-
-### azure\_vnet\_resource\_group
-
-The name of the resource group that contains the vnet.
-
-### azure\_subnet\_name
-
-The name of the subnet your instances are in, can be retrieved via `az network vnet subnet list --resource-group RESOURCE_GROUP --vnet-name VNET_NAME`
-
-### azure\_security\_group\_name
-
-The name of the network security group your instances are in, can be retrieved via `az network nsg list`
-
-### azure\_security\_group\_resource\_group
-
-The name of the resource group that contains the network security group. Defaults to `azure_vnet_resource_group`
-
-### azure\_route\_table\_name
-
-The name of the route table used with your instances.
-
-### azure\_route\_table\_resource\_group
-
-The name of the resource group that contains the route table. Defaults to `azure_vnet_resource_group`
-
-### azure\_aad\_client\_id + azure\_aad\_client\_secret
-
-These will have to be generated first:
-
-- Create an Azure AD Application with:
-
-  ```ShellSession
-  az ad app create --display-name kubernetes --identifier-uris http://kubernetes --homepage http://example.com --password CLIENT_SECRET
-  ```
-
-The display name, identifier-uri, homepage and the password can be chosen.
-Note the AppId in the output.
-
-- Create a Service principal for the application with:
-
-  ```ShellSession
-  az ad sp create --id AppId
-  ```
-
-This is the AppId from the last command.
-
-- Create the role assignment with:
-
-  ```ShellSession
-  az role assignment create --role "Owner" --assignee http://kubernetes --subscription SUBSCRIPTION_ID
-  ```
-
-azure\_aad\_client\_id must be set to the AppId, azure\_aad\_client\_secret is your chosen secret.
-
-### azure\_loadbalancer\_sku
-
-SKU of the Load Balancer and Public IP. Candidate values are: `basic` and `standard`.
-
-### azure\_exclude\_master\_from\_standard\_lb
-
-azure\_exclude\_master\_from\_standard\_lb excludes master nodes from the `standard` load balancer.
-
-### azure\_disable\_outbound\_snat
-
-azure\_disable\_outbound\_snat disables the outbound SNAT for public load balancer rules. It should only be set when azure\_loadbalancer\_sku is `standard`.
-
-### azure\_primary\_availability\_set\_name
-
-(Optional) The name of the availability set that should be used as the load balancer backend. If this is set, the Azure
-cloudprovider will only add nodes from that availability set to the load balancer backend pool. If this is not set, and
-multiple agent pools (availability sets) are used, then the cloudprovider will try to add all nodes to a single backend
-pool, which is forbidden. In other words, if you use multiple agent pools (availability sets), you MUST set this field.
-
-### azure\_use\_instance\_metadata
-
-Use the instance metadata service where possible
-
-## Provisioning Azure with Resource Group Templates
-
-You'll find Resource Group Templates and scripts to provision the required infrastructure to Azure in [*contrib/azurerm*](../contrib/azurerm/README.md)
diff --git a/docs/cloud_providers/cloud.md b/docs/cloud_providers/cloud.md
deleted file mode 100644
index d88a3aeccc6..00000000000
--- a/docs/cloud_providers/cloud.md
+++ /dev/null
@@ -1,15 +0,0 @@
-# Cloud providers
-
-> **Removed**: Since v1.31 (the Kubespray counterpart is v2.27), Kubernetes no longer supports `cloud_provider` (except the external cloud provider).
-
-## Provisioning
-
-You can deploy instances in your cloud environment in several ways. Examples include Terraform, Ansible (ec2 and gce modules), and manual creation, as sketched below.
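As an illustration of the Ansible route (a hypothetical sketch, not taken from these docs: it uses the `amazon.aws.ec2_instance` module, and the AMI ID, key pair, instance type, and region are placeholders):

```yaml
# provision-node.yml -- hypothetical sketch; every value below is a placeholder
- name: Provision one EC2 instance to become a cluster node
  hosts: localhost
  gather_facts: false
  tasks:
    - name: Launch the instance
      amazon.aws.ec2_instance:
        name: k8s-node-1                  # placeholder instance name
        instance_type: t3.medium          # placeholder size
        image_id: ami-00000000000000000   # placeholder AMI
        key_name: my-keypair              # placeholder SSH key pair
        region: us-east-2
        state: running
```

The older `ec2`/`gce` modules mentioned above have since been superseded by collection-based modules, which is why the sketch uses the `amazon.aws` collection.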
-
-## Deploy kubernetes
-
-With the ansible-playbook command:
-
-```ShellSession
-ansible-playbook -u smana -e ansible_ssh_user=admin -e cloud_provider=[aws|gce] -b --become-user=root -i inventory/single.cfg cluster.yml
-```
diff --git a/inventory/2SpeedLab/group_vars/k8s_cluster/addons.yml b/inventory/2SpeedLab/group_vars/k8s_cluster/addons.yml
index 9af010ee219..cad27909de9 100644
--- a/inventory/2SpeedLab/group_vars/k8s_cluster/addons.yml
+++ b/inventory/2SpeedLab/group_vars/k8s_cluster/addons.yml
@@ -69,7 +69,7 @@ local_volume_provisioner_enabled: false
gateway_api_enabled: false

# Nginx ingress controller deployment
-ingress_nginx_enabled: true
+ingress_nginx_enabled: false
# ingress_nginx_host_network: false
# ingress_nginx_service_type: LoadBalancer
# ingress_nginx_service_annotations:
@@ -223,20 +223,20 @@ argocd_enabled: false
# The plugin manager for kubectl

# Kube VIP
-kube_vip_enabled: false
-# kube_vip_arp_enabled: true
-# kube_vip_controlplane_enabled: true
-# kube_vip_address: 192.168.56.120
-# loadbalancer_apiserver:
-# address: "{{ kube_vip_address }}"
-# port: 6443
-# kube_vip_interface: eth0
-# kube_vip_services_enabled: false
-# kube_vip_dns_mode: first
-# kube_vip_cp_detect: false
-# kube_vip_leasename: plndr-cp-lock
-# kube_vip_enable_node_labeling: false
-# kube_vip_lb_fwdmethod: local
+kube_vip_enabled: true
+kube_vip_arp_enabled: true
+kube_vip_controlplane_enabled: true
+kube_vip_address: 100.10.0.2
+loadbalancer_apiserver:
+  address: "{{ kube_vip_address }}"
+  port: 6443
+kube_vip_interface: eth0
+kube_vip_services_enabled: true
+kube_vip_dns_mode: first
+kube_vip_cp_detect: false
+kube_vip_leasename: plndr-cp-lock
+kube_vip_enable_node_labeling: false
+kube_vip_lb_fwdmethod: local

# Node Feature Discovery
node_feature_discovery_enabled: false
diff --git a/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-cluster.yml b/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-cluster.yml
index cb9fa2438e7..8e0d99f65ff 100644
--- a/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-cluster.yml
+++ b/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-cluster.yml
@@ -64,7 +64,7 @@ credentials_dir: "{{ inventory_dir }}/credentials"

# Choose network plugin (cilium, calico, kube-ovn or flannel. Use cni for generic cni plugin)
# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
-kube_network_plugin: calico
+kube_network_plugin: cilium

# Setting multi_networking to true will install Multus: https://github.com/k8snetworkplumbingwg/multus-cni
kube_network_plugin_multus: false
@@ -121,7 +121,7 @@ kube_proxy_mode: ipvs

# configure arp_ignore and arp_announce to avoid answering ARP queries from kube-ipvs0 interface
# must be set to true for MetalLB, kube-vip(ARP enabled) to work
-kube_proxy_strict_arp: false
+kube_proxy_strict_arp: true

# A string slice of values which specify the addresses to use for NodePorts.
# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32).
@@ -143,12 +143,12 @@ kube_encrypt_secret_data: false

# Graceful Node Shutdown (Kubernetes >= 1.21.0), see https://kubernetes.io/blog/2021/04/21/graceful-node-shutdown-beta/
# kubelet_shutdown_grace_period has to be greater than kubelet_shutdown_grace_period_critical_pods to allow
# non-critical pods to also terminate gracefully
-# kubelet_shutdown_grace_period: 60s
-# kubelet_shutdown_grace_period_critical_pods: 20s
+kubelet_shutdown_grace_period: 60s
+kubelet_shutdown_grace_period_critical_pods: 20s

# DNS configuration.
# Kubernetes cluster name, also will be used as DNS domain
-cluster_name: cluster.local
+cluster_name: cluster.2speedlab.dev

# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
ndots: 2
# dns_timeout: 2
diff --git a/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-calico.yml b/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-calico.yml
deleted file mode 100644
index cbe8c2a98c0..00000000000
--- a/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-calico.yml
+++ /dev/null
@@ -1,126 +0,0 @@
----
-# see roles/network_plugin/calico/defaults/main.yml
-
-# the default value of name
-calico_cni_name: k8s-pod-network
-
-## With calico it is possible to distribute routes with border routers of the datacenter.
-## Warning: enabling router peering will disable calico's default behavior ('node mesh').
-## The subnets of each node will be distributed by the datacenter router
-# peer_with_router: false
-
-# Enables Internet connectivity from containers
-# nat_outgoing: true
-# nat_outgoing_ipv6: true
-
-# Enables Calico CNI "host-local" IPAM plugin
-# calico_ipam_host_local: true
-
-# add default ippool name
-# calico_pool_name: "default-pool"
-
-# add default ippool blockSize
-calico_pool_blocksize: 26
-
-# add default ippool CIDR (must be inside kube_pods_subnet, defaults to kube_pods_subnet otherwise)
-# calico_pool_cidr: 1.2.3.4/5
-
-# Add default IPV6 IPPool CIDR. Must be inside kube_pods_subnet_ipv6. Defaults to kube_pods_subnet_ipv6 if not set.
-# calico_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112
-
-# Global as_num (/calico/bgp/v1/global/as_num)
-# global_as_num: "64512"
-
-# If doing peering with node-assigned asn where the global as_num does not match your nodes, you want this
-# to be true. All other cases, false.
-# calico_no_global_as_num: false
-
-# You can set MTU value here. If left undefined or empty, it will
-# not be specified in calico CNI config, so Calico will use built-in
-# defaults. The value should be a number, not a string.
-# calico_mtu: 1500
-
-# Configure the MTU to use for workload interfaces and tunnels.
-# - If Wireguard is enabled, subtract 60 from your network MTU (i.e. 1500-60=1440)
-# - Otherwise, if VXLAN or BPF mode is enabled, subtract 50 from your network MTU (i.e. 1500-50=1450)
-# - Otherwise, if IPIP is enabled, subtract 20 from your network MTU (i.e. 1500-20=1480)
-# - Otherwise, if not using any encapsulation, set to your network MTU (i.e. 1500)
-# calico_veth_mtu: 1440
-
-# Advertise Cluster IPs
-# calico_advertise_cluster_ips: true
-
-# Advertise Service External IPs
-# calico_advertise_service_external_ips:
-# - x.x.x.x/24
-# - y.y.y.y/32
-
-# Advertise Service LoadBalancer IPs
-# calico_advertise_service_loadbalancer_ips:
-# - x.x.x.x/24
-# - y.y.y.y/16
-
-# Choose data store type for calico: "etcd" or "kdd" (kubernetes datastore)
-# calico_datastore: "kdd"
-
-# Choose Calico iptables backend: "Legacy", "Auto" or "NFT"
-# calico_iptables_backend: "Auto"
-
-# Use typha (only with kdd)
-# typha_enabled: false
-
-# Generate TLS certs for secure typha<->calico-node communication
-# typha_secure: false
-
-# Scaling typha: 1 replica per 100 nodes is adequate
-# Number of typha replicas
-# typha_replicas: 1
-
-# Set max typha connections
-# typha_max_connections_lower_limit: 300
-
-# Set calico network backend: "bird", "vxlan" or "none"
-# bird enables BGP routing, required for ipip and no-encapsulation modes
-# calico_network_backend: vxlan
-
-# IP in IP and VXLAN are mutually exclusive modes.
-# set IP in IP encapsulation mode: "Always", "CrossSubnet", "Never"
-# calico_ipip_mode: 'Never'
-
-# set VXLAN encapsulation mode: "Always", "CrossSubnet", "Never"
-# calico_vxlan_mode: 'Always'
-
-# set VXLAN port and VNI
-# calico_vxlan_vni: 4096
-# calico_vxlan_port: 4789
-
-# Enable eBPF mode
-# calico_bpf_enabled: false
-
-# If you want to use a non-default IP_AUTODETECTION_METHOD or IP6_AUTODETECTION_METHOD for calico node, set this option to one of:
-# * can-reach=DESTINATION
-# * interface=INTERFACE-REGEX
-# see https://docs.projectcalico.org/reference/node/configuration
-# calico_ip_auto_method: "interface=eth.*"
-# calico_ip6_auto_method: "interface=eth.*"
-
-# Set FELIX_MTUIFACEPATTERN, the pattern used to discover the host's interface for MTU auto-detection.
-# see https://projectcalico.docs.tigera.io/reference/felix/configuration
-# calico_felix_mtu_iface_pattern: "^((en|wl|ww|sl|ib)[opsx].*|(eth|wlan|wwan).*)"
-
-# Choose the iptables insert mode for Calico: "Insert" or "Append".
-# calico_felix_chaininsertmode: Insert
-
-# If you want to use the default route interface when you use multiple interfaces with dynamic routes (iproute2),
-# see https://docs.projectcalico.org/reference/node/configuration : FELIX_DEVICEROUTESOURCEADDRESS
-# calico_use_default_route_src_ipaddr: false
-
-# Enable calico traffic encryption with wireguard
-# calico_wireguard_enabled: false
-
-# Under certain situations liveness and readiness probes may need tuning
-# calico_node_livenessprobe_timeout: 10
-# calico_node_readinessprobe_timeout: 10
-
-# Calico apiserver (only with kdd)
-# calico_apiserver_enabled: false
diff --git a/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-cilium.yml b/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-cilium.yml
index 98e319d50d4..78b7dc4c8ca 100644
--- a/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-cilium.yml
+++ b/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-cilium.yml
@@ -25,7 +25,7 @@ cilium_l2announcements: false
# - --synchronize-k8s-nodes
# - --identity-allocation-mode=kvstore
# - Ref: https://docs.cilium.io/en/stable/internals/cilium_operator/#kvstore-operations
-# cilium_identity_allocation_mode: kvstore
+cilium_identity_allocation_mode: crd

# Etcd SSL dirs
# cilium_cert_dir: /etc/cilium/certs
@@ -103,11 +103,11 @@ cilium_l2announcements: false
# cilium_native_routing_cidr_ipv6: ""

# Enable transparent network encryption.
-# cilium_encryption_enabled: false
+cilium_encryption_enabled: true

# Encryption method. Can be either ipsec or wireguard.
# Only effective when `cilium_encryption_enabled` is set to true.
-# cilium_encryption_type: "ipsec"
+cilium_encryption_type: "wireguard"

# Enable encryption for pure node to node traffic.
# This option is only effective when `cilium_encryption_type` is set to `ipsec`.
@@ -149,20 +149,20 @@ cilium_l2announcements: false

# Hubble
### Enable Hubble without install
-# cilium_enable_hubble: false
+cilium_enable_hubble: true
### Enable Hubble-ui
### Installed by default when hubble is enabled.
To disable set to false # cilium_enable_hubble_ui: "{{ cilium_enable_hubble }}" ### Enable Hubble Metrics # cilium_enable_hubble_metrics: false ### if cilium_enable_hubble_metrics: true -# cilium_hubble_metrics: {} -# - dns -# - drop -# - tcp -# - flow -# - icmp -# - http +cilium_hubble_metrics: + - dns + - drop + - tcp + - flow + - icmp + - http ### Enable Hubble install # cilium_hubble_install: false ### Enable auto generate certs if cilium_hubble_install: true @@ -181,7 +181,7 @@ cilium_l2announcements: false # IP address management mode for v1.9+. # https://docs.cilium.io/en/v1.9/concepts/networking/ipam/ -# cilium_ipam_mode: kubernetes +cilium_ipam_mode: kubernetes # Extra arguments for the Cilium agent # cilium_agent_custom_args: [] diff --git a/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-custom-cni.yml b/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-custom-cni.yml deleted file mode 100644 index 8850210c466..00000000000 --- a/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-custom-cni.yml +++ /dev/null @@ -1,51 +0,0 @@ ---- -# custom_cni network plugin configuration -# There are two deployment options to choose from, select one - -## OPTION 1 - Static manifest files -## With this option, referred manifest file will be deployed -## as if the `kubectl apply -f` method was used with it. -# -## List of Kubernetes resource manifest files -## See tests/files/custom_cni/README.md for example -# custom_cni_manifests: [] - -## OPTION 1 EXAMPLE - Cilium static manifests in Kubespray tree -# custom_cni_manifests: -# - "{{ playbook_dir }}/../tests/files/custom_cni/cilium.yaml" - -## OPTION 2 - Helm chart application -## This allows the CNI backend to be deployed to Kubespray cluster -## as common Helm application. -# -## Helm release name - how the local instance of deployed chart will be named -# custom_cni_chart_release_name: "" -# -## Kubernetes namespace to deploy into -# custom_cni_chart_namespace: "kube-system" -# -## Helm repository name - how the local record of Helm repository will be named -# custom_cni_chart_repository_name: "" -# -## Helm repository URL -# custom_cni_chart_repository_url: "" -# -## Helm chart reference - path to the chart in the repository -# custom_cni_chart_ref: "" -# -## Helm chart version -# custom_cni_chart_version: "" -# -## Custom Helm values to be used for deployment -# custom_cni_chart_values: {} - -## OPTION 2 EXAMPLE - Cilium deployed from official public Helm chart -# custom_cni_chart_namespace: kube-system -# custom_cni_chart_release_name: cilium -# custom_cni_chart_repository_name: cilium -# custom_cni_chart_repository_url: https://helm.cilium.io -# custom_cni_chart_ref: cilium/cilium -# custom_cni_chart_version: (e.g.: 1.14.3) -# custom_cni_chart_values: -# cluster: -# name: "cilium-demo" diff --git a/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-flannel.yml b/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-flannel.yml deleted file mode 100644 index 64d20a825bb..00000000000 --- a/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-flannel.yml +++ /dev/null @@ -1,18 +0,0 @@ -# see roles/network_plugin/flannel/defaults/main.yml - -## interface that should be used for flannel operations -## This is actually an inventory cluster-level item -# flannel_interface: - -## Select interface that should be used for flannel operations by regexp on Name or IP -## This is actually an inventory cluster-level item -## example: select interface with ip from net 10.0.0.0/23 -## single quote and escape backslashes -# flannel_interface_regexp: 
'10\\.0\\.[0-2]\\.\\d{1,3}' - -# You can choose what type of flannel backend to use: 'vxlan', 'host-gw' or 'wireguard' -# please refer to flannel's docs : https://github.com/coreos/flannel/blob/master/README.md -# flannel_backend_type: "vxlan" -# flannel_vxlan_vni: 1 -# flannel_vxlan_port: 8472 -# flannel_vxlan_direct_routing: false diff --git a/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-macvlan.yml b/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-macvlan.yml deleted file mode 100644 index d2534e72f12..00000000000 --- a/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-macvlan.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -# private interface, on a l2-network -macvlan_interface: "eth1" - -# Enable nat in default gateway network interface -enable_nat_default_gateway: true diff --git a/playbooks/cluster.yml b/playbooks/cluster.yml index 12aeeee02cf..05df9121205 100644 --- a/playbooks/cluster.yml +++ b/playbooks/cluster.yml @@ -55,15 +55,6 @@ - { role: kubernetes-apps/common_crds } - { role: network_plugin, tags: network } -- name: Install Calico Route Reflector - hosts: calico_rr - gather_facts: false - any_errors_fatal: "{{ any_errors_fatal | default(true) }}" - environment: "{{ proxy_disable_env }}" - roles: - - { role: kubespray_defaults } - - { role: network_plugin/calico/rr, tags: ['network', 'calico_rr'] } - - name: Patch Kubernetes for Windows hosts: kube_control_plane[0] gather_facts: false diff --git a/roles/container-engine/containerd/meta/main.yml b/roles/container-engine/containerd/meta/main.yml index 41c5b6a9749..5629567722a 100644 --- a/roles/container-engine/containerd/meta/main.yml +++ b/roles/container-engine/containerd/meta/main.yml @@ -3,4 +3,3 @@ dependencies: - role: container-engine/containerd-common - role: container-engine/runc - role: container-engine/crictl - - role: container-engine/nerdctl diff --git a/roles/container-engine/cri-o/meta/main.yml b/roles/container-engine/cri-o/meta/main.yml index 5289208fb4a..ec9d9a55e9b 100644 --- a/roles/container-engine/cri-o/meta/main.yml +++ b/roles/container-engine/cri-o/meta/main.yml @@ -1,4 +1,3 @@ --- dependencies: - role: container-engine/crictl - - role: container-engine/skopeo diff --git a/roles/container-engine/kata-containers/defaults/main.yml b/roles/container-engine/kata-containers/defaults/main.yml deleted file mode 100644 index 40bbc33d579..00000000000 --- a/roles/container-engine/kata-containers/defaults/main.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -kata_containers_dir: /opt/kata -kata_containers_config_dir: /etc/kata-containers -kata_containers_containerd_bin_dir: /usr/local/bin - -kata_containers_qemu_default_memory: "{{ ansible_memtotal_mb }}" -kata_containers_qemu_debug: 'false' -kata_containers_qemu_sandbox_cgroup_only: 'true' -kata_containers_qemu_enable_mem_prealloc: 'false' -kata_containers_virtio_fs_cache: 'always' diff --git a/roles/container-engine/kata-containers/molecule/default/converge.yml b/roles/container-engine/kata-containers/molecule/default/converge.yml deleted file mode 100644 index 9a7922e9fa1..00000000000 --- a/roles/container-engine/kata-containers/molecule/default/converge.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -- name: Converge - hosts: all - become: true - vars: - kata_containers_enabled: true - container_manager: containerd - roles: - - role: kubespray_defaults - - role: container-engine/containerd - - role: container-engine/kata-containers diff --git a/roles/container-engine/kata-containers/molecule/default/files/10-mynet.conf 
b/roles/container-engine/kata-containers/molecule/default/files/10-mynet.conf deleted file mode 100644 index f10935b753b..00000000000 --- a/roles/container-engine/kata-containers/molecule/default/files/10-mynet.conf +++ /dev/null @@ -1,17 +0,0 @@ -{ - "cniVersion": "0.2.0", - "name": "mynet", - "type": "bridge", - "bridge": "cni0", - "isGateway": true, - "ipMasq": true, - "ipam": { - "type": "host-local", - "subnet": "172.19.0.0/24", - "routes": [ - { - "dst": "0.0.0.0/0" - } - ] - } -} diff --git a/roles/container-engine/kata-containers/molecule/default/files/container.json b/roles/container-engine/kata-containers/molecule/default/files/container.json deleted file mode 100644 index e2e9a56a730..00000000000 --- a/roles/container-engine/kata-containers/molecule/default/files/container.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "metadata": { - "name": "kata1" - }, - "image": { - "image": "quay.io/kubespray/hello-world:latest" - }, - "log_path": "kata1.0.log", - "linux": {} -} diff --git a/roles/container-engine/kata-containers/molecule/default/files/sandbox.json b/roles/container-engine/kata-containers/molecule/default/files/sandbox.json deleted file mode 100644 index 326a578bed6..00000000000 --- a/roles/container-engine/kata-containers/molecule/default/files/sandbox.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "metadata": { - "name": "kata1", - "namespace": "default", - "attempt": 1, - "uid": "hdishd83djaidwnduwk28bcsb" - }, - "linux": {}, - "log_directory": "/tmp" -} diff --git a/roles/container-engine/kata-containers/molecule/default/molecule.yml b/roles/container-engine/kata-containers/molecule/default/molecule.yml deleted file mode 100644 index 6e6e1c7cb52..00000000000 --- a/roles/container-engine/kata-containers/molecule/default/molecule.yml +++ /dev/null @@ -1,28 +0,0 @@ ---- -role_name_check: 1 -platforms: - - name: ubuntu22 - cloud_image: ubuntu-2204 - vm_cpu_cores: 1 - vm_memory: 1024 - node_groups: - - kube_control_plane - - name: ubuntu24 - cloud_image: ubuntu-2404 - vm_cpu_cores: 1 - vm_memory: 1024 - node_groups: - - kube_control_plane -provisioner: - name: ansible - env: - ANSIBLE_ROLES_PATH: ../../../../ - config_options: - defaults: - callbacks_enabled: profile_tasks - timeout: 120 - playbooks: - create: ../../../../../tests/cloud_playbooks/create-kubevirt.yml - prepare: ../../../molecule/prepare.yml -verifier: - name: ansible diff --git a/roles/container-engine/kata-containers/molecule/default/verify.yml b/roles/container-engine/kata-containers/molecule/default/verify.yml deleted file mode 100644 index 1bb02c32147..00000000000 --- a/roles/container-engine/kata-containers/molecule/default/verify.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -- name: Test kata-containers - hosts: all - gather_facts: false - tasks: - - name: Test version - command: "/opt/kata/bin/kata-runtime version" - register: version - failed_when: > - version is failed or - 'kata-runtime' not in version.stdout - - name: Test version - command: "/opt/kata/bin/kata-runtime check" - register: check - failed_when: > - check is failed or - 'System is capable of running' not in check.stdout - -- name: Test run container - import_playbook: ../../../molecule/test_runtime.yml - vars: - container_runtime: kata-qemu - container_manager: containerd diff --git a/roles/container-engine/kata-containers/tasks/main.yml b/roles/container-engine/kata-containers/tasks/main.yml deleted file mode 100644 index 5014c214a49..00000000000 --- a/roles/container-engine/kata-containers/tasks/main.yml +++ /dev/null @@ -1,54 +0,0 @@ ---- -- name: 
Kata-containers | Download kata binary - include_tasks: "../../../download/tasks/download_file.yml" - vars: - download: "{{ download_defaults | combine(downloads.kata_containers) }}" - -- name: Kata-containers | Copy kata-containers binary - unarchive: - src: "{{ downloads.kata_containers.dest }}" - dest: "/" - mode: "0755" - owner: root - group: root - remote_src: true - -- name: Kata-containers | Create config directory - file: - path: "{{ kata_containers_config_dir }}" - state: directory - mode: "0755" - -- name: Kata-containers | Set configuration - template: - src: "{{ item }}.j2" - dest: "{{ kata_containers_config_dir }}/{{ item }}" - mode: "0644" - with_items: - - configuration-qemu.toml - -- name: Kata-containers | Set containerd bin - vars: - shim: "{{ item }}" - template: - dest: "{{ kata_containers_containerd_bin_dir }}/containerd-shim-kata-{{ item }}-v2" - src: containerd-shim-kata-v2.j2 - mode: "0755" - with_items: - - qemu - -- name: Kata-containers | Load vhost kernel modules - community.general.modprobe: - state: present - name: "{{ item }}" - with_items: - - vhost_vsock - - vhost_net - -- name: Kata-containers | Persist vhost kernel modules - copy: - dest: /etc/modules-load.d/kubespray-kata-containers.conf - mode: "0644" - content: | - vhost_vsock - vhost_net diff --git a/roles/container-engine/kata-containers/templates/configuration-qemu.toml.j2 b/roles/container-engine/kata-containers/templates/configuration-qemu.toml.j2 deleted file mode 100644 index 15511442c6d..00000000000 --- a/roles/container-engine/kata-containers/templates/configuration-qemu.toml.j2 +++ /dev/null @@ -1,706 +0,0 @@ -# Copyright (c) 2017-2019 Intel Corporation -# Copyright (c) 2021 Adobe Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# XXX: WARNING: this file is auto-generated. -# XXX: -# XXX: Source file: "config/configuration-qemu.toml.in" -# XXX: Project: -# XXX: Name: Kata Containers -# XXX: Type: kata - -[hypervisor.qemu] -path = "/opt/kata/bin/qemu-system-x86_64" -{% if kata_containers_version is version('2.2.0', '>=') %} -kernel = "/opt/kata/share/kata-containers/vmlinux.container" -{% else %} -kernel = "/opt/kata/share/kata-containers/vmlinuz.container" -{% endif %} -image = "/opt/kata/share/kata-containers/kata-containers.img" -# initrd = "/opt/kata/share/kata-containers/kata-containers-initrd.img" -machine_type = "q35" - -# rootfs filesystem type: -# - ext4 (default) -# - xfs -# - erofs -rootfs_type="ext4" - -# Enable confidential guest support. -# Toggling that setting may trigger different hardware features, ranging -# from memory encryption to both memory and CPU-state encryption and integrity. -# The Kata Containers runtime dynamically detects the available feature set and -# aims at enabling the largest possible one, returning an error if none is -# available, or none is supported by the hypervisor. -# -# Known limitations: -# * Does not work by design: -# - CPU Hotplug -# - Memory Hotplug -# - NVDIMM devices -# -# Default false -# confidential_guest = true - -# Choose AMD SEV-SNP confidential guests -# In case of using confidential guests on AMD hardware that supports both SEV -# and SEV-SNP, the following enables SEV-SNP guests. SEV guests are default. -# Default false -# sev_snp_guest = true - -# Enable running QEMU VMM as a non-root user. -# By default QEMU VMM run as root. When this is set to true, QEMU VMM process runs as -# a non-root random user. See documentation for the limitations of this mode. 
-# rootless = true - -# List of valid annotation names for the hypervisor -# Each member of the list is a regular expression, which is the base name -# of the annotation, e.g. "path" for io.katacontainers.config.hypervisor.path" -enable_annotations = ["enable_iommu"] - -# List of valid annotations values for the hypervisor -# Each member of the list is a path pattern as described by glob(3). -# The default if not set is empty (all annotations rejected.) -# Your distribution recommends: ["/opt/kata/bin/qemu-system-x86_64"] -valid_hypervisor_paths = ["/opt/kata/bin/qemu-system-x86_64"] - -# Optional space-separated list of options to pass to the guest kernel. -# For example, use `kernel_params = "vsyscall=emulate"` if you are having -# trouble running pre-2.15 glibc. -# -# WARNING: - any parameter specified here will take priority over the default -# parameter value of the same name used to start the virtual machine. -# Do not set values here unless you understand the impact of doing so as you -# may stop the virtual machine from booting. -# To see the list of default parameters, enable hypervisor debug, create a -# container and look for 'default-kernel-parameters' log entries. -kernel_params = "" - -# Path to the firmware. -# If you want that qemu uses the default firmware leave this option empty -firmware = "" - -# Path to the firmware volume. -# firmware TDVF or OVMF can be split into FIRMWARE_VARS.fd (UEFI variables -# as configuration) and FIRMWARE_CODE.fd (UEFI program image). UEFI variables -# can be customized per each user while UEFI code is kept same. -firmware_volume = "" - -# Machine accelerators -# comma-separated list of machine accelerators to pass to the hypervisor. -# For example, `machine_accelerators = "nosmm,nosmbus,nosata,nopit,static-prt,nofw"` -machine_accelerators="" - -# Qemu seccomp sandbox feature -# comma-separated list of seccomp sandbox features to control the syscall access. -# For example, `seccompsandbox= "on,obsolete=deny,spawn=deny,resourcecontrol=deny"` -# Note: "elevateprivileges=deny" doesn't work with daemonize option, so it's removed from the seccomp sandbox -# Another note: enabling this feature may reduce performance, you may enable -# /proc/sys/net/core/bpf_jit_enable to reduce the impact. see https://man7.org/linux/man-pages/man8/bpfc.8.html -#seccompsandbox="on,obsolete=deny,spawn=deny,resourcecontrol=deny" - -# CPU features -# comma-separated list of cpu features to pass to the cpu -# For example, `cpu_features = "pmu=off,vmx=off" -cpu_features="pmu=off" - -# Default number of vCPUs per SB/VM: -# unspecified or 0 --> will be set to 1 -# < 0 --> will be set to the actual number of physical cores -# > 0 <= number of physical cores --> will be set to the specified number -# > number of physical cores --> will be set to the actual number of physical cores -default_vcpus = 1 - -# Default maximum number of vCPUs per SB/VM: -# unspecified or == 0 --> will be set to the actual number of physical cores or to the maximum number -# of vCPUs supported by KVM if that number is exceeded -# > 0 <= number of physical cores --> will be set to the specified number -# > number of physical cores --> will be set to the actual number of physical cores or to the maximum number -# of vCPUs supported by KVM if that number is exceeded -# WARNING: Depending of the architecture, the maximum number of vCPUs supported by KVM is used when -# the actual number of physical cores is greater than it. 
-# WARNING: Be aware that this value impacts the virtual machine's memory footprint and CPU
-# hotplug functionality. For example, `default_maxvcpus = 240` specifies that up to 240 vCPUs
-# can be added to a SB/VM, but the memory footprint will be big. Another example, with
-# `default_maxvcpus = 8` the memory footprint will be small, but 8 will be the maximum number of
-# vCPUs supported by the SB/VM. In general, we recommend that you do not edit this variable,
-# unless you know what you are doing.
-# NOTICE: on arm platform with gicv2 interrupt controller, set it to 8.
-default_maxvcpus = 0
-
-# Bridges can be used to hot plug devices.
-# Limitations:
-# * Currently only pci bridges are supported
-# * Up to 30 devices per bridge can be hot plugged.
-# * Up to 5 PCI bridges can be cold plugged per VM.
-# This limitation could be a bug in qemu or in the kernel
-# Default number of bridges per SB/VM:
-# unspecified or 0 --> will be set to 1
-# > 1 <= 5 --> will be set to the specified number
-# > 5 --> will be set to 5
-default_bridges = 1
-
-# Default memory size in MiB for SB/VM.
-# If unspecified then it will be set to 2048 MiB.
-default_memory = {{ kata_containers_qemu_default_memory }}
-#
-# Default memory slots per SB/VM.
-# If unspecified then it will be set to 10.
-# This determines how many times memory can be hot-added to the sandbox/VM.
-#memory_slots = 10
-
-# Default maximum memory in MiB per SB / VM
-# unspecified or == 0 --> will be set to the actual amount of physical RAM
-# > 0 <= amount of physical RAM --> will be set to the specified number
-# > amount of physical RAM --> will be set to the actual amount of physical RAM
-default_maxmemory = 0
-
-# This size in MiB will be added to the hypervisor's maximum memory.
-# It is the memory address space for the NVDIMM device.
-# If the block storage driver (block_device_driver) is set to "nvdimm",
-# memory_offset should be set to the size of the block device.
-# Default 0
-#memory_offset = 0
-
-# Specifies whether virtio-mem will be enabled.
-# Please note that this option should be used with the command
-# "echo 1 > /proc/sys/vm/overcommit_memory".
-# Default false
-#enable_virtio_mem = true
-
-# Disable block device from being used for a container's rootfs.
-# In case of a storage driver like devicemapper where a container's
-# root file system is backed by a block device, the block device is passed
-# directly to the hypervisor for performance reasons.
-# This flag prevents the block device from being passed to the hypervisor,
-# virtio-fs is used instead to pass the rootfs.
-disable_block_device_use = false
-
-# Shared file system type:
-# - virtio-fs (default)
-# - virtio-9p
-# - virtio-fs-nydus
-{% if kata_containers_version is version('2.2.0', '>=') %}
-shared_fs = "virtio-fs"
-{% else %}
-shared_fs = "virtio-9p"
-{% endif %}
-
-# Path to vhost-user-fs daemon.
-{% if kata_containers_version is version('2.5.0', '>=') %}
-virtio_fs_daemon = "/opt/kata/libexec/virtiofsd"
-{% else %}
-virtio_fs_daemon = "/opt/kata/libexec/kata-qemu/virtiofsd"
-{% endif %}
-
-# List of valid annotations values for the virtiofs daemon
-# The default if not set is empty (all annotations rejected.)
-# Your distribution recommends: ["/opt/kata/libexec/virtiofsd"]
-valid_virtio_fs_daemon_paths = [
-  "/opt/kata/libexec/virtiofsd",
-  "/opt/kata/libexec/kata-qemu/virtiofsd",
-]
-
-# Default size of DAX cache in MiB
-virtio_fs_cache_size = 0
-
-# Default size of virtqueues
-virtio_fs_queue_size = 1024
-
-# Extra args for virtiofsd daemon
-#
-# Format example:
-# ["--arg1=xxx", "--arg2=yyy"]
-# Examples:
-# Set virtiofsd log level to debug : ["--log-level=debug"]
-#
-# see `virtiofsd -h` for possible options.
-virtio_fs_extra_args = ["--thread-pool-size=1", "--announce-submounts"]
-
-# Cache mode:
-#
-# - never
-# Metadata, data, and pathname lookup are not cached in guest. They are
-# always fetched from host and any changes are immediately pushed to host.
-#
-# - auto
-# Metadata and pathname lookup cache expires after a configured amount of
-# time (default is 1 second). Data is cached while the file is open (close
-# to open consistency).
-#
-# - always
-# Metadata, data, and pathname lookup are cached in guest and never expire.
-virtio_fs_cache = "{{ kata_containers_virtio_fs_cache }}"
-
-# Block storage driver to be used for the hypervisor in case the container
-# rootfs is backed by a block device. This is virtio-scsi, virtio-blk
-# or nvdimm.
-block_device_driver = "virtio-scsi"
-
-# aio is the I/O mechanism used by qemu
-# Options:
-#
-# - threads
-# Pthread based disk I/O.
-#
-# - native
-# Native Linux I/O.
-#
-# - io_uring
-# Linux io_uring API. This provides the fastest I/O operations on Linux, requires kernel>5.1 and
-# qemu >=5.0.
-block_device_aio = "io_uring"
-
-# Specifies whether cache-related options will be set for block devices.
-# Default false
-#block_device_cache_set = true
-
-# Specifies cache-related options for block devices.
-# Denotes whether use of O_DIRECT (bypass the host page cache) is enabled.
-# Default false
-#block_device_cache_direct = true
-
-# Specifies cache-related options for block devices.
-# Denotes whether flush requests for the device are ignored.
-# Default false
-#block_device_cache_noflush = true
-
-# Enable iothreads (data-plane) to be used. This causes IO to be
-# handled in a separate IO thread. This is currently only implemented
-# for SCSI.
-#
-enable_iothreads = false
-
-# Enable pre-allocation of VM RAM, default false
-# Enabling this will result in lower container density
-# as all of the memory will be allocated and locked
-# This is useful when you want to reserve all the memory
-# upfront or in the cases where you want memory latencies
-# to be very predictable
-# Default false
-enable_mem_prealloc = {{ kata_containers_qemu_enable_mem_prealloc }}
-
-# Enable huge pages for VM RAM, default false
-# Enabling this will result in the VM memory
-# being allocated using huge pages.
-# This is useful when you want to use vhost-user network
-# stacks within the container. This will automatically
-# result in memory pre-allocation
-#enable_hugepages = true
-
-# Enable vhost-user storage device, default false
-# Enabling this will result in some Linux reserved block type
-# major range 240-254 being chosen to represent vhost-user devices.
-enable_vhost_user_store = false
-
-# The base directory specifically used for vhost-user devices.
-# Its sub-path "block" is used for block devices; "block/sockets" is
-# where we expect vhost-user sockets to live; "block/devices" is where
-# simulated block device nodes for vhost-user devices live.
-vhost_user_store_path = "/var/run/kata-containers/vhost-user"
-
-# Enable vIOMMU, default false
-# Enabling this will result in the VM having a vIOMMU device
-# This will also add the following options to the kernel's
-# command line: intel_iommu=on,iommu=pt
-#enable_iommu = true
-
-# Enable IOMMU_PLATFORM, default false
-# Enabling this will result in the VM device having iommu_platform=on set
-#enable_iommu_platform = true
-
-# List of valid annotations values for the vhost user store path
-# The default if not set is empty (all annotations rejected.)
-# Your distribution recommends: ["/var/run/kata-containers/vhost-user"]
-valid_vhost_user_store_paths = ["/var/run/kata-containers/vhost-user"]
-
-# The timeout for reconnecting on non-server spdk sockets when the remote end goes away.
-# qemu will delay this many seconds and then attempt to reconnect.
-# Zero disables reconnecting, and the default is zero.
-vhost_user_reconnect_timeout_sec = 0
-
-# Enable file based guest memory support. The default is an empty string which
-# will disable this feature. In the case of virtio-fs, this is enabled
-# automatically and '/dev/shm' is used as the backing folder.
-# This option will be ignored if VM templating is enabled.
-#file_mem_backend = ""
-
-# List of valid annotations values for the file_mem_backend annotation
-# The default if not set is empty (all annotations rejected.)
-# Your distribution recommends: [""]
-valid_file_mem_backends = [""]
-
-# -pflash can add an image file to the VM. Its arguments should be in the format
-# of ["/path/to/flash0.img", "/path/to/flash1.img"]
-pflashes = []
-
-# This option changes the default hypervisor and kernel parameters
-# to enable debug output where available. Debug also enables the HMP socket.
-#
-# Default false
-enable_debug = {{ kata_containers_qemu_debug }}
-
-# Disable the customizations done in the runtime when it detects
-# that it is running on top of a VMM. This will result in the runtime
-# behaving as it would when running on bare metal.
-#
-#disable_nesting_checks = true
-
-# This is the msize used for 9p shares. It is the number of bytes
-# used for 9p packet payload.
-#msize_9p = 8192
-
-# If false and nvdimm is supported, use nvdimm device to plug guest image.
-# Otherwise virtio-block device is used.
-#
-# nvdimm is not supported when `confidential_guest = true`.
-#
-# Default is false
-#disable_image_nvdimm = true
-
-# VFIO devices are hotplugged on a bridge by default.
-# Enable hotplugging on root bus. This may be required for devices with
-# a large PCI bar, as this is a current limitation with hotplugging on
-# a bridge.
-# Default false
-#hotplug_vfio_on_root_bus = true
-
-# Before hot plugging a PCIe device, you need to add a pcie_root_port device.
-# Use this parameter when using some large PCI bar devices, such as an Nvidia GPU.
-# The value sets the number of pcie_root_port devices.
-# This value is valid when hotplug_vfio_on_root_bus is true and machine_type is "q35"
-# Default 0
-#pcie_root_port = 2
-
-# If vhost-net backend for virtio-net is not desired, set to true. Default is false, which trades off
-# security (vhost-net runs in ring0) for network I/O performance.
-#disable_vhost_net = true
-
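The two VFIO options above interact: hotplug on the root bus only works if enough PCIe root ports were pre-allocated. A hedged sketch of the combination, in the template's own commented-out style (the values are illustrative and assume the "q35" machine type; this block is not part of the original template):

# Hypothetical example: hot plug a large-BAR device such as a GPU
# on the root bus, with two pre-allocated root ports.
#hotplug_vfio_on_root_bus = true
#pcie_root_port = 2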
-#
-# Default entropy source.
-# The path to a host source of entropy (including a real hardware RNG)
-# /dev/urandom and /dev/random are two main options.
-# Be aware that /dev/random is a blocking source of entropy. If the host
-# runs out of entropy, the VM's boot time will increase, leading to startup
-# timeouts.
-# The source of entropy /dev/urandom is non-blocking and provides a
-# generally acceptable source of entropy. It should work well for pretty much
-# all practical purposes.
-#entropy_source= "/dev/urandom"
-
-# List of valid annotations values for entropy_source
-# The default if not set is empty (all annotations rejected.)
-# Your distribution recommends: ["/dev/urandom","/dev/random",""]
-valid_entropy_sources = ["/dev/urandom","/dev/random",""]
-
-# Path to OCI hook binaries in the *guest rootfs*.
-# This does not affect host-side hooks which must instead be added to
-# the OCI spec passed to the runtime.
-#
-# You can create a rootfs with hooks by customizing the osbuilder scripts:
-# https://github.com/kata-containers/kata-containers/tree/main/tools/osbuilder
-#
-# Hooks must be stored in a subdirectory of guest_hook_path according to their
-# hook type, i.e. "guest_hook_path/{prestart,poststart,poststop}".
-# The agent will scan these directories for executable files and add them, in
-# lexicographical order, to the lifecycle of the guest container.
-# Hooks are executed in the runtime namespace of the guest. See the official documentation:
-# https://github.com/opencontainers/runtime-spec/blob/v1.0.1/config.md#posix-platform-hooks
-# Warnings will be logged if any error is encountered while scanning for hooks,
-# but it will not abort container execution.
-#guest_hook_path = "/usr/share/oci/hooks"
-#
-# Use rx Rate Limiter to control network I/O inbound bandwidth (size in bits/sec for SB/VM).
-# In Qemu, we use classful qdiscs HTB (Hierarchy Token Bucket) to discipline traffic.
-# Default 0-sized value means unlimited rate.
-#rx_rate_limiter_max_rate = 0
-# Use tx Rate Limiter to control network I/O outbound bandwidth (size in bits/sec for SB/VM).
-# In Qemu, we use classful qdiscs HTB (Hierarchy Token Bucket) and ifb (Intermediate Functional Block)
-# to discipline traffic.
-# Default 0-sized value means unlimited rate.
-#tx_rate_limiter_max_rate = 0
-
-# Set where to save the guest memory dump file.
-# If set, when a GUEST_PANICKED event occurs,
-# guest memory will be dumped to the host filesystem under guest_memory_dump_path.
-# This directory will be created automatically if it does not exist.
-#
-# The dumped file (also called vmcore) can be processed with crash or gdb.
-#
-# WARNING:
-# Dumping the guest's memory can take a long time, depending on the amount of
-# guest memory, and use a lot of disk space.
-#guest_memory_dump_path="/var/crash/kata"
-
-# Whether to enable paging.
-# Basically, if you want to use "gdb" rather than "crash",
-# or need the guest-virtual addresses in the ELF vmcore,
-# then you should enable paging.
-#
-# See: https://www.qemu.org/docs/master/qemu-qmp-ref.html#Dump-guest-memory for details
-#guest_memory_dump_paging=false
-
-# Enable swap in the guest. Default false.
-# When enable_guest_swap is enabled, a raw file is inserted into the guest as the swap device
-# if the swappiness of a container (set by annotation "io.katacontainers.container.resource.swappiness")
-# is bigger than 0.
-# The size of the swap device should be
-# swap_in_bytes (set by annotation "io.katacontainers.container.resource.swap_in_bytes") - memory_limit_in_bytes.
-# If swap_in_bytes is not set, the size should be memory_limit_in_bytes.
-# If swap_in_bytes and memory_limit_in_bytes are not set, the size should
-# be default_memory.
-#enable_guest_swap = true
-
-# Use legacy serial for the guest console if available and implemented for the architecture. Default false
-#use_legacy_serial = true
-
-# disable applying SELinux on the VMM process (default false)
-disable_selinux=false
-
-# disable applying SELinux on the container process
-# If set to false, the type `container_t` is applied to the container process by default.
-# Note: To enable guest SELinux, the guest rootfs must be CentOS that is created and built
-# with `SELINUX=yes`.
-# (default: true)
-disable_guest_selinux=true
-
-[factory]
-# VM templating support. Once enabled, new VMs are created from template
-# using vm cloning. They will share the same initial kernel, initramfs and
-# agent memory by mapping it readonly. It helps speed up new container
-# creation and saves a lot of memory if there are many kata containers running
-# on the same host.
-#
-# When disabled, new VMs are created from scratch.
-#
-# Note: Requires "initrd=" to be set ("image=" is not supported).
-#
-# Default false
-#enable_template = true
-
-# Specifies the path of template.
-#
-# Default "/run/vc/vm/template"
-#template_path = "/run/vc/vm/template"
-
-# The number of caches of VMCache:
-# unspecified or == 0 --> VMCache is disabled
-# > 0 --> will be set to the specified number
-#
-# VMCache is a function that creates VMs as caches before using it.
-# It helps speed up new container creation.
-# The function consists of a server and some clients communicating
-# through Unix socket. The protocol is gRPC in protocols/cache/cache.proto.
-# The VMCache server will create some VMs and cache them by factory cache.
-# It converts the VM to gRPC format and transports it when it gets
-# requests from clients.
-# Factory grpccache is the VMCache client. It will request a gRPC-format
-# VM and convert it back to a VM. If the VMCache function is enabled,
-# kata-runtime will request a VM from factory grpccache when it creates
-# a new sandbox.
-#
-# Default 0
-#vm_cache_number = 0
-
-# Specify the address of the Unix socket that is used by VMCache.
-#
-# Default /var/run/kata-containers/cache.sock
-#vm_cache_endpoint = "/var/run/kata-containers/cache.sock"
-
-[agent.kata]
-# If enabled, make the agent display debug-level messages.
-# (default: disabled)
-enable_debug = {{ kata_containers_qemu_debug }}
-
-# Enable agent tracing.
-#
-# If enabled, the agent will generate OpenTelemetry trace spans.
-#
-# Notes:
-#
-# - If the runtime also has tracing enabled, the agent spans will be
-# associated with the appropriate runtime parent span.
-# - If enabled, the runtime will wait for the container to shutdown,
-# increasing the container shutdown time slightly.
-#
-# (default: disabled)
-#enable_tracing = true
-
-# Comma separated list of kernel modules and their parameters.
-# These modules will be loaded in the guest kernel using modprobe(8).
-# The following example can be used to load two kernel modules with parameters
-# - kernel_modules=["e1000e InterruptThrottleRate=3000,3000,3000 EEE=1", "i915 enable_ppgtt=0"]
-# The first word is considered as the module name and the rest as its parameters.
-# The container will not be started when:
-# * A kernel module is specified and the modprobe command is not installed in the guest
-# or it fails loading the module.
-# * The module is not available in the guest or it doesn't meet the guest kernel
-# requirements, such as architecture and version.
-#
-kernel_modules=[]
-
-# Enable debug console.
-
-# If enabled, users can connect to the guest OS running inside the hypervisor
-# through the "kata-runtime exec " command
-
-#debug_console_enabled = true
-
-# Agent connection dialing timeout value in seconds
-# (default: 30)
-#dial_timeout = 30
-
-[runtime]
-# If enabled, the runtime will log additional debug messages to the
-# system log
-# (default: disabled)
-enable_debug = {{ kata_containers_qemu_debug }}
-#
-# Internetworking model
-# Determines how the VM should be connected to the
-# container network interface
-# Options:
-#
-# - macvtap
-# Used when the Container network interface can be bridged using
-# macvtap.
-#
-# - none
-# Used when customizing the network. Only creates a tap device. No veth pair.
-#
-# - tcfilter
-# Uses tc filter rules to redirect traffic from the network interface
-# provided by plugin to a tap interface connected to the VM.
-#
-internetworking_model="tcfilter"
-
-# disable guest seccomp
-# Determines whether container seccomp profiles are passed to the virtual
-# machine and applied by the kata agent. If set to true, seccomp is not applied
-# within the guest
-# (default: true)
-disable_guest_seccomp=true
-
-# vCPU pinning settings
-# If enabled, each vCPU thread will be scheduled to a fixed CPU
-# qualified condition: num(vCPU threads) == num(CPUs in sandbox's CPUSet)
-# enable_vcpus_pinning = false
-
-# Apply a custom SELinux security policy to the container process inside the VM.
-# This is used when you want to apply a type other than the default `container_t`,
-# so general users should not uncomment and apply it.
-# (format: "user:role:type")
-# Note: You cannot specify MCS policy with the label because the sensitivity levels and
-# categories are determined automatically by high-level container runtimes such as containerd.
-#guest_selinux_label="system_u:system_r:container_t"
-
-# If enabled, the runtime will create opentracing.io traces and spans.
-# (See https://www.jaegertracing.io/docs/getting-started).
-# (default: disabled)
-#enable_tracing = true
-
-# Set the full url to the Jaeger HTTP Thrift collector.
-# The default if not set will be "http://localhost:14268/api/traces"
-#jaeger_endpoint = ""
-
-# Sets the username to be used if basic auth is required for Jaeger.
-#jaeger_user = ""
-
-# Sets the password to be used if basic auth is required for Jaeger.
-#jaeger_password = ""
-
-# If enabled, the runtime will not create a network namespace for shim and hypervisor processes.
-# This option may have some potential impacts to your host. It should only be used when you know what you're doing.
-# `disable_new_netns` conflicts with `internetworking_model=tcfilter` and `internetworking_model=macvtap`. It works only
-# with `internetworking_model=none`. The tap device will be in the host network namespace and can connect to a bridge
-# (like OVS) directly.
-# (default: false)
-#disable_new_netns = true
-
-# If enabled, the runtime will add all the kata processes inside one dedicated cgroup.
-# The container cgroups in the host are not created, just one single cgroup per sandbox.
-# The runtime caller is free to restrict or collect cgroup stats of the overall Kata sandbox.
-# The sandbox cgroup path is the parent cgroup of a container with the PodSandbox annotation.
-# The sandbox cgroup is constrained if there is no container type annotation.
-# See: https://pkg.go.dev/github.com/kata-containers/kata-containers/src/runtime/virtcontainers#ContainerType
-sandbox_cgroup_only={{ kata_containers_qemu_sandbox_cgroup_only }}
-
-# If enabled, the runtime will attempt to determine appropriate sandbox size (memory, CPU) before booting the virtual machine. In
-# this case, the runtime will not dynamically update the amount of memory and CPU in the virtual machine. This is generally helpful
-# when a hardware architecture or hypervisor solution is used which does not support CPU and/or memory hotplug.
-# Compatibility for determining appropriate sandbox (VM) size:
-# - When running with pods, sandbox sizing information will only be available if using Kubernetes >= 1.23 and containerd >= 1.6. CRI-O
-# does not yet support sandbox sizing annotations.
-# - When running single containers using a tool like ctr, container sizing information will be available.
-static_sandbox_resource_mgmt=false
-
-# If specified, sandbox_bind_mounts identifies host paths to be mounted (ro) into the sandbox's shared path.
-# This is only valid if filesystem sharing is utilized. The provided path(s) will be bindmounted into the shared fs directory.
-# If defaults are utilized, these mounts should be available in the guest at `/run/kata-containers/shared/containers/sandbox-mounts`
-# These will not be exposed to the container workloads, and are only provided for potential guest services.
-sandbox_bind_mounts=[]
-
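Since sandbox_bind_mounts takes a list of host paths, a minimal hypothetical sketch in the template's own commented-out style (the host directory is illustrative and not part of the original template):

# Hypothetical example: expose one host directory read-only to guest
# services under /run/kata-containers/shared/containers/sandbox-mounts.
#sandbox_bind_mounts=["/opt/kata-extras"]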
-# VFIO Mode
-# Determines how VFIO devices should be presented to the container.
-# Options:
-#
-# - vfio
-# Matches behaviour of OCI runtimes (e.g. runc) as much as
-# possible. VFIO devices will appear in the container as VFIO
-# character devices under /dev/vfio. The exact names may differ
-# from the host (they need to match the VM's IOMMU group numbers
-# rather than the host's)
-#
-# - guest-kernel
-# This is a Kata-specific behaviour that's useful in certain cases.
-# The VFIO device is managed by whatever driver in the VM kernel
-# claims it. This means it will appear as one or more device nodes
-# or network interfaces depending on the nature of the device.
-# Using this mode requires specially built workloads that know how
-# to locate the relevant device interfaces within the VM.
-#
-vfio_mode="guest-kernel"
-
-# If enabled, the runtime will not create Kubernetes emptyDir mounts on the guest filesystem. Instead, emptyDir mounts will
-# be created on the host and shared via virtio-fs. This is potentially slower, but allows sharing of files from host to guest.
-disable_guest_empty_dir=false
-
-# Enabled experimental feature list, format: ["a", "b"].
-# Experimental features are features not stable enough for production,
-# they may break compatibility, and are prepared for a big version bump.
-# Supported experimental features:
-# (default: [])
-experimental=[]
-
-# If enabled, user can run pprof tools with shim v2 process through kata-monitor.
-# (default: false)
-# enable_pprof = true
-
-# WARNING: None of the options in the following section have been implemented yet.
-# This section was added as a placeholder. DO NOT USE IT!
-[image]
-# Container image service.
-#
-# Offload the CRI image management service to the Kata agent.
-# (default: false)
-#service_offload = true
-
-# Container image decryption keys provisioning.
-# Applies only if service_offload is true.
-# Keys can be provisioned locally (e.g. through a special command or
-# a local file) or remotely (usually after the guest is remotely attested).
-# The provision setting is a complete URL that lets the Kata agent decide -# which method to use in order to fetch the keys. -# -# Keys can be stored in a local file, in a measured and attested initrd: -#provision=data:///local/key/file -# -# Keys could be fetched through a special command or binary from the -# initrd (guest) image, e.g. a firmware call: -#provision=file:///path/to/bin/fetcher/in/guest -# -# Keys can be remotely provisioned. The Kata agent fetches them from e.g. -# a HTTPS URL: -#provision=https://my-key-broker.foo/tenant/ diff --git a/roles/container-engine/kata-containers/templates/containerd-shim-kata-v2.j2 b/roles/container-engine/kata-containers/templates/containerd-shim-kata-v2.j2 deleted file mode 100644 index a3cb830e528..00000000000 --- a/roles/container-engine/kata-containers/templates/containerd-shim-kata-v2.j2 +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/bash -KATA_CONF_FILE={{ kata_containers_config_dir }}/configuration-{{ shim }}.toml {{ kata_containers_dir }}/bin/containerd-shim-kata-v2 $@ diff --git a/roles/container-engine/meta/main.yml b/roles/container-engine/meta/main.yml index 3e068d60a0a..49fe314c8e1 100644 --- a/roles/container-engine/meta/main.yml +++ b/roles/container-engine/meta/main.yml @@ -6,13 +6,6 @@ dependencies: - container-engine - validate-container-engine - - role: container-engine/kata-containers - when: - - kata_containers_enabled - tags: - - container-engine - - kata-containers - - role: container-engine/gvisor when: - gvisor_enabled @@ -21,38 +14,9 @@ dependencies: - container-engine - gvisor - - role: container-engine/crun - when: - - crun_enabled - tags: - - container-engine - - crun - - - role: container-engine/youki - when: - - youki_enabled - - container_manager == 'crio' - tags: - - container-engine - - youki - - - role: container-engine/cri-o - when: - - container_manager == 'crio' - tags: - - container-engine - - crio - - role: container-engine/containerd when: - container_manager == 'containerd' tags: - container-engine - containerd - - - role: container-engine/cri-dockerd - when: - - container_manager == 'docker' - tags: - - container-engine - - docker diff --git a/roles/container-engine/nerdctl/handlers/main.yml b/roles/container-engine/nerdctl/handlers/main.yml deleted file mode 100644 index 1744706075c..00000000000 --- a/roles/container-engine/nerdctl/handlers/main.yml +++ /dev/null @@ -1,12 +0,0 @@ ---- -- name: Get nerdctl completion - command: "{{ bin_dir }}/nerdctl completion bash" - changed_when: false - register: nerdctl_completion - check_mode: false - -- name: Install nerdctl completion - copy: - dest: /etc/bash_completion.d/nerdctl - content: "{{ nerdctl_completion.stdout }}" - mode: "0644" diff --git a/roles/container-engine/nerdctl/tasks/main.yml b/roles/container-engine/nerdctl/tasks/main.yml deleted file mode 100644 index d3cd0070cac..00000000000 --- a/roles/container-engine/nerdctl/tasks/main.yml +++ /dev/null @@ -1,36 +0,0 @@ ---- -- name: Nerdctl | Download nerdctl - include_tasks: "../../../download/tasks/download_file.yml" - vars: - download: "{{ download_defaults | combine(downloads.nerdctl) }}" - -- name: Nerdctl | Copy nerdctl binary from download dir - copy: - src: "{{ local_release_dir }}/nerdctl" - dest: "{{ bin_dir }}/nerdctl" - mode: "0755" - remote_src: true - owner: root - group: root - become: true - notify: - - Get nerdctl completion - - Install nerdctl completion - -- name: Nerdctl | Create configuration dir - file: - path: /etc/nerdctl - state: directory - mode: "0755" - owner: root - group: root 
- become: true - -- name: Nerdctl | Install nerdctl configuration - template: - src: nerdctl.toml.j2 - dest: /etc/nerdctl/nerdctl.toml - mode: "0644" - owner: root - group: root - become: true diff --git a/roles/container-engine/nerdctl/templates/nerdctl.toml.j2 b/roles/container-engine/nerdctl/templates/nerdctl.toml.j2 deleted file mode 100644 index 8b590f6f53c..00000000000 --- a/roles/container-engine/nerdctl/templates/nerdctl.toml.j2 +++ /dev/null @@ -1,9 +0,0 @@ -debug = false -debug_full = false -address = "{{ cri_socket }}" -namespace = "k8s.io" -snapshotter = "{{ nerdctl_snapshotter | default('overlayfs') }}" -cni_path = "/opt/cni/bin" -cni_netconfpath = "/etc/cni/net.d" -cgroup_manager = "{{ kubelet_cgroup_driver | default('systemd') }}" -hosts_dir = ["{{ containerd_cfg_dir }}/certs.d"] diff --git a/roles/container-engine/skopeo/tasks/main.yml b/roles/container-engine/skopeo/tasks/main.yml deleted file mode 100644 index 8f21e3f1c3b..00000000000 --- a/roles/container-engine/skopeo/tasks/main.yml +++ /dev/null @@ -1,32 +0,0 @@ ---- -- name: Skopeo | check if fedora coreos - stat: - path: /run/ostree-booted - get_attributes: false - get_checksum: false - get_mime: false - register: ostree - -- name: Skopeo | set is_ostree - set_fact: - is_ostree: "{{ ostree.stat.exists }}" - -- name: Skopeo | Uninstall skopeo package managed by package manager - package: - name: skopeo - state: absent - when: - - not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar")) - ignore_errors: true # noqa ignore-errors - -- name: Skopeo | Download skopeo binary - include_tasks: "../../../download/tasks/download_file.yml" - vars: - download: "{{ download_defaults | combine(downloads.skopeo) }}" - -- name: Copy skopeo binary from download dir - copy: - src: "{{ downloads.skopeo.dest }}" - dest: "{{ bin_dir }}/skopeo" - mode: "0755" - remote_src: true diff --git a/roles/container-engine/youki/defaults/main.yml b/roles/container-engine/youki/defaults/main.yml deleted file mode 100644 index 2250f22ae3d..00000000000 --- a/roles/container-engine/youki/defaults/main.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- - -youki_bin_dir: "{{ bin_dir }}" diff --git a/roles/container-engine/youki/molecule/default/converge.yml b/roles/container-engine/youki/molecule/default/converge.yml deleted file mode 100644 index caa6176559d..00000000000 --- a/roles/container-engine/youki/molecule/default/converge.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -- name: Converge - hosts: all - become: true - vars: - youki_enabled: true - container_manager: crio - roles: - - role: kubespray_defaults - - role: container-engine/cri-o - - role: container-engine/youki diff --git a/roles/container-engine/youki/molecule/default/files/10-mynet.conf b/roles/container-engine/youki/molecule/default/files/10-mynet.conf deleted file mode 100644 index b9fa3ba73b6..00000000000 --- a/roles/container-engine/youki/molecule/default/files/10-mynet.conf +++ /dev/null @@ -1,17 +0,0 @@ -{ - "cniVersion": "0.4.0", - "name": "mynet", - "type": "bridge", - "bridge": "cni0", - "isGateway": true, - "ipMasq": true, - "ipam": { - "type": "host-local", - "subnet": "172.19.0.0/24", - "routes": [ - { - "dst": "0.0.0.0/0" - } - ] - } -} diff --git a/roles/container-engine/youki/molecule/default/files/container.json b/roles/container-engine/youki/molecule/default/files/container.json deleted file mode 100644 index a5d50943128..00000000000 --- a/roles/container-engine/youki/molecule/default/files/container.json +++ /dev/null @@ 
-1,10 +0,0 @@ -{ - "metadata": { - "name": "youki1" - }, - "image": { - "image": "quay.io/kubespray/hello-world:latest" - }, - "log_path": "youki1.0.log", - "linux": {} -} diff --git a/roles/container-engine/youki/molecule/default/files/sandbox.json b/roles/container-engine/youki/molecule/default/files/sandbox.json deleted file mode 100644 index b2a4ffe50fe..00000000000 --- a/roles/container-engine/youki/molecule/default/files/sandbox.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "metadata": { - "name": "youki1", - "namespace": "default", - "attempt": 1, - "uid": "hdishd83djaidwnduwk28bcsb" - }, - "linux": {}, - "log_directory": "/tmp" -} diff --git a/roles/container-engine/youki/molecule/default/molecule.yml b/roles/container-engine/youki/molecule/default/molecule.yml deleted file mode 100644 index f73a9775cc4..00000000000 --- a/roles/container-engine/youki/molecule/default/molecule.yml +++ /dev/null @@ -1,28 +0,0 @@ ---- -role_name_check: 1 -platforms: - - cloud_image: ubuntu-2404 - name: ubuntu24 - vm_cpu_cores: 1 - vm_memory: 1024 - node_groups: - - kube_control_plane - - name: almalinux9 - cloud_image: almalinux-9 - vm_cpu_cores: 1 - vm_memory: 1024 - node_groups: - - kube_control_plane -provisioner: - name: ansible - env: - ANSIBLE_ROLES_PATH: ../../../../ - config_options: - defaults: - callbacks_enabled: profile_tasks - timeout: 120 - playbooks: - create: ../../../../../tests/cloud_playbooks/create-kubevirt.yml - prepare: ../../../molecule/prepare.yml -verifier: - name: ansible diff --git a/roles/container-engine/youki/molecule/default/verify.yml b/roles/container-engine/youki/molecule/default/verify.yml deleted file mode 100644 index 75adeb55933..00000000000 --- a/roles/container-engine/youki/molecule/default/verify.yml +++ /dev/null @@ -1,19 +0,0 @@ ---- -- name: Test youki - hosts: all - gather_facts: false - tasks: - - name: Get kubespray defaults - import_role: - name: ../../../../../kubespray_defaults - - name: Test version - command: "{{ bin_dir }}/youki --version" - register: youki_version - failed_when: > - youki_version is failed or - 'youki' not in youki_version.stdout - -- name: Test run container - import_playbook: ../../../molecule/test_runtime.yml - vars: - container_runtime: youki diff --git a/roles/container-engine/youki/tasks/main.yml b/roles/container-engine/youki/tasks/main.yml deleted file mode 100644 index 7750c65b8d2..00000000000 --- a/roles/container-engine/youki/tasks/main.yml +++ /dev/null @@ -1,12 +0,0 @@ ---- -- name: Youki | Download youki - include_tasks: "../../../download/tasks/download_file.yml" - vars: - download: "{{ download_defaults | combine(downloads.youki) }}" - -- name: Youki | Copy youki binary from download dir - copy: - src: "{{ local_release_dir }}/youki" - dest: "{{ youki_bin_dir }}/youki" - mode: "0755" - remote_src: true diff --git a/roles/kubernetes/control-plane/meta/main.yml b/roles/kubernetes/control-plane/meta/main.yml index ceb05687d83..9f460e51153 100644 --- a/roles/kubernetes/control-plane/meta/main.yml +++ b/roles/kubernetes/control-plane/meta/main.yml @@ -6,5 +6,4 @@ dependencies: when: - etcd_deployment_type == "kubeadm" - not (ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk", "ClearLinux"] or is_fedora_coreos) - - role: network_plugin/calico_defaults - role: etcd_defaults diff --git a/roles/network_plugin/calico/files/openssl.conf b/roles/network_plugin/calico/files/openssl.conf deleted file mode 100644 index f4ba47da731..00000000000 --- a/roles/network_plugin/calico/files/openssl.conf +++ /dev/null @@ 
-1,27 +0,0 @@ -req_extensions = v3_req -distinguished_name = req_distinguished_name - -[req_distinguished_name] - -[ v3_req ] -basicConstraints = CA:FALSE -keyUsage = digitalSignature, keyEncipherment - -[ ssl_client ] -extendedKeyUsage = clientAuth, serverAuth -basicConstraints = CA:FALSE -subjectKeyIdentifier=hash -authorityKeyIdentifier=keyid,issuer - -[ v3_ca ] -basicConstraints = CA:TRUE -keyUsage = cRLSign, digitalSignature, keyCertSign -subjectKeyIdentifier=hash -authorityKeyIdentifier=keyid:always,issuer - -[ ssl_client_apiserver ] -extendedKeyUsage = clientAuth, serverAuth -basicConstraints = CA:FALSE -subjectKeyIdentifier=hash -authorityKeyIdentifier=keyid,issuer -subjectAltName = DNS:calico-api.calico-apiserver.svc diff --git a/roles/network_plugin/calico/handlers/main.yml b/roles/network_plugin/calico/handlers/main.yml deleted file mode 100644 index f5f5dc29ebc..00000000000 --- a/roles/network_plugin/calico/handlers/main.yml +++ /dev/null @@ -1,31 +0,0 @@ ---- -- name: Delete 10-calico.conflist - file: - path: /etc/cni/net.d/10-calico.conflist - state: absent - listen: Reset_calico_cni - when: calico_cni_config is defined - -- name: Calico | delete calico-node docker containers - shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -af name=k8s_POD_calico-node* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f" - args: - executable: /bin/bash - register: docker_calico_node_remove - until: docker_calico_node_remove is succeeded - retries: 5 - when: - - container_manager in ["docker"] - - calico_cni_config is defined - listen: Reset_calico_cni - -- name: Calico | delete calico-node crio/containerd containers - shell: 'set -o pipefail && {{ bin_dir }}/crictl pods --name calico-node-* -q | xargs -I% --no-run-if-empty bash -c "{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %"' - args: - executable: /bin/bash - register: crictl_calico_node_remove - until: crictl_calico_node_remove is succeeded - retries: 5 - when: - - container_manager in ["crio", "containerd"] - - calico_cni_config is defined - listen: Reset_calico_cni diff --git a/roles/network_plugin/calico/meta/main.yml b/roles/network_plugin/calico/meta/main.yml deleted file mode 100644 index 15e9b8c408d..00000000000 --- a/roles/network_plugin/calico/meta/main.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -dependencies: - - role: network_plugin/calico_defaults diff --git a/roles/network_plugin/calico/rr/defaults/main.yml b/roles/network_plugin/calico/rr/defaults/main.yml deleted file mode 100644 index dedda197cbc..00000000000 --- a/roles/network_plugin/calico/rr/defaults/main.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -# Global as_num (/calico/bgp/v1/global/as_num) -# should be the same as in calico role -global_as_num: "64512" -calico_baremetal_nodename: "{{ kube_override_hostname | default(inventory_hostname) }}" diff --git a/roles/network_plugin/calico/rr/tasks/main.yml b/roles/network_plugin/calico/rr/tasks/main.yml deleted file mode 100644 index 471518d9f22..00000000000 --- a/roles/network_plugin/calico/rr/tasks/main.yml +++ /dev/null @@ -1,16 +0,0 @@ ---- -- name: Calico-rr | Pre-upgrade tasks - include_tasks: pre.yml - -- name: Calico-rr | Configuring node tasks - include_tasks: update-node.yml - -- name: Calico-rr | Set label for route reflector - command: >- - {{ bin_dir }}/calicoctl.sh label node {{ inventory_hostname }} - 'i-am-a-route-reflector=true' --overwrite - changed_when: false - register: calico_rr_label - until: calico_rr_label is succeeded - delay: "{{ retry_stagger | random + 3 }}" - 
retries: 10 diff --git a/roles/network_plugin/calico/rr/tasks/pre.yml b/roles/network_plugin/calico/rr/tasks/pre.yml deleted file mode 100644 index f8a9de6118b..00000000000 --- a/roles/network_plugin/calico/rr/tasks/pre.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -- name: Calico-rr | Disable calico-rr service if it exists - service: - name: calico-rr - state: stopped - enabled: false - failed_when: false - -- name: Calico-rr | Delete obsolete files - file: - path: "{{ item }}" - state: absent - with_items: - - /etc/calico/calico-rr.env - - /etc/systemd/system/calico-rr.service diff --git a/roles/network_plugin/calico/rr/tasks/update-node.yml b/roles/network_plugin/calico/rr/tasks/update-node.yml deleted file mode 100644 index fc873ba13fd..00000000000 --- a/roles/network_plugin/calico/rr/tasks/update-node.yml +++ /dev/null @@ -1,50 +0,0 @@ ---- -# Workaround to retry a block of tasks, ansible doesn't have a direct way to do it, -# you can follow the block loop request in: https://github.com/ansible/ansible/issues/46203 -- name: Calico-rr | Configure route reflector - block: - - name: Set the retry count - set_fact: - retry_count: "{{ 0 if retry_count is undefined else retry_count | int + 1 }}" - - - name: Calico | Set label for route reflector # noqa command-instead-of-shell - shell: "{{ bin_dir }}/calicoctl.sh label node {{ inventory_hostname }} calico-rr-id={{ calico_rr_id }} --overwrite" - changed_when: false - register: calico_rr_id_label - until: calico_rr_id_label is succeeded - delay: "{{ retry_stagger | random + 3 }}" - retries: 10 - when: calico_rr_id is defined - - - name: Calico-rr | Fetch current node object - command: "{{ bin_dir }}/calicoctl.sh get node {{ inventory_hostname }} -ojson" - changed_when: false - register: calico_rr_node - until: calico_rr_node is succeeded - delay: "{{ retry_stagger | random + 3 }}" - retries: 10 - - - name: Calico-rr | Set route reflector cluster ID - # noqa: jinja[spacing] - set_fact: - calico_rr_node_patched: >- - {{ calico_rr_node.stdout | from_json | combine({ 'spec': { 'bgp': - { 'routeReflectorClusterID': cluster_id }}}, recursive=True) }} - - - name: Calico-rr | Configure route reflector # noqa command-instead-of-shell - shell: "{{ bin_dir }}/calicoctl.sh replace -f-" - args: - stdin: "{{ calico_rr_node_patched | to_json }}" - - rescue: - - name: Fail if retry limit is reached - fail: - msg: Ended after 10 retries - when: retry_count | int == 10 - - - name: Retrying node configuration - debug: - msg: "Failed to configure route reflector - Retrying..." 
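The block/rescue structure above, together with the re-include that follows, is the usual Ansible work-around for retrying a whole block of tasks. A stripped-down, hypothetical sketch of the same idiom (the file name, task names, and the placeholder command are illustrative, not taken from this patch):

# retry_block.yml - hypothetical sketch of the retry-a-block pattern used above
- name: Attempt the work, retrying on failure
  block:
    - name: Bump the retry counter carried across includes
      set_fact:
        retry_count: "{{ 0 if retry_count is undefined else retry_count | int + 1 }}"

    - name: Do the fallible work (placeholder command)
      command: /bin/true
      changed_when: false
  rescue:
    - name: Give up once the retry budget is exhausted
      fail:
        msg: Ended after 10 retries
      when: retry_count | int == 10

    - name: Re-enter this same file to retry the block
      include_tasks: retry_block.yml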
- - - name: Retry node configuration - include_tasks: update-node.yml diff --git a/roles/network_plugin/calico/tasks/calico_apiserver_certs.yml b/roles/network_plugin/calico/tasks/calico_apiserver_certs.yml deleted file mode 100644 index d42917c4eca..00000000000 --- a/roles/network_plugin/calico/tasks/calico_apiserver_certs.yml +++ /dev/null @@ -1,60 +0,0 @@ ---- -- name: Calico | Check if calico apiserver exists - command: "{{ kubectl }} -n calico-apiserver get secret calico-apiserver-certs" - register: calico_apiserver_secret - changed_when: false - failed_when: false - -- name: Calico | Create ns manifests - template: - src: "calico-apiserver-ns.yml.j2" - dest: "{{ kube_config_dir }}/calico-apiserver-ns.yml" - mode: "0644" - -- name: Calico | Apply ns manifests - kube: - kubectl: "{{ bin_dir }}/kubectl" - filename: "{{ kube_config_dir }}/calico-apiserver-ns.yml" - state: "latest" - -- name: Calico | Ensure calico certs dir - file: - path: /etc/calico/certs - state: directory - mode: "0755" - when: calico_apiserver_secret.rc != 0 - -- name: Calico | Copy ssl script for apiserver certs - template: - src: make-ssl-calico.sh.j2 - dest: "{{ bin_dir }}/make-ssl-apiserver.sh" - mode: "0755" - when: calico_apiserver_secret.rc != 0 - -- name: Calico | Copy ssl config for apiserver certs - copy: - src: openssl.conf - dest: /etc/calico/certs/openssl.conf - mode: "0644" - when: calico_apiserver_secret.rc != 0 - -- name: Calico | Generate apiserver certs - command: >- - {{ bin_dir }}/make-ssl-apiserver.sh - -f /etc/calico/certs/openssl.conf - -c {{ kube_cert_dir }} - -d /etc/calico/certs - -s apiserver - when: calico_apiserver_secret.rc != 0 - -- name: Calico | Create calico apiserver generic secrets - command: >- - {{ kubectl }} -n calico-apiserver - create secret generic {{ item.name }} - --from-file={{ item.cert }} - --from-file={{ item.key }} - with_items: - - name: calico-apiserver-certs - cert: /etc/calico/certs/apiserver.crt - key: /etc/calico/certs/apiserver.key - when: calico_apiserver_secret.rc != 0 diff --git a/roles/network_plugin/calico/tasks/check.yml b/roles/network_plugin/calico/tasks/check.yml deleted file mode 100644 index d512a9648b6..00000000000 --- a/roles/network_plugin/calico/tasks/check.yml +++ /dev/null @@ -1,235 +0,0 @@ ---- -- name: Stop if legacy encapsulation variables are detected (ipip) - assert: - that: - - ipip is not defined - msg: "'ipip' configuration variable is deprecated, please configure your inventory with 'calico_ipip_mode' set to 'Always' or 'CrossSubnet' according to your specific needs" - run_once: true - delegate_to: "{{ groups['kube_control_plane'][0] }}" - -- name: Stop if legacy encapsulation variables are detected (ipip_mode) - assert: - that: - - ipip_mode is not defined - msg: "'ipip_mode' configuration variable is deprecated, please configure your inventory with 'calico_ipip_mode' set to 'Always' or 'CrossSubnet' according to your specific needs" - run_once: true - delegate_to: "{{ groups['kube_control_plane'][0] }}" - -- name: Stop if legacy encapsulation variables are detected (calcio_ipam_autoallocateblocks) - assert: - that: - - calcio_ipam_autoallocateblocks is not defined - msg: "'calcio_ipam_autoallocateblocks' configuration variable is deprecated, it's a typo, please configure your inventory with 'calico_ipam_autoallocateblocks' set to 'true' or 'false' according to your specific needs" - run_once: true - delegate_to: "{{ groups['kube_control_plane'][0] }}" - - -- name: Stop if supported Calico versions - assert: - that: - - 
"calico_version in calico_crds_archive_checksums.no_arch.keys()" - msg: "Calico version not supported {{ calico_version }} not in {{ calico_crds_archive_checksums.no_arch.keys() }}" - run_once: true - delegate_to: "{{ groups['kube_control_plane'][0] }}" - -- name: Check if calicoctl.sh exists - stat: - path: "{{ bin_dir }}/calicoctl.sh" - register: calicoctl_sh_exists - run_once: true - delegate_to: "{{ groups['kube_control_plane'][0] }}" - -- name: Check if calico ready - command: "{{ bin_dir }}/calicoctl.sh get ClusterInformation default" - register: calico_ready - run_once: true - ignore_errors: true - retries: 5 - delay: 10 - until: calico_ready.rc == 0 - delegate_to: "{{ groups['kube_control_plane'][0] }}" - when: calicoctl_sh_exists.stat.exists - -- name: Check that current calico version is enough for upgrade - run_once: true - delegate_to: "{{ groups['kube_control_plane'][0] }}" - when: calicoctl_sh_exists.stat.exists and calico_ready.rc == 0 - block: - - name: Get current calico version - shell: "set -o pipefail && {{ bin_dir }}/calicoctl.sh version | grep 'Client Version:' | awk '{ print $3}'" - args: - executable: /bin/bash - register: calico_version_on_server - changed_when: false - - - name: Assert that current calico version is enough for upgrade - assert: - that: - - calico_version_on_server.stdout.removeprefix('v') is version(calico_min_version_required, '>=') - msg: > - Your version of calico is not fresh enough for upgrade. - Minimum version is {{ calico_min_version_required }} supported by the previous kubespray release. - But current version is {{ calico_version_on_server.stdout }}. - -- name: "Check that cluster_id is set and a valid IPv4 address if calico_rr enabled" - assert: - that: - - cluster_id is defined - - cluster_id is ansible.utils.ipv4 - msg: "A unique cluster_id is required if using calico_rr, and it must be a valid IPv4 address" - when: - - peer_with_calico_rr - - inventory_hostname == groups['kube_control_plane'][0] - run_once: true - delegate_to: "{{ groups['kube_control_plane'][0] }}" - -- name: "Check that calico_rr nodes are in k8s_cluster group" - assert: - that: - - '"k8s_cluster" in group_names' - msg: "calico_rr must be a child group of k8s_cluster group" - when: - - '"calico_rr" in group_names' - run_once: true - delegate_to: "{{ groups['kube_control_plane'][0] }}" - -- name: "Check vars defined correctly" - assert: - that: - - "calico_pool_name is defined" - - "calico_pool_name is match('^[a-zA-Z0-9-_\\\\.]{2,63}$')" - msg: "calico_pool_name contains invalid characters" - run_once: true - delegate_to: "{{ groups['kube_control_plane'][0] }}" - -- name: "Check calico network backend defined correctly" - assert: - that: - - "calico_network_backend in ['bird', 'vxlan', 'none']" - msg: "calico network backend is not 'bird', 'vxlan' or 'none'" - run_once: true - delegate_to: "{{ groups['kube_control_plane'][0] }}" - -- name: "Check ipip and vxlan mode defined correctly" - run_once: true - delegate_to: "{{ groups['kube_control_plane'][0] }}" - assert: - that: - - "calico_ipip_mode in ['Always', 'CrossSubnet', 'Never']" - - "calico_vxlan_mode in ['Always', 'CrossSubnet', 'Never']" - msg: "calico inter host encapsulation mode is not 'Always', 'CrossSubnet' or 'Never'" - -- name: "Check ipip and vxlan mode if simultaneously enabled" - assert: - that: - - "calico_vxlan_mode in ['Never']" - msg: "IP in IP and VXLAN mode is mutualy exclusive modes" - when: - - "calico_ipip_mode in ['Always', 'CrossSubnet']" - run_once: true - delegate_to: "{{ 
groups['kube_control_plane'][0] }}" - -- name: "Check ipip and vxlan mode if simultaneously enabled" - assert: - that: - - "calico_ipip_mode in ['Never']" - msg: "IP in IP and VXLAN mode is mutualy exclusive modes" - when: - - "calico_vxlan_mode in ['Always', 'CrossSubnet']" - run_once: true - delegate_to: "{{ groups['kube_control_plane'][0] }}" - -- name: "Get Calico {{ calico_pool_name }} configuration" - command: "{{ bin_dir }}/calicoctl.sh get ipPool {{ calico_pool_name }} -o json" - failed_when: false - changed_when: false - check_mode: false - register: calico - run_once: true - when: ipv4_stack | bool - delegate_to: "{{ groups['kube_control_plane'][0] }}" - -- name: "Set calico_pool_conf" - set_fact: - calico_pool_conf: '{{ calico.stdout | from_json }}' - when: - - ipv4_stack | bool - - calico is defined - - calico.rc == 0 and calico.stdout - run_once: true - delegate_to: "{{ groups['kube_control_plane'][0] }}" - -- name: "Check if inventory match current cluster configuration" - assert: - that: - - calico_pool_conf.spec.blockSize | int == calico_pool_blocksize | int - - calico_pool_conf.spec.cidr == (calico_pool_cidr | default(kube_pods_subnet)) - - not calico_pool_conf.spec.ipipMode is defined or calico_pool_conf.spec.ipipMode == calico_ipip_mode - - not calico_pool_conf.spec.vxlanMode is defined or calico_pool_conf.spec.vxlanMode == calico_vxlan_mode - msg: "Your inventory doesn't match the current cluster configuration" - when: - - ipv4_stack | bool - - calico_pool_conf is defined - run_once: true - delegate_to: "{{ groups['kube_control_plane'][0] }}" - -- name: "Get Calico {{ calico_pool_name }}-ipv6 configuration" - command: "{{ bin_dir }}/calicoctl.sh get ipPool {{ calico_pool_name }}-ipv6 -o json" - failed_when: false - changed_when: false - check_mode: false - register: calico_ipv6 - run_once: true - when: ipv6_stack | bool - delegate_to: "{{ groups['kube_control_plane'][0] }}" - -- name: "Set calico_pool_ipv6_conf" - set_fact: - calico_pool_conf: '{{ calico_ipv6.stdout | from_json }}' - when: - - ipv6_stack | bool - - alico_ipv6 is defined - - calico_ipv6.rc == 0 and calico_ipv6.stdout - run_once: true - delegate_to: "{{ groups['kube_control_plane'][0] }}" - -- name: "Check if ipv6 inventory match current cluster configuration" - assert: - that: - - calico_pool_conf.spec.blockSize | int == calico_pool_blocksize_ipv6 | int - - calico_pool_conf.spec.cidr == (calico_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6)) - - not calico_pool_conf.spec.ipipMode is defined or calico_pool_conf.spec.ipipMode == calico_ipip_mode_ipv6 - - not calico_pool_conf.spec.vxlanMode is defined or calico_pool_conf.spec.vxlanMode == calico_vxlan_mode_ipv6 - msg: "Your ipv6 inventory doesn't match the current cluster configuration" - when: - - ipv6_stack | bool - - calico_pool_ipv6_conf is defined - run_once: true - delegate_to: "{{ groups['kube_control_plane'][0] }}" - -- name: "Check kdd calico_datastore if calico_apiserver_enabled" - assert: - that: calico_datastore == "kdd" - msg: "When using calico apiserver you need to use the kubernetes datastore" - when: - - calico_apiserver_enabled - run_once: true - delegate_to: "{{ groups['kube_control_plane'][0] }}" - -- name: "Check kdd calico_datastore if typha_enabled" - assert: - that: calico_datastore == "kdd" - msg: "When using typha you need to use the kubernetes datastore" - when: - - typha_enabled - run_once: true - delegate_to: "{{ groups['kube_control_plane'][0] }}" - -- name: "Check ipip mode is Never for calico ipv6" - assert: - that: - - 
"calico_ipip_mode_ipv6 in ['Never']" - msg: "Calico doesn't support ipip tunneling for the IPv6" - when: ipv6_stack | bool - run_once: true - delegate_to: "{{ groups['kube_control_plane'][0] }}" diff --git a/roles/network_plugin/calico/tasks/install.yml b/roles/network_plugin/calico/tasks/install.yml deleted file mode 100644 index 6b5d483b55e..00000000000 --- a/roles/network_plugin/calico/tasks/install.yml +++ /dev/null @@ -1,510 +0,0 @@ ---- -- name: Calico | Install Wireguard packages - package: - name: "{{ item }}" - state: present - with_items: "{{ calico_wireguard_packages }}" - register: calico_package_install - until: calico_package_install is succeeded - retries: 4 - when: calico_wireguard_enabled - -- name: Calico | Copy calicoctl binary from download dir - copy: - src: "{{ downloads.calicoctl.dest }}" - dest: "{{ bin_dir }}/calicoctl" - mode: "0755" - remote_src: true - -- name: Calico | Create calico certs directory - file: - dest: "{{ calico_cert_dir }}" - state: directory - mode: "0750" - owner: root - group: root - when: calico_datastore == "etcd" - -- name: Calico | Link etcd certificates for calico-node - file: - src: "{{ etcd_cert_dir }}/{{ item.s }}" - dest: "{{ calico_cert_dir }}/{{ item.d }}" - state: hard - mode: "0640" - force: true - with_items: - - {s: "{{ kube_etcd_cacert_file }}", d: "ca_cert.crt"} - - {s: "{{ kube_etcd_cert_file }}", d: "cert.crt"} - - {s: "{{ kube_etcd_key_file }}", d: "key.pem"} - when: calico_datastore == "etcd" - -- name: Calico | Generate typha certs - include_tasks: typha_certs.yml - when: - - typha_secure - - inventory_hostname == groups['kube_control_plane'][0] - -- name: Calico | Generate apiserver certs - include_tasks: calico_apiserver_certs.yml - when: - - calico_apiserver_enabled - - inventory_hostname == groups['kube_control_plane'][0] - -- name: Calico | Install calicoctl wrapper script - template: - src: "calicoctl.{{ calico_datastore }}.sh.j2" - dest: "{{ bin_dir }}/calicoctl.sh" - mode: "0755" - owner: root - group: root - -- name: Calico | wait for etcd - uri: - url: "{{ etcd_access_addresses.split(',') | first }}/health" - validate_certs: false - client_cert: "{{ calico_cert_dir }}/cert.crt" - client_key: "{{ calico_cert_dir }}/key.pem" - register: result - until: result.status == 200 or result.status == 401 - retries: 10 - delay: 5 - run_once: true - when: calico_datastore == "etcd" - -- name: Calico | Check if calico network pool has already been configured - # noqa risky-shell-pipe - grep will exit 1 if no match found - shell: > - {{ bin_dir }}/calicoctl.sh get ippool | grep -w "{{ calico_pool_cidr | default(kube_pods_subnet) }}" | wc -l - args: - executable: /bin/bash - register: calico_conf - retries: 4 - until: calico_conf.rc == 0 - delay: "{{ retry_stagger | random + 3 }}" - changed_when: false - when: - - inventory_hostname == groups['kube_control_plane'][0] - - ipv4_stack | bool - -- name: Calico | Ensure that calico_pool_cidr is within kube_pods_subnet when defined - assert: - that: "[calico_pool_cidr] | ansible.utils.ipaddr(kube_pods_subnet) | length == 1" - msg: "{{ calico_pool_cidr }} is not within or equal to {{ kube_pods_subnet }}" - when: - - inventory_hostname == groups['kube_control_plane'][0] - - ipv4_stack | bool - - calico_pool_cidr is defined - - 'calico_conf.stdout == "0"' - -- name: Calico | Check if calico IPv6 network pool has already been configured - # noqa risky-shell-pipe - grep will exit 1 if no match found - shell: > - {{ bin_dir }}/calicoctl.sh get ippool | grep -w "{{ calico_pool_cidr_ipv6 | 
default(kube_pods_subnet_ipv6) }}" | wc -l - args: - executable: /bin/bash - register: calico_conf_ipv6 - retries: 4 - until: calico_conf_ipv6.rc == 0 - delay: "{{ retry_stagger | random + 3 }}" - changed_when: false - when: - - inventory_hostname == groups['kube_control_plane'][0] - - ipv6_stack - -- name: Calico | Ensure that calico_pool_cidr_ipv6 is within kube_pods_subnet_ipv6 when defined - assert: - that: "[calico_pool_cidr_ipv6] | ansible.utils.ipaddr(kube_pods_subnet_ipv6) | length == 1" - msg: "{{ calico_pool_cidr_ipv6 }} is not within or equal to {{ kube_pods_subnet_ipv6 }}" - when: - - inventory_hostname == groups['kube_control_plane'][0] - - ipv6_stack | bool - - calico_conf_ipv6.stdout is defined and calico_conf_ipv6.stdout == "0" - - calico_pool_cidr_ipv6 is defined - -- name: Calico | kdd specific configuration - when: - - ('kube_control_plane' in group_names) - - calico_datastore == "kdd" - block: - - name: Calico | Check if extra directory is needed - stat: - path: "{{ local_release_dir }}/calico-{{ calico_version }}-kdd-crds/{{ 'kdd' if (calico_version is version('3.22.3', '<')) else 'crd' }}" - register: kdd_path - - name: Calico | Set kdd path when calico < v3.22.3 - set_fact: - calico_kdd_path: "{{ local_release_dir }}/calico-{{ calico_version }}-kdd-crds{{ '/kdd' if kdd_path.stat.exists is defined and kdd_path.stat.exists }}" - when: - - calico_version is version('3.22.3', '<') - - name: Calico | Set kdd path when calico > 3.22.2 - set_fact: - calico_kdd_path: "{{ local_release_dir }}/calico-{{ calico_version }}-kdd-crds{{ '/crd' if kdd_path.stat.exists is defined and kdd_path.stat.exists }}" - when: - - calico_version is version('3.22.2', '>') - - name: Calico | Create calico manifests for kdd - assemble: - src: "{{ calico_kdd_path }}" - dest: "{{ kube_config_dir }}/kdd-crds.yml" - mode: "0644" - delimiter: "---\n" - regexp: ".*\\.yaml" - remote_src: true - - - name: Calico | Create Calico Kubernetes datastore resources - kube: - kubectl: "{{ bin_dir }}/kubectl" - filename: "{{ kube_config_dir }}/kdd-crds.yml" - state: "latest" - register: kubectl_result - until: kubectl_result is succeeded - retries: 5 - when: - - inventory_hostname == groups['kube_control_plane'][0] - -- name: Calico | Configure Felix - when: - - inventory_hostname == groups['kube_control_plane'][0] - block: - - name: Calico | Get existing FelixConfiguration - command: "{{ bin_dir }}/calicoctl.sh get felixconfig default -o json" - register: _felix_cmd - ignore_errors: true - changed_when: false - - - name: Calico | Set kubespray FelixConfiguration - set_fact: - _felix_config: > - { - "kind": "FelixConfiguration", - "apiVersion": "projectcalico.org/v3", - "metadata": { - "name": "default", - }, - "spec": { - "ipipEnabled": {{ calico_ipip_mode != 'Never' }}, - "reportingInterval": "{{ calico_felix_reporting_interval }}", - "bpfLogLevel": "{{ calico_bpf_log_level }}", - "bpfEnabled": {{ calico_bpf_enabled | bool }}, - "bpfExternalServiceMode": "{{ calico_bpf_service_mode }}", - "wireguardEnabled": {{ calico_wireguard_enabled | bool }}, - "logSeverityScreen": "{{ calico_felix_log_severity_screen }}", - "vxlanEnabled": {{ calico_vxlan_mode != 'Never' }}, - "featureDetectOverride": "{{ calico_feature_detect_override }}", - "floatingIPs": "{{ calico_felix_floating_ips }}" - } - } - - - name: Calico | Process FelixConfiguration - set_fact: - _felix_config: "{{ _felix_cmd.stdout | from_json | combine(_felix_config, recursive=True) }}" - when: - - _felix_cmd is success - - - name: Calico | Configure calico 
FelixConfiguration - command: - cmd: "{{ bin_dir }}/calicoctl.sh apply -f -" - stdin: "{{ _felix_config is string | ternary(_felix_config, _felix_config | to_json) }}" - changed_when: false - -- name: Calico | Configure Calico IP Pool - when: - - inventory_hostname == groups['kube_control_plane'][0] - - ipv4_stack | bool - block: - - name: Calico | Get existing calico network pool - command: "{{ bin_dir }}/calicoctl.sh get ippool {{ calico_pool_name }} -o json" - register: _calico_pool_cmd - ignore_errors: true - changed_when: false - - - name: Calico | Set kubespray calico network pool - set_fact: - _calico_pool: > - { - "kind": "IPPool", - "apiVersion": "projectcalico.org/v3", - "metadata": { - "name": "{{ calico_pool_name }}", - }, - "spec": { - "blockSize": {{ calico_pool_blocksize }}, - "cidr": "{{ calico_pool_cidr | default(kube_pods_subnet) }}", - "ipipMode": "{{ calico_ipip_mode }}", - "vxlanMode": "{{ calico_vxlan_mode }}", - "natOutgoing": {{ nat_outgoing | default(false) }} - } - } - - - name: Calico | Process calico network pool - when: - - _calico_pool_cmd is success - block: - - name: Calico | Get current calico network pool blocksize - set_fact: - _calico_blocksize: > - { - "spec": { - "blockSize": {{ (_calico_pool_cmd.stdout | from_json).spec.blockSize }} - } - } - - name: Calico | Merge calico network pool - set_fact: - _calico_pool: "{{ _calico_pool_cmd.stdout | from_json | combine(_calico_pool, _calico_blocksize, recursive=True) }}" - - - name: Calico | Configure calico network pool - command: - cmd: "{{ bin_dir }}/calicoctl.sh apply -f -" - stdin: "{{ _calico_pool is string | ternary(_calico_pool, _calico_pool | to_json) }}" - changed_when: false - -- name: Calico | Configure Calico IPv6 Pool - when: - - inventory_hostname == groups['kube_control_plane'][0] - - ipv6_stack | bool - block: - - name: Calico | Get existing calico ipv6 network pool - command: "{{ bin_dir }}/calicoctl.sh get ippool {{ calico_pool_name }}-ipv6 -o json" - register: _calico_pool_ipv6_cmd - ignore_errors: true - changed_when: false - - - name: Calico | Set kubespray calico network pool - set_fact: - _calico_pool_ipv6: > - { - "kind": "IPPool", - "apiVersion": "projectcalico.org/v3", - "metadata": { - "name": "{{ calico_pool_name }}-ipv6", - }, - "spec": { - "blockSize": {{ calico_pool_blocksize_ipv6 }}, - "cidr": "{{ calico_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6) }}", - "ipipMode": "{{ calico_ipip_mode_ipv6 }}", - "vxlanMode": "{{ calico_vxlan_mode_ipv6 }}", - "natOutgoing": {{ nat_outgoing_ipv6 | default(false) }} - } - } - - - name: Calico | Process calico ipv6 network pool - when: - - _calico_pool_ipv6_cmd is success - block: - - name: Calico | Get current calico ipv6 network pool blocksize - set_fact: - _calico_blocksize_ipv6: > - { - "spec": { - "blockSize": {{ (_calico_pool_ipv6_cmd.stdout | from_json).spec.blockSize }} - } - } - - name: Calico | Merge calico ipv6 network pool - set_fact: - _calico_pool_ipv6: "{{ _calico_pool_ipv6_cmd.stdout | from_json | combine(_calico_pool_ipv6, _calico_blocksize_ipv6, recursive=True) }}" - - - name: Calico | Configure calico ipv6 network pool - command: - cmd: "{{ bin_dir }}/calicoctl.sh apply -f -" - stdin: "{{ _calico_pool_ipv6 is string | ternary(_calico_pool_ipv6, _calico_pool_ipv6 | to_json) }}" - changed_when: false - -- name: Populate Service External IPs - set_fact: - _service_external_ips: "{{ _service_external_ips | default([]) + [{'cidr': item}] }}" - with_items: "{{ calico_advertise_service_external_ips }}" - run_once: true - -- 
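The Populate task above, and its LoadBalancer twin just below, only wrap plain CIDR strings into the {'cidr': ...} maps that the BGPConfiguration spec expects. A hedged sketch of the inventory input they consume (the variable names come from the tasks themselves; the addresses are illustrative documentation ranges, not values from this patch):

# Hypothetical group_vars entries feeding the two Populate tasks:
calico_advertise_service_external_ips:
  - 192.0.2.0/24
calico_advertise_service_loadbalancer_ips:
  - 198.51.100.0/24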
name: Populate Service LoadBalancer IPs
-  set_fact:
-    _service_loadbalancer_ips: "{{ _service_loadbalancer_ips | default([]) + [{'cidr': item}] }}"
-  with_items: "{{ calico_advertise_service_loadbalancer_ips }}"
-  run_once: true
-
-- name: "Determine nodeToNodeMesh needed state"
-  set_fact:
-    nodeToNodeMeshEnabled: "false"
-  when:
-    - peer_with_router | default(false) or peer_with_calico_rr | default(false)
-    - ('k8s_cluster' in group_names)
-  run_once: true
-
-- name: Calico | Configure Calico BGP
-  when:
-    - inventory_hostname == groups['kube_control_plane'][0]
-  block:
-    - name: Calico | Get existing BGP Configuration
-      command: "{{ bin_dir }}/calicoctl.sh get bgpconfig default -o json"
-      register: _bgp_config_cmd
-      ignore_errors: true
-      changed_when: false
-
-    - name: Calico | Set kubespray BGP Configuration
-      set_fact:
-        # noqa: jinja[spacing]
-        _bgp_config: >
-          {
-          "kind": "BGPConfiguration",
-          "apiVersion": "projectcalico.org/v3",
-          "metadata": {
-            "name": "default",
-          },
-          "spec": {
-            "listenPort": {{ calico_bgp_listen_port }},
-            "logSeverityScreen": "Info",
-            {% if not calico_no_global_as_num | default(false) %}"asNumber": {{ global_as_num }},{% endif %}
-            "nodeToNodeMeshEnabled": {{ nodeToNodeMeshEnabled | default('true') }},
-            {% if calico_advertise_cluster_ips | default(false) %}
-            "serviceClusterIPs":
-            {%- if ipv4_stack and ipv6_stack -%}
-            [{"cidr": "{{ kube_service_addresses }}"}, {"cidr": "{{ kube_service_addresses_ipv6 }}"}],
-            {%- elif ipv6_stack -%}
-            [{"cidr": "{{ kube_service_addresses_ipv6 }}"}],
-            {%- else -%}
-            [{"cidr": "{{ kube_service_addresses }}"}],
-            {%- endif -%}
-            {% endif %}
-            {% if calico_advertise_service_loadbalancer_ips | length > 0 %}"serviceLoadBalancerIPs": {{ _service_loadbalancer_ips }},{% endif %}
-            "serviceExternalIPs": {{ _service_external_ips | default([]) }}
-          }
-          }
-
-    - name: Calico | Process BGP Configuration
-      set_fact:
-        _bgp_config: "{{ _bgp_config_cmd.stdout | from_json | combine(_bgp_config, recursive=True) }}"
-      when:
-        - _bgp_config_cmd is success
-
-    - name: Calico | Set up BGP Configuration
-      command:
-        cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
-        stdin: "{{ _bgp_config is string | ternary(_bgp_config, _bgp_config | to_json) }}"
-      changed_when: false
-
-- name: Calico | Create calico manifests
-  template:
-    src: "{{ item.file }}.j2"
-    dest: "{{ kube_config_dir }}/{{ item.file }}"
-    mode: "0644"
-  with_items:
-    - {name: calico-config, file: calico-config.yml, type: cm}
-    - {name: calico-node, file: calico-node.yml, type: ds}
-    - {name: calico, file: calico-node-sa.yml, type: sa}
-    - {name: calico, file: calico-cr.yml, type: clusterrole}
-    - {name: calico, file: calico-crb.yml, type: clusterrolebinding}
-    - {name: kubernetes-services-endpoint, file: kubernetes-services-endpoint.yml, type: cm }
-  register: calico_node_manifests
-  when:
-    - ('kube_control_plane' in group_names)
-    - rbac_enabled or item.type not in rbac_resources
-
-- name: Calico | Create calico manifests for typha
-  template:
-    src: "{{ item.file }}.j2"
-    dest: "{{ kube_config_dir }}/{{ item.file }}"
-    mode: "0644"
-  with_items:
-    - {name: calico, file: calico-typha.yml, type: typha}
-  register: calico_node_typha_manifest
-  when:
-    - ('kube_control_plane' in group_names)
-    - typha_enabled
-
-- name: Calico | get calico apiserver caBundle
-  command: "{{ bin_dir }}/kubectl get secret -n calico-apiserver calico-apiserver-certs -o jsonpath='{.data.apiserver\\.crt}'"
-  changed_when: false
-  register: calico_apiserver_cabundle
-  when:
-    - inventory_hostname == 
groups['kube_control_plane'][0] - - calico_apiserver_enabled - -- name: Calico | set calico apiserver caBundle fact - set_fact: - calico_apiserver_cabundle: "{{ calico_apiserver_cabundle.stdout }}" - when: - - inventory_hostname == groups['kube_control_plane'][0] - - calico_apiserver_enabled - -- name: Calico | Create calico manifests for apiserver - template: - src: "{{ item.file }}.j2" - dest: "{{ kube_config_dir }}/{{ item.file }}" - mode: "0644" - with_items: - - {name: calico, file: calico-apiserver.yml, type: calico-apiserver} - register: calico_apiserver_manifest - when: - - ('kube_control_plane' in group_names) - - calico_apiserver_enabled - -- name: Start Calico resources - kube: - name: "{{ item.item.name }}" - namespace: "kube-system" - kubectl: "{{ bin_dir }}/kubectl" - resource: "{{ item.item.type }}" - filename: "{{ kube_config_dir }}/{{ item.item.file }}" - state: "latest" - with_items: - - "{{ calico_node_manifests.results }}" - - "{{ calico_node_typha_manifest.results }}" - when: - - inventory_hostname == groups['kube_control_plane'][0] - - not item is skipped - loop_control: - label: "{{ item.item.file }}" - -- name: Start Calico apiserver resources - kube: - name: "{{ item.item.name }}" - namespace: "calico-apiserver" - kubectl: "{{ bin_dir }}/kubectl" - resource: "{{ item.item.type }}" - filename: "{{ kube_config_dir }}/{{ item.item.file }}" - state: "latest" - with_items: - - "{{ calico_apiserver_manifest.results }}" - when: - - inventory_hostname == groups['kube_control_plane'][0] - - not item is skipped - loop_control: - label: "{{ item.item.file }}" - -- name: Wait for calico kubeconfig to be created - wait_for: - path: /etc/cni/net.d/calico-kubeconfig - timeout: "{{ calico_kubeconfig_wait_timeout }}" - when: - - inventory_hostname not in groups['kube_control_plane'] - - calico_datastore == "kdd" - -- name: Calico | Create Calico ipam manifests - template: - src: "{{ item.file }}.j2" - dest: "{{ kube_config_dir }}/{{ item.file }}" - mode: "0644" - with_items: - - {name: calico, file: calico-ipamconfig.yml, type: ipam} - when: - - ('kube_control_plane' in group_names) - - calico_datastore == "kdd" - -- name: Calico | Create ipamconfig resources - kube: - kubectl: "{{ bin_dir }}/kubectl" - filename: "{{ kube_config_dir }}/calico-ipamconfig.yml" - state: "latest" - register: resource_result - until: resource_result is succeeded - retries: 4 - when: - - inventory_hostname == groups['kube_control_plane'][0] - - calico_datastore == "kdd" - -- name: Calico | Peer with Calico Route Reflector - include_tasks: peer_with_calico_rr.yml - when: - - peer_with_calico_rr | default(false) - -- name: Calico | Peer with the router - include_tasks: peer_with_router.yml - when: - - peer_with_router | default(false) diff --git a/roles/network_plugin/calico/tasks/main.yml b/roles/network_plugin/calico/tasks/main.yml deleted file mode 100644 index 5921a91f338..00000000000 --- a/roles/network_plugin/calico/tasks/main.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- -- name: Calico Pre tasks - import_tasks: pre.yml - -- name: Calico repos - import_tasks: repos.yml - -- name: Calico install - include_tasks: install.yml diff --git a/roles/network_plugin/calico/tasks/peer_with_calico_rr.yml b/roles/network_plugin/calico/tasks/peer_with_calico_rr.yml deleted file mode 100644 index 53b49c1c4a9..00000000000 --- a/roles/network_plugin/calico/tasks/peer_with_calico_rr.yml +++ /dev/null @@ -1,86 +0,0 @@ ---- -- name: Calico | Set label for groups nodes - command: "{{ bin_dir }}/calicoctl.sh label node {{ 
inventory_hostname }} calico-group-id={{ calico_group_id }} --overwrite" - changed_when: false - register: calico_group_id_label - until: calico_group_id_label is succeeded - delay: "{{ retry_stagger | random + 3 }}" - retries: 10 - when: - - calico_group_id is defined - -- name: Calico | Configure peering with route reflectors at global scope - command: - cmd: "{{ bin_dir }}/calicoctl.sh apply -f -" - # revert when it's already a string - stdin: "{{ stdin is string | ternary(stdin, stdin | to_json) }}" - vars: - stdin: > - {"apiVersion": "projectcalico.org/v3", - "kind": "BGPPeer", - "metadata": { - "name": "{{ calico_rr_id }}-to-node" - }, - "spec": { - "peerSelector": "calico-rr-id == '{{ calico_rr_id }}'", - "nodeSelector": "calico-group-id == '{{ calico_group_id }}'" - }} - register: output - retries: 4 - until: output.rc == 0 - delay: "{{ retry_stagger | random + 3 }}" - when: - - calico_rr_id is defined - - calico_group_id is defined - - ('calico_rr' in group_names) - -- name: Calico | Configure peering with route reflectors at global scope - command: - cmd: "{{ bin_dir }}/calicoctl.sh apply -f -" - # revert when it's already a string - stdin: "{{ stdin is string | ternary(stdin, stdin | to_json) }}" - vars: - stdin: > - {"apiVersion": "projectcalico.org/v3", - "kind": "BGPPeer", - "metadata": { - "name": "peer-to-rrs" - }, - "spec": { - "nodeSelector": "!has(i-am-a-route-reflector)", - "peerSelector": "has(i-am-a-route-reflector)" - }} - register: output - retries: 4 - until: output.rc == 0 - delay: "{{ retry_stagger | random + 3 }}" - with_items: - - "{{ groups['calico_rr'] | default([]) }}" - when: - - inventory_hostname == groups['kube_control_plane'][0] - - calico_rr_id is not defined or calico_group_id is not defined - -- name: Calico | Configure route reflectors to peer with each other - command: - cmd: "{{ bin_dir }}/calicoctl.sh apply -f -" - # revert when it's already a string - stdin: "{{ stdin is string | ternary(stdin, stdin | to_json) }}" - vars: - stdin: > - {"apiVersion": "projectcalico.org/v3", - "kind": "BGPPeer", - "metadata": { - "name": "rr-mesh" - }, - "spec": { - "nodeSelector": "has(i-am-a-route-reflector)", - "peerSelector": "has(i-am-a-route-reflector)" - }} - register: output - retries: 4 - until: output.rc == 0 - delay: "{{ retry_stagger | random + 3 }}" - with_items: - - "{{ groups['calico_rr'] | default([]) }}" - when: - - inventory_hostname == groups['kube_control_plane'][0] diff --git a/roles/network_plugin/calico/tasks/peer_with_router.yml b/roles/network_plugin/calico/tasks/peer_with_router.yml deleted file mode 100644 index ec4104bbe6a..00000000000 --- a/roles/network_plugin/calico/tasks/peer_with_router.yml +++ /dev/null @@ -1,116 +0,0 @@ ---- -- name: Calico | Configure peering with router(s) at global scope - command: - cmd: "{{ bin_dir }}/calicoctl.sh apply -f -" - stdin: "{{ stdin is string | ternary(stdin, stdin | to_json) }}" - vars: - stdin: > - {"apiVersion": "projectcalico.org/v3", - "kind": "BGPPeer", - "metadata": { - "name": "global-{{ item.name | default(item.router_id | replace(':', '-')) }}" - }, - "spec": { - "asNumber": "{{ item.as }}", - "peerIP": "{{ item.router_id }}" - }} - register: output - retries: 4 - until: output.rc == 0 - delay: "{{ retry_stagger | random + 3 }}" - with_items: - - "{{ peers | default([]) | selectattr('scope', 'defined') | selectattr('scope', 'equalto', 'global') | list }}" - when: - - inventory_hostname == groups['kube_control_plane'][0] - -- name: Calico | Get node for per node peering - command: - 
cmd: "{{ bin_dir }}/calicoctl.sh get node {{ inventory_hostname }}" - register: output_get_node - when: - - ('k8s_cluster' in group_names) - - local_as is defined - - groups['calico_rr'] | default([]) | length == 0 - delegate_to: "{{ groups['kube_control_plane'][0] }}" - -- name: Calico | Patch node asNumber for per node peering - command: - cmd: |- - {{ bin_dir }}/calicoctl.sh patch node "{{ inventory_hostname }}" --patch '{{ patch is string | ternary(patch, patch | to_json) }}' - vars: - patch: > - {"spec": { - "bgp": { - "asNumber": "{{ local_as }}" - }, - "orchRefs": [{"nodeName": "{{ inventory_hostname }}", "orchestrator": "k8s"}] - }} - register: output - retries: 0 - until: output.rc == 0 - delay: "{{ retry_stagger | random + 3 }}" - when: - - ('k8s_cluster' in group_names) - - local_as is defined - - groups['calico_rr'] | default([]) | length == 0 - - output_get_node.rc == 0 - -- name: Calico | Configure node asNumber for per node peering - command: - cmd: "{{ bin_dir }}/calicoctl.sh apply -f -" - stdin: "{{ stdin is string | ternary(stdin, stdin | to_json) }}" - vars: - stdin: > - {"apiVersion": "projectcalico.org/v3", - "kind": "Node", - "metadata": { - "name": "{{ inventory_hostname }}" - }, - "spec": { - "bgp": { - "asNumber": "{{ local_as }}" - }, - "orchRefs":[{"nodeName":"{{ inventory_hostname }}","orchestrator":"k8s"}] - }} - register: output - retries: 4 - until: output.rc == 0 - delay: "{{ retry_stagger | random + 3 }}" - when: - - ('k8s_cluster' in group_names) - - local_as is defined - - groups['calico_rr'] | default([]) | length == 0 - - output_get_node.rc != 0 - -- name: Calico | Configure peering with router(s) at node scope - command: - cmd: "{{ bin_dir }}/calicoctl.sh apply -f -" - stdin: "{{ stdin is string | ternary(stdin, stdin | to_json) }}" - vars: - stdin: > - {"apiVersion": "projectcalico.org/v3", - "kind": "BGPPeer", - "metadata": { - "name": "{{ inventory_hostname }}-{{ item.name | default(item.router_id | replace(':', '-')) }}" - }, - "spec": { - "asNumber": "{{ item.as }}", - "node": "{{ inventory_hostname }}", - "peerIP": "{{ item.router_id }}", - {% if calico_version is version('3.26.0', '>=') and (item.filters | default([]) | length > 0) %} - "filters": {{ item.filters }}, - {% endif %} - {% if calico_version is version('3.23.0', '>=') and (item.numallowedlocalasnumbers | default(0) > 0) %} - "numAllowedLocalASNumbers": {{ item.numallowedlocalasnumbers }}, - {% endif %} - "sourceAddress": "{{ item.sourceaddress | default('UseNodeIP') }}" - }} - register: output - retries: 4 - until: output.rc == 0 - delay: "{{ retry_stagger | random + 3 }}" - with_items: - - "{{ peers | default([]) | selectattr('scope', 'undefined') | list | union(peers | default([]) | selectattr('scope', 'defined') | selectattr('scope', 'equalto', 'node') | list ) }}" - delegate_to: "{{ groups['kube_control_plane'][0] }}" - when: - - ('k8s_cluster' in group_names) diff --git a/roles/network_plugin/calico/tasks/pre.yml b/roles/network_plugin/calico/tasks/pre.yml deleted file mode 100644 index f3f7797cb34..00000000000 --- a/roles/network_plugin/calico/tasks/pre.yml +++ /dev/null @@ -1,36 +0,0 @@ ---- -- name: Slurp CNI config - slurp: - src: /etc/cni/net.d/10-calico.conflist - register: calico_cni_config_slurp - failed_when: false - -- name: Gather calico facts - tags: - - facts - when: calico_cni_config_slurp.content is defined - block: - - name: Set fact calico_cni_config from slurped CNI config - set_fact: - calico_cni_config: "{{ calico_cni_config_slurp['content'] | b64decode | 
from_json }}" - - name: Set fact calico_datastore to etcd if needed - set_fact: - calico_datastore: etcd - when: - - "'plugins' in calico_cni_config" - - "'etcd_endpoints' in calico_cni_config.plugins.0" - -- name: Calico | Gather os specific variables - include_vars: "{{ item }}" - with_first_found: - - files: - - "{{ ansible_distribution | lower }}-{{ ansible_distribution_version | lower | replace('/', '_') }}.yml" - - "{{ ansible_distribution | lower }}-{{ ansible_distribution_release }}.yml" - - "{{ ansible_distribution | lower }}-{{ ansible_distribution_major_version | lower | replace('/', '_') }}.yml" - - "{{ ansible_distribution | lower }}.yml" - - "{{ ansible_os_family | lower }}-{{ ansible_architecture }}.yml" - - "{{ ansible_os_family | lower }}.yml" - - defaults.yml - paths: - - ../vars - skip: true diff --git a/roles/network_plugin/calico/tasks/repos.yml b/roles/network_plugin/calico/tasks/repos.yml deleted file mode 100644 index 7eba916bbab..00000000000 --- a/roles/network_plugin/calico/tasks/repos.yml +++ /dev/null @@ -1,21 +0,0 @@ ---- -- name: Calico | Add wireguard yum repo - when: - - calico_wireguard_enabled - block: - - - name: Calico | Add wireguard yum repo - yum_repository: - name: copr:copr.fedorainfracloud.org:jdoss:wireguard - file: _copr:copr.fedorainfracloud.org:jdoss:wireguard - description: Copr repo for wireguard owned by jdoss - baseurl: "{{ calico_wireguard_repo }}" - gpgcheck: true - gpgkey: https://download.copr.fedorainfracloud.org/results/jdoss/wireguard/pubkey.gpg - skip_if_unavailable: true - enabled: true - repo_gpgcheck: false - when: - - ansible_os_family in ['RedHat'] - - ansible_distribution not in ['Fedora'] - - ansible_facts['distribution_major_version'] | int < 9 diff --git a/roles/network_plugin/calico/tasks/reset.yml b/roles/network_plugin/calico/tasks/reset.yml deleted file mode 100644 index 16c85097710..00000000000 --- a/roles/network_plugin/calico/tasks/reset.yml +++ /dev/null @@ -1,30 +0,0 @@ ---- -- name: Reset | check vxlan.calico network device - stat: - path: /sys/class/net/vxlan.calico - get_attributes: false - get_checksum: false - get_mime: false - register: vxlan - -- name: Reset | remove the network vxlan.calico device created by calico - command: ip link del vxlan.calico - when: vxlan.stat.exists - -- name: Reset | check dummy0 network device - stat: - path: /sys/class/net/dummy0 - get_attributes: false - get_checksum: false - get_mime: false - register: dummy0 - -- name: Reset | remove the network device created by calico - command: ip link del dummy0 - when: dummy0.stat.exists - -- name: Reset | get and remove remaining routes set by bird - shell: set -o pipefail && ip route show proto bird | xargs -i bash -c "ip route del {} proto bird " - args: - executable: /bin/bash - changed_when: false diff --git a/roles/network_plugin/calico/tasks/typha_certs.yml b/roles/network_plugin/calico/tasks/typha_certs.yml deleted file mode 100644 index ad87f5a024c..00000000000 --- a/roles/network_plugin/calico/tasks/typha_certs.yml +++ /dev/null @@ -1,52 +0,0 @@ ---- -- name: Calico | Check if typha-server exists - command: "{{ kubectl }} -n kube-system get secret typha-server" - register: typha_server_secret - changed_when: false - failed_when: false - -- name: Calico | Ensure calico certs dir - file: - path: /etc/calico/certs - state: directory - mode: "0755" - when: typha_server_secret.rc != 0 - -- name: Calico | Copy ssl script for typha certs - template: - src: make-ssl-calico.sh.j2 - dest: "{{ bin_dir }}/make-ssl-typha.sh" - mode: "0755" 
- - when: typha_server_secret.rc != 0 - -- name: Calico | Copy ssl config for typha certs - copy: - src: openssl.conf - dest: /etc/calico/certs/openssl.conf - mode: "0644" - when: typha_server_secret.rc != 0 - -- name: Calico | Generate typha certs - command: >- - {{ bin_dir }}/make-ssl-typha.sh - -f /etc/calico/certs/openssl.conf - -c {{ kube_cert_dir }} - -d /etc/calico/certs - -s typha - when: typha_server_secret.rc != 0 - -- name: Calico | Create typha tls secrets - command: >- - {{ kubectl }} -n kube-system - create secret tls {{ item.name }} - --cert {{ item.cert }} - --key {{ item.key }} - with_items: - - name: typha-server - cert: /etc/calico/certs/typha-server.crt - key: /etc/calico/certs/typha-server.key - - name: typha-client - cert: /etc/calico/certs/typha-client.crt - key: /etc/calico/certs/typha-client.key - when: typha_server_secret.rc != 0 diff --git a/roles/network_plugin/calico/templates/calico-apiserver-ns.yml.j2 b/roles/network_plugin/calico/templates/calico-apiserver-ns.yml.j2 deleted file mode 100644 index a1bdfcb4a1a..00000000000 --- a/roles/network_plugin/calico/templates/calico-apiserver-ns.yml.j2 +++ /dev/null @@ -1,10 +0,0 @@ -# This is a tech-preview manifest which installs the Calico API server. Note that this manifest is liable to change -# or be removed in future releases without further warning. -# -# Namespace and namespace-scoped resources. -apiVersion: v1 -kind: Namespace -metadata: - labels: - name: calico-apiserver - name: calico-apiserver diff --git a/roles/network_plugin/calico/templates/calico-apiserver.yml.j2 b/roles/network_plugin/calico/templates/calico-apiserver.yml.j2 deleted file mode 100644 index e49c2b2d08b..00000000000 --- a/roles/network_plugin/calico/templates/calico-apiserver.yml.j2 +++ /dev/null @@ -1,301 +0,0 @@ -# Policy to ensure the API server isn't cut off. Can be modified, but ensure -# that the main API server is always able to reach the Calico API server. 
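# A hedged aside, added as comments (kubectl invocations assume admin access; the
# label, namespace and port are the ones used in the manifest below): because the
# policy selects pods labeled apiserver: "true" and lists a single ingress rule,
# only TCP 5443 is admitted and all other inbound traffic to those pods is denied.
# One way to inspect it once rendered and applied:
#   kubectl -n calico-apiserver get networkpolicy allow-apiserver -o yaml
#   kubectl -n calico-apiserver get pods -l apiserver=true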
-kind: NetworkPolicy -apiVersion: networking.k8s.io/v1 -metadata: - name: allow-apiserver - namespace: calico-apiserver -spec: - podSelector: - matchLabels: - apiserver: "true" - ingress: - - ports: - - protocol: TCP - port: 5443 - ---- - -apiVersion: v1 -kind: Service -metadata: - name: calico-api - namespace: calico-apiserver -spec: - ports: - - name: apiserver - port: 443 - protocol: TCP - targetPort: 5443 - selector: - apiserver: "true" - type: ClusterIP - ---- - -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - apiserver: "true" - k8s-app: calico-apiserver - name: calico-apiserver - namespace: calico-apiserver -spec: - replicas: 1 - selector: - matchLabels: - apiserver: "true" - strategy: - type: Recreate - template: - metadata: - labels: - apiserver: "true" - k8s-app: calico-apiserver - name: calico-apiserver - namespace: calico-apiserver - spec: - containers: - - args: - - --secure-port=5443 - env: - - name: DATASTORE_TYPE - value: kubernetes - image: {{ calico_apiserver_image_repo }}:{{ calico_apiserver_image_tag }} - imagePullPolicy: {{ k8s_image_pull_policy }} - livenessProbe: - httpGet: - path: /version - port: 5443 - scheme: HTTPS - initialDelaySeconds: 90 - periodSeconds: 10 - name: calico-apiserver -{% if calico_version is version('3.28.0', '>=') %} - readinessProbe: - httpGet: - path: /readyz - port: 5443 - scheme: HTTPS - timeoutSeconds: 5 - periodSeconds: 60 -{% else %} - readinessProbe: - exec: - command: - - /code/filecheck - failureThreshold: 5 - initialDelaySeconds: 5 - periodSeconds: 10 -{% endif %} - securityContext: - privileged: false - runAsUser: 0 - volumeMounts: - - mountPath: /code/apiserver.local.config/certificates - name: calico-apiserver-certs - dnsPolicy: ClusterFirst - nodeSelector: - kubernetes.io/os: linux - restartPolicy: Always - serviceAccount: calico-apiserver - serviceAccountName: calico-apiserver - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/control-plane - volumes: - - name: calico-apiserver-certs - secret: - secretName: calico-apiserver-certs - ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: calico-apiserver - namespace: calico-apiserver - ---- - -# Cluster-scoped resources below here. 
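# An illustrative note (commands assume kubectl admin access; names are taken from
# the APIService registration below): once v3.projectcalico.org is served through
# the aggregation layer, Calico resources can be read without calicoctl, e.g.:
#   kubectl get apiservices v3.projectcalico.org
#   kubectl api-resources --api-group=projectcalico.org
#   kubectl get ippools.projectcalico.org -o yaml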
-apiVersion: apiregistration.k8s.io/v1 -kind: APIService -metadata: - name: v3.projectcalico.org -spec: - group: projectcalico.org - groupPriorityMinimum: 1500 - caBundle: {{ calico_apiserver_cabundle }} - service: - name: calico-api - namespace: calico-apiserver - port: 443 - version: v3 - versionPriority: 200 - ---- - -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: calico-crds -rules: -- apiGroups: - - extensions - - networking.k8s.io - - "" - resources: - - networkpolicies - - nodes - - namespaces - - pods - - serviceaccounts - verbs: - - get - - list - - watch -- apiGroups: - - crd.projectcalico.org - resources: - - globalnetworkpolicies - - networkpolicies - - clusterinformations - - hostendpoints - - globalnetworksets - - networksets - - bgpconfigurations - - bgppeers - - bgpfilters - - felixconfigurations - - kubecontrollersconfigurations - - ippools - - ipamconfigs - - ipreservations - - ipamblocks - - blockaffinities - - caliconodestatuses - - tiers - verbs: - - get - - list - - watch - - create - - update - - delete -{% if calico_version is version('3.28.0', '>=') %} -- apiGroups: - - policy - resourceNames: - - calico-apiserver - resources: - - podsecuritypolicies - verbs: - - use -{% endif %} ---- - -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: calico-extension-apiserver-auth-access -rules: -- apiGroups: - - "" - resourceNames: - - extension-apiserver-authentication - resources: - - configmaps - verbs: - - list - - watch - - get -- apiGroups: - - rbac.authorization.k8s.io - resources: - - clusterroles - - clusterrolebindings - - roles - - rolebindings - verbs: - - get - - list - - watch - ---- - -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: calico-webhook-reader -rules: -- apiGroups: - - admissionregistration.k8s.io - resources: - - mutatingwebhookconfigurations - - validatingwebhookconfigurations - verbs: - - get - - list - - watch - ---- - -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: calico-apiserver-access-crds -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: calico-crds -subjects: -- kind: ServiceAccount - name: calico-apiserver - namespace: calico-apiserver - ---- - -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: calico-apiserver-delegate-auth -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:auth-delegator -subjects: -- kind: ServiceAccount - name: calico-apiserver - namespace: calico-apiserver - ---- - -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: calico-apiserver-webhook-reader -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: calico-webhook-reader -subjects: -- kind: ServiceAccount - name: calico-apiserver - namespace: calico-apiserver - ---- - -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: calico-extension-apiserver-auth-access -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: calico-extension-apiserver-auth-access -subjects: -- kind: ServiceAccount - name: calico-apiserver - namespace: calico-apiserver diff --git a/roles/network_plugin/calico/templates/calico-config.yml.j2 b/roles/network_plugin/calico/templates/calico-config.yml.j2 deleted file mode 100644 index 1e87917ea71..00000000000 --- a/roles/network_plugin/calico/templates/calico-config.yml.j2 +++ /dev/null @@ -1,106 +0,0 @@ -kind: ConfigMap 
-apiVersion: v1 -metadata: - name: calico-config - namespace: kube-system -data: -{% if calico_datastore == "etcd" %} - etcd_endpoints: "{{ etcd_access_addresses }}" - etcd_ca: "/calico-secrets/ca_cert.crt" - etcd_cert: "/calico-secrets/cert.crt" - etcd_key: "/calico-secrets/key.pem" -{% elif calico_datastore == "kdd" and typha_enabled %} - # To enable Typha, set this to "calico-typha" *and* set a non-zero value for Typha replicas - # below. We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is - # essential. - typha_service_name: "calico-typha" -{% endif %} -{% if calico_network_backend == 'bird' %} - cluster_type: "kubespray,bgp" - calico_backend: "bird" -{% else %} - cluster_type: "kubespray" - calico_backend: "{{ calico_network_backend }}" -{% endif %} -{% if inventory_hostname in groups['k8s_cluster'] and peer_with_router | default(false) %} - as: "{{ local_as | default(global_as_num) }}" -{% endif -%} - # The CNI network configuration to install on each node. The special - # values in this config will be automatically populated. - cni_network_config: |- - { - "name": "{{ calico_cni_name }}", - "cniVersion":"0.3.1", - "plugins":[ - { - {% if calico_datastore == "kdd" %} - "datastore_type": "kubernetes", - "nodename": "__KUBERNETES_NODE_NAME__", - {% endif %} - "type": "calico", - "log_level": "info", - {% if calico_cni_log_file_path %} - "log_file_path": "{{ calico_cni_log_file_path }}", - {% endif %} - {% if calico_datastore == "etcd" %} - "etcd_endpoints": "{{ etcd_access_addresses }}", - "etcd_cert_file": "{{ calico_cert_dir }}/cert.crt", - "etcd_key_file": "{{ calico_cert_dir }}/key.pem", - "etcd_ca_cert_file": "{{ calico_cert_dir }}/ca_cert.crt", - {% endif %} - {% if calico_ipam_host_local %} - "ipam": { - "type": "host-local", - "subnet": "usePodCidr" - }, - {% else %} - "ipam": { - "type": "calico-ipam", - {% if ipv4_stack %} - "assign_ipv4": "true"{{ ',' if (ipv6_stack and ipv4_stack) }} - {% endif %} - {% if ipv6_stack %} - "assign_ipv6": "true" - {% endif %} - }, - {% endif %} - {% if calico_allow_ip_forwarding %} - "container_settings": { - "allow_ip_forwarding": true - }, - {% endif %} - {% if (calico_feature_control is defined) and (calico_feature_control | length > 0) %} - "feature_control": { - {% for fc in calico_feature_control -%} - {% set fcval = calico_feature_control[fc] -%} - "{{ fc }}": {{ (fcval | string | lower) if (fcval == true or fcval == false) else "\"" + fcval + "\"" }}{{ "," if not loop.last else "" }} - {% endfor -%} - {{- "" }} - }, - {% endif %} - {% if enable_network_policy %} - "policy": { - "type": "k8s" - }, - {% endif %} - {% if calico_mtu is defined and calico_mtu is number %} - "mtu": {{ calico_mtu }}, - {% endif %} - "kubernetes": { - "kubeconfig": "__KUBECONFIG_FILEPATH__" - } - }, - { - "type":"portmap", - "capabilities": { - "portMappings": true - } - }, - { - "type":"bandwidth", - "capabilities": { - "bandwidth": true - } - } - ] - } diff --git a/roles/network_plugin/calico/templates/calico-cr.yml.j2 b/roles/network_plugin/calico/templates/calico-cr.yml.j2 deleted file mode 100644 index 96f59df2955..00000000000 --- a/roles/network_plugin/calico/templates/calico-cr.yml.j2 +++ /dev/null @@ -1,213 +0,0 @@ ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: calico-cni-plugin -rules: - - apiGroups: [""] - resources: - - pods - - nodes - - namespaces - verbs: - - get - - apiGroups: [""] - resources: - - pods/status - verbs: - - patch - - apiGroups: [""] - resources: - - nodes/status - 
verbs: - - update - - apiGroups: ["crd.projectcalico.org"] - resources: - - blockaffinities - - ipamblocks - - ipamhandles - - clusterinformations - - ippools - - ipreservations - - ipamconfigs - verbs: - - get - - list - - create - - update - - delete ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: calico-node - namespace: kube-system -rules: - - apiGroups: [""] - resources: - - pods - - nodes - - namespaces - - configmaps - verbs: - - get - # EndpointSlices are used for Service-based network policy rule - # enforcement. - - apiGroups: ["discovery.k8s.io"] - resources: - - endpointslices - verbs: - - watch - - list - - apiGroups: [""] - resources: - - endpoints - - services - verbs: - - watch - - list -{% if calico_datastore == "kdd" %} - # Used to discover Typhas. - - get -{% endif %} - - apiGroups: [""] - resources: - - nodes/status - verbs: - # Needed for clearing NodeNetworkUnavailable flag. - - patch -{% if calico_datastore == "kdd" %} - # Calico stores some configuration information in node annotations. - - update - # Watch for changes to Kubernetes NetworkPolicies. - - apiGroups: ["networking.k8s.io"] - resources: - - networkpolicies - verbs: - - watch - - list - # Watch for changes to Kubernetes AdminNetworkPolicies. - - apiGroups: ["policy.networking.k8s.io"] - resources: - - adminnetworkpolicies - verbs: - - watch - - list - # Used by Calico for policy information. - - apiGroups: [""] - resources: - - pods - - namespaces - - serviceaccounts - verbs: - - list - - watch - # The CNI plugin patches pods/status. - - apiGroups: [""] - resources: - - pods/status - verbs: - - patch - # Calico monitors various CRDs for config. - - apiGroups: ["crd.projectcalico.org"] - resources: - - globalfelixconfigs - - felixconfigurations - - bgppeers - - bgpfilters - - globalbgpconfigs - - bgpconfigurations - - ippools - - ipreservations - - ipamblocks - - globalnetworkpolicies - - globalnetworksets - - networkpolicies - - networksets - - clusterinformations - - hostendpoints - - blockaffinities - - caliconodestatuses - - tiers - verbs: - - get - - list - - watch - # Calico creates some tiers on startup. - - apiGroups: ["crd.projectcalico.org"] - resources: - - tiers - verbs: - - create - # Calico must create and update some CRDs on startup. - - apiGroups: ["crd.projectcalico.org"] - resources: - - ippools - - felixconfigurations - - clusterinformations - verbs: - - create - - update - # Calico must update some CRDs. - - apiGroups: [ "crd.projectcalico.org" ] - resources: - - caliconodestatuses - verbs: - - update - # Calico stores some configuration information on the node. - - apiGroups: [""] - resources: - - nodes - verbs: - - get - - list - - watch - # These permissions are only required for upgrade from v2.6, and can - # be removed after upgrade or on fresh installations. - - apiGroups: ["crd.projectcalico.org"] - resources: - - bgpconfigurations - - bgppeers - verbs: - - create - - update - # These permissions are required for Calico CNI to perform IPAM allocations. - - apiGroups: ["crd.projectcalico.org"] - resources: - - blockaffinities - - ipamblocks - - ipamhandles - verbs: - - get - - list - - create - - update - - delete - - apiGroups: ["crd.projectcalico.org"] - resources: - - ipamconfigs - verbs: - - get - - create - # Block affinities must also be watchable by confd for route aggregation. - - apiGroups: ["crd.projectcalico.org"] - resources: - - blockaffinities - verbs: - - watch - # The Calico IPAM migration needs to get daemonsets. 
These permissions can be - # removed if not upgrading from an installation using host-local IPAM. - - apiGroups: ["apps"] - resources: - - daemonsets - verbs: - - get -{% endif %} - # Used for creating service account tokens to be used by the CNI plugin - - apiGroups: [""] - resources: - - serviceaccounts/token - resourceNames: - - calico-cni-plugin - verbs: - - create diff --git a/roles/network_plugin/calico/templates/calico-crb.yml.j2 b/roles/network_plugin/calico/templates/calico-crb.yml.j2 deleted file mode 100644 index add99ba5253..00000000000 --- a/roles/network_plugin/calico/templates/calico-crb.yml.j2 +++ /dev/null @@ -1,28 +0,0 @@ ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: calico-node -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: calico-node -subjects: -- kind: ServiceAccount - name: calico-node - namespace: kube-system - ---- - -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: calico-cni-plugin -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: calico-cni-plugin -subjects: -- kind: ServiceAccount - name: calico-cni-plugin - namespace: kube-system diff --git a/roles/network_plugin/calico/templates/calico-ipamconfig.yml.j2 b/roles/network_plugin/calico/templates/calico-ipamconfig.yml.j2 deleted file mode 100644 index af7e2117cef..00000000000 --- a/roles/network_plugin/calico/templates/calico-ipamconfig.yml.j2 +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: crd.projectcalico.org/v1 -kind: IPAMConfig -metadata: - name: default -spec: - autoAllocateBlocks: {{ calico_ipam_autoallocateblocks }} - strictAffinity: {{ calico_ipam_strictaffinity }} - maxBlocksPerHost: {{ calico_ipam_maxblocksperhost }} diff --git a/roles/network_plugin/calico/templates/calico-node-sa.yml.j2 b/roles/network_plugin/calico/templates/calico-node-sa.yml.j2 deleted file mode 100644 index 07433039bdd..00000000000 --- a/roles/network_plugin/calico/templates/calico-node-sa.yml.j2 +++ /dev/null @@ -1,13 +0,0 @@ ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: calico-node - namespace: kube-system - ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: calico-cni-plugin - namespace: kube-system diff --git a/roles/network_plugin/calico/templates/calico-node.yml.j2 b/roles/network_plugin/calico/templates/calico-node.yml.j2 deleted file mode 100644 index ad3eefc40b4..00000000000 --- a/roles/network_plugin/calico/templates/calico-node.yml.j2 +++ /dev/null @@ -1,513 +0,0 @@ ---- -# This manifest installs the calico/node container, as well -# as the Calico CNI plugins and network config on -# each control plane and worker node in a Kubernetes cluster. -kind: DaemonSet -apiVersion: apps/v1 -metadata: - name: calico-node - namespace: kube-system - labels: - k8s-app: calico-node -spec: - selector: - matchLabels: - k8s-app: calico-node - updateStrategy: - type: RollingUpdate - rollingUpdate: - maxUnavailable: 1 - template: - metadata: - labels: - k8s-app: calico-node - annotations: -{% if calico_datastore == "etcd" %} - kubespray.etcd-cert/serial: "{{ etcd_client_cert_serial }}" -{% endif %} -{% if calico_felix_prometheusmetricsenabled %} - prometheus.io/scrape: 'true' - prometheus.io/port: "{{ calico_felix_prometheusmetricsport }}" -{% endif %} - spec: - nodeSelector: - {{ calico_ds_nodeselector }} - priorityClassName: system-node-critical - hostNetwork: true - serviceAccountName: calico-node - tolerations: - # Make sure calico-node gets scheduled on all nodes. 
- - effect: NoSchedule - operator: Exists - # Mark the pod as a critical add-on for rescheduling. - - key: CriticalAddonsOnly - operator: Exists - - effect: NoExecute - operator: Exists - # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force - # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. - terminationGracePeriodSeconds: 0 - initContainers: -{% if calico_datastore == "kdd" and not calico_ipam_host_local %} - # This container performs upgrade from host-local IPAM to calico-ipam. - # It can be deleted if this is a fresh installation, or if you have already - # upgraded to use calico-ipam. - - name: upgrade-ipam - image: {{ calico_cni_image_repo }}:{{ calico_cni_image_tag }} - imagePullPolicy: {{ k8s_image_pull_policy }} - command: ["/opt/cni/bin/calico-ipam", "-upgrade"] - envFrom: - - configMapRef: - # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. - name: kubernetes-services-endpoint - optional: true - env: - - name: KUBERNETES_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: CALICO_NETWORKING_BACKEND - valueFrom: - configMapKeyRef: - name: calico-config - key: calico_backend - volumeMounts: - - mountPath: /var/lib/cni/networks - name: host-local-net-dir - - mountPath: /host/opt/cni/bin - name: cni-bin-dir - securityContext: - privileged: true -{% endif %} - # This container installs the Calico CNI binaries - # and CNI network config file on each node. - - name: install-cni - image: {{ calico_cni_image_repo }}:{{ calico_cni_image_tag }} - imagePullPolicy: {{ k8s_image_pull_policy }} - command: ["/opt/cni/bin/install"] - envFrom: - - configMapRef: - # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. - name: kubernetes-services-endpoint - optional: true - env: - # The CNI network config to install on each node. - - name: CNI_NETWORK_CONFIG - valueFrom: - configMapKeyRef: - name: calico-config - key: cni_network_config - # Name of the CNI config file to create. - - name: CNI_CONF_NAME - value: "10-calico.conflist" -{% if calico_mtu is defined %} - # CNI MTU Config variable - - name: CNI_MTU - value: "{{ calico_veth_mtu | default(calico_mtu) }}" -{% endif %} - # Prevents the container from sleeping forever. - - name: SLEEP - value: "false" -{% if calico_datastore == "etcd" %} - - name: ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints -{% endif %} -{% if calico_datastore == "kdd" %} - # Set the hostname based on the k8s node name. - - name: KUBERNETES_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName -{% endif %} - volumeMounts: - - mountPath: /host/etc/cni/net.d - name: cni-net-dir - - mountPath: /host/opt/cni/bin - name: cni-bin-dir - securityContext: - privileged: true - # This init container mounts the necessary filesystems needed by the BPF data plane - # i.e. bpf at /sys/fs/bpf and cgroup2 at /run/calico/cgroup. Calico-node initialisation is executed - # in best effort fashion, i.e. no failure for errors, to not disrupt pod creation in iptable mode. - - name: "mount-bpffs" - image: {{ calico_node_image_repo }}:{{ calico_node_image_tag }} - imagePullPolicy: {{ k8s_image_pull_policy }} - command: ["calico-node", "-init", "-best-effort"] - volumeMounts: - - mountPath: /sys/fs - name: sys-fs - # Bidirectional is required to ensure that the new mount we make at /sys/fs/bpf propagates to the host - # so that it outlives the init container. 
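# A small host-side check, offered as a sketch (assumes the default /sys/fs paths
# used in this manifest): after calico-node starts, the propagated mount should be
# visible on the node itself, e.g. via
#   findmnt /sys/fs/bpf
# which should report filesystem type "bpf" if the mount survived the init container.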
- mountPropagation: Bidirectional - - mountPath: /var/run/calico - name: var-run-calico - # Bidirectional is required to ensure that the new mount we make at /run/calico/cgroup propagates to the host - # so that it outlives the init container. - mountPropagation: Bidirectional - # Mount /proc/ from host which usually is an init program at /nodeproc. It's needed by mountns binary, - # executed by calico-node, to mount root cgroup2 fs at /run/calico/cgroup to attach CTLB programs correctly. - - mountPath: /nodeproc - name: nodeproc - readOnly: true - securityContext: - privileged: true - containers: - # Runs calico/node container on each Kubernetes node. This - # container programs network policy and routes on each - # host. - - name: calico-node - image: {{ calico_node_image_repo }}:{{ calico_node_image_tag }} - imagePullPolicy: {{ k8s_image_pull_policy }} - envFrom: - - configMapRef: - # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. - name: kubernetes-services-endpoint - optional: true - env: - # The location of the Calico etcd cluster. -{% if calico_datastore == "etcd" %} - - name: ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints - # Location of the CA certificate for etcd. - - name: ETCD_CA_CERT_FILE - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_ca - # Location of the client key for etcd. - - name: ETCD_KEY_FILE - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_key - # Location of the client certificate for etcd. - - name: ETCD_CERT_FILE - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_cert -{% elif calico_datastore == "kdd" %} - # Use Kubernetes API as the backing datastore. - - name: DATASTORE_TYPE - value: "kubernetes" -{% if typha_enabled %} - # Typha support: controlled by the ConfigMap. - - name: FELIX_TYPHAK8SSERVICENAME - valueFrom: - configMapKeyRef: - name: calico-config - key: typha_service_name -{% if typha_secure %} - - name: FELIX_TYPHACN - value: typha-server - - name: FELIX_TYPHACAFILE - value: /etc/typha-ca/ca.crt - - name: FELIX_TYPHACERTFILE - value: /etc/typha-client/typha-client.crt - - name: FELIX_TYPHAKEYFILE - value: /etc/typha-client/typha-client.key -{% endif %} -{% endif %} - # Wait for the datastore. - - name: WAIT_FOR_DATASTORE - value: "true" -{% endif %} -{% if calico_network_backend == 'vxlan' %} - - name: FELIX_VXLANVNI - value: "{{ calico_vxlan_vni }}" - - name: FELIX_VXLANPORT - value: "{{ calico_vxlan_port }}" -{% endif %} - # Choose the backend to use. - - name: CALICO_NETWORKING_BACKEND - valueFrom: - configMapKeyRef: - name: calico-config - key: calico_backend - # Cluster type to identify the deployment type - - name: CLUSTER_TYPE - value: "k8s,bgp" - # Set noderef for node controller. - - name: CALICO_K8S_NODE_REF - valueFrom: - fieldRef: - fieldPath: spec.nodeName - # Disable file logging so `kubectl logs` works. - - name: CALICO_DISABLE_FILE_LOGGING - value: "true" - # Set Felix endpoint to host default action to ACCEPT. 
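# Clarifying note (values per the Felix configuration reference; nothing here is
# kubespray-specific): DefaultEndpointToHostAction accepts ACCEPT, DROP or RETURN.
# The template below falls back to RETURN, which hands the packet back to the host's
# normal iptables chains instead of unconditionally accepting it.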
- - name: FELIX_DEFAULTENDPOINTTOHOSTACTION - value: "{{ calico_endpoint_to_host_action | default('RETURN') }}" - - name: FELIX_HEALTHHOST - value: "{{ calico_healthhost }}" -{% if kube_proxy_mode == 'ipvs' and kube_apiserver_node_port_range is defined %} - - name: FELIX_KUBENODEPORTRANGES - value: "{{ kube_apiserver_node_port_range.split('-')[0] }}:{{ kube_apiserver_node_port_range.split('-')[1] }}" -{% endif %} - - name: FELIX_IPTABLESBACKEND - value: "{{ calico_iptables_backend }}" - - name: FELIX_IPTABLESLOCKTIMEOUTSECS - value: "{{ calico_iptables_lock_timeout_secs }}" - # The default IPv4 pool to create on startup if none exists. Pod IPs will be - # chosen from this range. Changing this value after installation will have - # no effect. This should fall within `--cluster-cidr`. - # - name: CALICO_IPV4POOL_CIDR - # value: "192.168.0.0/16" - - name: CALICO_IPV4POOL_IPIP - value: "{{ calico_ipv4pool_ipip }}" - # Enable or Disable VXLAN on the default IP pool. - - name: CALICO_IPV4POOL_VXLAN - value: "Never" - - name: FELIX_IPV6SUPPORT - value: "{{ ipv6_stack | default(false) }}" - # Set Felix logging to "info" - - name: FELIX_LOGSEVERITYSCREEN - value: "{{ calico_loglevel }}" - # Set Calico startup logging to "error" - - name: CALICO_STARTUP_LOGLEVEL - value: "{{ calico_node_startup_loglevel }}" - # Enable or disable usage report - - name: FELIX_USAGEREPORTINGENABLED - value: "{{ calico_usage_reporting }}" -{% if calico_version is version('3.29.0', '>=') %} - - name: FELIX_NFTABLESMODE - value: "{{ calico_nftable_mode }}" -{% endif %} - # Set MTU for tunnel device used if ipip is enabled -{% if calico_mtu is defined %} - # Set MTU for tunnel device used if ipip is enabled - - name: FELIX_IPINIPMTU - value: "{{ calico_veth_mtu | default(calico_mtu) }}" - # Set MTU for the VXLAN tunnel device. - - name: FELIX_VXLANMTU - value: "{{ calico_veth_mtu | default(calico_mtu) }}" - # Set MTU for the Wireguard tunnel device. 
- - name: FELIX_WIREGUARDMTU - value: "{{ calico_veth_mtu | default(calico_mtu) }}" -{% endif %} - - name: FELIX_CHAININSERTMODE - value: "{{ calico_felix_chaininsertmode }}" - - name: FELIX_PROMETHEUSMETRICSENABLED - value: "{{ calico_felix_prometheusmetricsenabled }}" - - name: FELIX_PROMETHEUSMETRICSPORT - value: "{{ calico_felix_prometheusmetricsport }}" - - name: FELIX_PROMETHEUSGOMETRICSENABLED - value: "{{ calico_felix_prometheusgometricsenabled }}" - - name: FELIX_PROMETHEUSPROCESSMETRICSENABLED - value: "{{ calico_felix_prometheusprocessmetricsenabled }}" -{% if calico_ip_auto_method is defined %} - - name: IP_AUTODETECTION_METHOD - value: "{{ calico_ip_auto_method }}" -{% else %} - - name: NODEIP - valueFrom: - fieldRef: - fieldPath: status.hostIP - - name: IP_AUTODETECTION_METHOD - value: "can-reach=$(NODEIP)" -{% endif %} -{% if ipv4_stack %} - - name: IP - value: "autodetect" -{% else %} - - name: IP - value: none -{% endif %} -{% if ipv6_stack %} - - name: IP6 - value: autodetect -{% endif %} -{% if calico_ip6_auto_method is defined and ipv6_stack %} - - name: IP6_AUTODETECTION_METHOD - value: "{{ calico_ip6_auto_method }}" -{% endif %} -{% if calico_felix_mtu_iface_pattern is defined %} - - name: FELIX_MTUIFACEPATTERN - value: "{{ calico_felix_mtu_iface_pattern }}" -{% endif %} -{% if calico_use_default_route_src_ipaddr | default(false) %} - - name: FELIX_DEVICEROUTESOURCEADDRESS - valueFrom: - fieldRef: - fieldPath: status.hostIP -{% endif %} - - name: NODENAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: FELIX_HEALTHENABLED - value: "true" - - name: FELIX_IGNORELOOSERPF - value: "{{ calico_node_ignorelooserpf }}" - - name: CALICO_MANAGE_CNI - value: "true" -{% if calico_ipam_host_local %} - - name: USE_POD_CIDR - value: "true" -{% endif %} -{% if calico_node_extra_envs is defined %} -{% for key in calico_node_extra_envs %} - - name: {{ key }} - value: "{{ calico_node_extra_envs[key] }}" -{% endfor %} -{% endif %} - securityContext: - privileged: true - resources: - limits: -{% if calico_node_cpu_limit != "0" %} - cpu: {{ calico_node_cpu_limit }} -{% endif %} - memory: {{ calico_node_memory_limit }} - requests: - cpu: {{ calico_node_cpu_requests }} - memory: {{ calico_node_memory_requests }} - lifecycle: - preStop: - exec: - command: - - /bin/calico-node - - -shutdown - livenessProbe: - exec: - command: - - /bin/calico-node - - -felix-live -{% if calico_network_backend == "bird" %} - - -bird-live -{% endif %} - periodSeconds: 10 - initialDelaySeconds: 10 - timeoutSeconds: {{ calico_node_livenessprobe_timeout | default(10) }} - failureThreshold: 6 - readinessProbe: - exec: - command: - - /bin/calico-node -{% if calico_network_backend == "bird" %} - - -bird-ready -{% endif %} - - -felix-ready - periodSeconds: 10 - timeoutSeconds: {{ calico_node_readinessprobe_timeout | default(10) }} - failureThreshold: 6 - volumeMounts: - - mountPath: /lib/modules - name: lib-modules - readOnly: true - - mountPath: /var/run/calico - name: var-run-calico - readOnly: false - - mountPath: /var/lib/calico - name: var-lib-calico - readOnly: false -{% if calico_datastore == "etcd" %} - - mountPath: /calico-secrets - name: etcd-certs - readOnly: true -{% endif %} - - name: xtables-lock - mountPath: /run/xtables.lock - readOnly: false - # For maintaining CNI plugin API credentials. 
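# Cross-reference, as a comment (paths taken from elsewhere in this role): with
# CALICO_MANAGE_CNI set to "true" above, calico-node maintains the credentials in
# /etc/cni/net.d/calico-kubeconfig through this mount; the calicoctl.kdd.sh wrapper
# and the "Wait for calico kubeconfig to be created" task both depend on that file.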
- - mountPath: /host/etc/cni/net.d - name: cni-net-dir - readOnly: false -{% if typha_secure %} - - name: typha-client - mountPath: /etc/typha-client - readOnly: true - - name: typha-cacert - subPath: ca.crt - mountPath: /etc/typha-ca/ca.crt - readOnly: true -{% endif %} - - name: policysync - mountPath: /var/run/nodeagent - # For eBPF mode, we need to be able to mount the BPF filesystem at /sys/fs/bpf so we mount in the - # parent directory. - - name: bpffs - mountPath: /sys/fs/bpf - - name: cni-log-dir - mountPath: /var/log/calico/cni - readOnly: true - volumes: - # Used by calico/node. - - name: lib-modules - hostPath: - path: /lib/modules - - name: var-run-calico - hostPath: - path: /var/run/calico - type: DirectoryOrCreate - - name: var-lib-calico - hostPath: - path: /var/lib/calico - type: DirectoryOrCreate - # Used to install CNI. - - name: cni-net-dir - hostPath: - path: /etc/cni/net.d - - name: cni-bin-dir - hostPath: - path: /opt/cni/bin - type: DirectoryOrCreate -{% if calico_datastore == "etcd" %} - # Mount in the etcd TLS secrets. - - name: etcd-certs - hostPath: - path: "{{ calico_cert_dir }}" -{% endif %} - # Mount the global iptables lock file, used by calico/node - - name: xtables-lock - hostPath: - path: /run/xtables.lock - type: FileOrCreate -{% if calico_datastore == "kdd" and not calico_ipam_host_local %} - # Mount in the directory for host-local IPAM allocations. This is - # used when upgrading from host-local to calico-ipam, and can be removed - # if not using the upgrade-ipam init container. - - name: host-local-net-dir - hostPath: - path: /var/lib/cni/networks -{% endif %} -{% if typha_enabled and typha_secure %} - - name: typha-client - secret: - secretName: typha-client - items: - - key: tls.crt - path: typha-client.crt - - key: tls.key - path: typha-client.key - - name: typha-cacert - hostPath: - path: "/etc/kubernetes/ssl/" -{% endif %} - - name: sys-fs - hostPath: - path: /sys/fs/ - type: DirectoryOrCreate - - name: bpffs - hostPath: - path: /sys/fs/bpf - type: Directory - # mount /proc at /nodeproc to be used by mount-bpffs initContainer to mount root cgroup2 fs. - - name: nodeproc - hostPath: - path: /proc - # Used to access CNI logs. - - name: cni-log-dir - hostPath: - path: /var/log/calico/cni - # Used to create per-pod Unix Domain Sockets - - name: policysync - hostPath: - type: DirectoryOrCreate - path: /var/run/nodeagent diff --git a/roles/network_plugin/calico/templates/calico-typha.yml.j2 b/roles/network_plugin/calico/templates/calico-typha.yml.j2 deleted file mode 100644 index f2cf74f9768..00000000000 --- a/roles/network_plugin/calico/templates/calico-typha.yml.j2 +++ /dev/null @@ -1,186 +0,0 @@ -# This manifest creates a Service, which will be backed by Calico's Typha daemon. -# Typha sits in between Felix and the API server, reducing Calico's load on the API server. - -apiVersion: v1 -kind: Service -metadata: - name: calico-typha - namespace: kube-system - labels: - k8s-app: calico-typha -spec: - ports: - - port: 5473 - protocol: TCP - targetPort: calico-typha - name: calico-typha -{% if typha_prometheusmetricsenabled %} - - port: {{ typha_prometheusmetricsport }} - protocol: TCP - targetPort: http-metrics - name: metrics -{% endif %} - selector: - k8s-app: calico-typha - ---- - -# This manifest creates a Deployment of Typha to back the above service. - -apiVersion: apps/v1 -kind: Deployment -metadata: - name: calico-typha - namespace: kube-system - labels: - k8s-app: calico-typha -spec: - # Number of Typha replicas. 
To enable Typha, set this to a non-zero value *and* set the - # typha_service_name variable in the calico-config ConfigMap above. - # - # We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is essential - # (when using the Kubernetes datastore). Use one replica for every 100-200 nodes. In - # production, we recommend running at least 3 replicas to reduce the impact of rolling upgrade. - replicas: {{ typha_replicas }} - revisionHistoryLimit: 2 - selector: - matchLabels: - k8s-app: calico-typha - template: - metadata: - labels: - k8s-app: calico-typha - annotations: - cluster-autoscaler.kubernetes.io/safe-to-evict: 'true' -{% if typha_prometheusmetricsenabled %} - prometheus.io/scrape: 'true' - prometheus.io/port: "{{ typha_prometheusmetricsport }}" -{% endif %} - spec: - nodeSelector: - kubernetes.io/os: linux - hostNetwork: true - tolerations: - - key: node-role.kubernetes.io/control-plane - operator: Exists - effect: NoSchedule - # Since Calico can't network a pod until Typha is up, we need to run Typha itself - # as a host-networked pod. - serviceAccountName: calico-node - priorityClassName: system-cluster-critical - # fsGroup allows using projected serviceaccount tokens as described here kubernetes/kubernetes#82573 - securityContext: - fsGroup: 65534 - containers: - - image: {{ calico_typha_image_repo }}:{{ calico_typha_image_tag }} - imagePullPolicy: {{ k8s_image_pull_policy }} - name: calico-typha - ports: - - containerPort: 5473 - name: calico-typha - protocol: TCP -{% if typha_prometheusmetricsenabled %} - - containerPort: {{ typha_prometheusmetricsport }} - name: http-metrics - protocol: TCP -{% endif %} - envFrom: - - configMapRef: - # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. - name: kubernetes-services-endpoint - optional: true - env: - # Enable "info" logging by default. Can be set to "debug" to increase verbosity. - - name: TYPHA_LOGSEVERITYSCREEN - value: "info" - # Disable logging to file and syslog since those don't make sense in Kubernetes. - - name: TYPHA_LOGFILEPATH - value: "none" - - name: TYPHA_LOGSEVERITYSYS - value: "none" - # Monitor the Kubernetes API to find the number of running instances and rebalance - # connections. - - name: TYPHA_CONNECTIONREBALANCINGMODE - value: "kubernetes" - - name: TYPHA_DATASTORETYPE - value: "kubernetes" - - name: TYPHA_HEALTHENABLED - value: "true" - - name: TYPHA_MAXCONNECTIONSLOWERLIMIT - value: "{{ typha_max_connections_lower_limit }}" -{% if typha_secure %} - - name: TYPHA_CAFILE - value: /etc/ca/ca.crt - - name: TYPHA_CLIENTCN - value: typha-client - - name: TYPHA_SERVERCERTFILE - value: /etc/typha/server_certificate.pem - - name: TYPHA_SERVERKEYFILE - value: /etc/typha/server_key.pem -{% endif %} -{% if typha_prometheusmetricsenabled %} - # Since Typha is host-networked, - # this opens a port on the host, which may need to be secured. 
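# A hedged hardening sketch (illustrative only, not applied by this role; adjust the
# source range to your monitoring network and substitute the rendered port value):
# since the port below is opened on the host, it could be restricted with ordinary
# iptables rules, e.g.
#   iptables -A INPUT -p tcp --dport <typha_prometheusmetricsport> -s 10.0.0.0/8 -j ACCEPT
#   iptables -A INPUT -p tcp --dport <typha_prometheusmetricsport> -j DROP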
- - name: TYPHA_PROMETHEUSMETRICSENABLED - value: "true" - - name: TYPHA_PROMETHEUSMETRICSPORT - value: "{{ typha_prometheusmetricsport }}" -{% endif %} -{% if calico_ipam_host_local %} - - name: USE_POD_CIDR - value: "true" -{% endif %} -{% if typha_secure %} - volumeMounts: - - mountPath: /etc/typha - name: typha-server - readOnly: true - - mountPath: /etc/ca/ca.crt - subPath: ca.crt - name: cacert - readOnly: true -{% endif %} - livenessProbe: - httpGet: - path: /liveness - port: 9098 - host: localhost - periodSeconds: 30 - initialDelaySeconds: 30 - readinessProbe: - httpGet: - path: /readiness - port: 9098 - host: localhost - periodSeconds: 10 -{% if typha_secure %} - volumes: - - name: typha-server - secret: - secretName: typha-server - items: - - key: tls.crt - path: server_certificate.pem - - key: tls.key - path: server_key.pem - - name: cacert - hostPath: - path: "{{ kube_cert_dir }}" -{% endif %} - ---- - -# This manifest creates a Pod Disruption Budget for Typha to allow K8s Cluster Autoscaler to evict - -apiVersion: policy/v1 -kind: PodDisruptionBudget -metadata: - name: calico-typha - namespace: kube-system - labels: - k8s-app: calico-typha -spec: - maxUnavailable: 1 - selector: - matchLabels: - k8s-app: calico-typha diff --git a/roles/network_plugin/calico/templates/calicoctl.etcd.sh.j2 b/roles/network_plugin/calico/templates/calicoctl.etcd.sh.j2 deleted file mode 100644 index fcde4a5e35e..00000000000 --- a/roles/network_plugin/calico/templates/calicoctl.etcd.sh.j2 +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash -ETCD_ENDPOINTS={{ etcd_access_addresses }} \ -ETCD_CA_CERT_FILE={{ calico_cert_dir }}/ca_cert.crt \ -ETCD_CERT_FILE={{ calico_cert_dir }}/cert.crt \ -ETCD_KEY_FILE={{ calico_cert_dir }}/key.pem \ -{{ bin_dir }}/calicoctl --allow-version-mismatch "$@" diff --git a/roles/network_plugin/calico/templates/calicoctl.kdd.sh.j2 b/roles/network_plugin/calico/templates/calicoctl.kdd.sh.j2 deleted file mode 100644 index ef89f3986cf..00000000000 --- a/roles/network_plugin/calico/templates/calicoctl.kdd.sh.j2 +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash -DATASTORE_TYPE=kubernetes \ -{% if inventory_hostname in groups['kube_control_plane'] %} -KUBECONFIG=/etc/kubernetes/admin.conf \ -{% else %} -KUBECONFIG=/etc/cni/net.d/calico-kubeconfig \ -{% endif %} -{{ bin_dir }}/calicoctl --allow-version-mismatch "$@" diff --git a/roles/network_plugin/calico/templates/kubernetes-services-endpoint.yml.j2 b/roles/network_plugin/calico/templates/kubernetes-services-endpoint.yml.j2 deleted file mode 100644 index f1e81776edb..00000000000 --- a/roles/network_plugin/calico/templates/kubernetes-services-endpoint.yml.j2 +++ /dev/null @@ -1,11 +0,0 @@ ---- -apiVersion: v1 -kind: ConfigMap -metadata: - namespace: kube-system - name: kubernetes-services-endpoint -data: -{% if calico_bpf_enabled %} - KUBERNETES_SERVICE_HOST: "{{ kube_apiserver_global_endpoint | urlsplit('hostname') }}" - KUBERNETES_SERVICE_PORT: "{{ kube_apiserver_global_endpoint | urlsplit('port') }}" -{% endif %} diff --git a/roles/network_plugin/calico/templates/make-ssl-calico.sh.j2 b/roles/network_plugin/calico/templates/make-ssl-calico.sh.j2 deleted file mode 100644 index 94b2022e760..00000000000 --- a/roles/network_plugin/calico/templates/make-ssl-calico.sh.j2 +++ /dev/null @@ -1,102 +0,0 @@ -#!/bin/bash - -# Author: Smana smainklh@gmail.com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -o errexit -set -o pipefail -usage() -{ - cat << EOF -Create self signed certificates - -Usage : $(basename $0) -f <config> [-d <ssldir>] - -h | --help : Show this message - -f | --config : Openssl configuration file - -d | --ssldir : Directory where the certificates will be installed - -c | --cadir : Directory where the existing CA is located - -s | --service : Service for the ca - - ex : - $(basename $0) -f openssl.conf -d /srv/ssl -EOF -} - -# Options parsing -while (($#)); do - case "$1" in - -h | --help) usage; exit 0;; - -f | --config) CONFIG=${2}; shift 2;; - -d | --ssldir) SSLDIR="${2}"; shift 2;; - -c | --cadir) CADIR="${2}"; shift 2;; - -s | --service) SERVICE="${2}"; shift 2;; - *) - usage - echo "ERROR : Unknown option" - exit 3 - ;; - esac -done - -if [ -z ${CONFIG} ]; then - echo "ERROR: the openssl configuration file is missing. option -f" - exit 1 -fi -if [ -z ${SSLDIR} ]; then - SSLDIR="/etc/calico/certs" -fi - -tmpdir=$(mktemp -d /tmp/calico_${SERVICE}_certs.XXXXXX) -trap 'rm -rf "${tmpdir}"' EXIT -cd "${tmpdir}" - -mkdir -p ${SSLDIR} ${CADIR} - -# Root CA -if [ -e "$CADIR/ca.key" ]; then - # Reuse existing CA - cp $CADIR/{ca.crt,ca.key} . -else - openssl genrsa -out ca.key {{certificates_key_size}} > /dev/null 2>&1 - openssl req -x509 -new -nodes -key ca.key -days {{certificates_duration}} -out ca.crt -subj "/CN=calico-${SERVICE}-ca" > /dev/null 2>&1 -fi - -if [ $SERVICE == "typha" ]; then - # Typha server - openssl genrsa -out typha-server.key {{certificates_key_size}} > /dev/null 2>&1 - openssl req -new -key typha-server.key -out typha-server.csr -subj "/CN=typha-server" -config ${CONFIG} > /dev/null 2>&1 - openssl x509 -req -in typha-server.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out typha-server.crt -days {{certificates_duration}} -extensions ssl_client -extfile ${CONFIG} > /dev/null 2>&1 - - # Typha client - openssl genrsa -out typha-client.key {{certificates_key_size}} > /dev/null 2>&1 - openssl req -new -key typha-client.key -out typha-client.csr -subj "/CN=typha-client" -config ${CONFIG} > /dev/null 2>&1 - openssl x509 -req -in typha-client.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out typha-client.crt -days {{certificates_duration}} -extensions ssl_client -extfile ${CONFIG} > /dev/null 2>&1 - -elif [ $SERVICE == "apiserver" ]; then - # calico-apiserver - openssl genrsa -out apiserver.key {{certificates_key_size}} > /dev/null 2>&1 - openssl req -new -key apiserver.key -out apiserver.csr -subj "/CN=calico-apiserver" -config ${CONFIG} > /dev/null 2>&1 - openssl x509 -req -in apiserver.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out apiserver.crt -days {{certificates_duration}} -extensions ssl_client_apiserver -extfile ${CONFIG} > /dev/null 2>&1 -else - echo "ERROR: unknown service name, expected 'typha' or 'apiserver' for 
option -s" - exit 1 -fi - -# Install certs -if [ -e "$CADIR/ca.key" ]; then - # No pass existing CA - rm -f ca.crt ca.key -fi - -mv {*.crt,*.key} ${SSLDIR}/ diff --git a/roles/network_plugin/calico/vars/amazon.yml b/roles/network_plugin/calico/vars/amazon.yml deleted file mode 100644 index 83efdcdb084..00000000000 --- a/roles/network_plugin/calico/vars/amazon.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -calico_wireguard_repo: https://download.copr.fedorainfracloud.org/results/jdoss/wireguard/epel-7-$basearch/ -calico_wireguard_packages: - - wireguard-dkms - - wireguard-tools diff --git a/roles/network_plugin/calico/vars/centos-9.yml b/roles/network_plugin/calico/vars/centos-9.yml deleted file mode 100644 index 43df5457a39..00000000000 --- a/roles/network_plugin/calico/vars/centos-9.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -calico_wireguard_packages: - - wireguard-tools diff --git a/roles/network_plugin/calico/vars/debian.yml b/roles/network_plugin/calico/vars/debian.yml deleted file mode 100644 index baf603cfd9c..00000000000 --- a/roles/network_plugin/calico/vars/debian.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -calico_wireguard_packages: - - wireguard diff --git a/roles/network_plugin/calico/vars/fedora.yml b/roles/network_plugin/calico/vars/fedora.yml deleted file mode 100644 index 43df5457a39..00000000000 --- a/roles/network_plugin/calico/vars/fedora.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -calico_wireguard_packages: - - wireguard-tools diff --git a/roles/network_plugin/calico/vars/opensuse.yml b/roles/network_plugin/calico/vars/opensuse.yml deleted file mode 100644 index 43df5457a39..00000000000 --- a/roles/network_plugin/calico/vars/opensuse.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -calico_wireguard_packages: - - wireguard-tools diff --git a/roles/network_plugin/calico/vars/redhat-9.yml b/roles/network_plugin/calico/vars/redhat-9.yml deleted file mode 100644 index 43df5457a39..00000000000 --- a/roles/network_plugin/calico/vars/redhat-9.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -calico_wireguard_packages: - - wireguard-tools diff --git a/roles/network_plugin/calico/vars/redhat.yml b/roles/network_plugin/calico/vars/redhat.yml deleted file mode 100644 index a83a8a5fed8..00000000000 --- a/roles/network_plugin/calico/vars/redhat.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -calico_wireguard_packages: - - wireguard-dkms - - wireguard-tools diff --git a/roles/network_plugin/calico/vars/rocky-9.yml b/roles/network_plugin/calico/vars/rocky-9.yml deleted file mode 100644 index 43df5457a39..00000000000 --- a/roles/network_plugin/calico/vars/rocky-9.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -calico_wireguard_packages: - - wireguard-tools diff --git a/roles/network_plugin/calico_defaults/defaults/main.yml b/roles/network_plugin/calico_defaults/defaults/main.yml deleted file mode 100644 index 899a9fd7180..00000000000 --- a/roles/network_plugin/calico_defaults/defaults/main.yml +++ /dev/null @@ -1,177 +0,0 @@ ---- -# the default value of name -calico_cni_name: k8s-pod-network - -# Enables Internet connectivity from containers -nat_outgoing: true -nat_outgoing_ipv6: false - -# add default ippool name -calico_pool_name: "default-pool" -calico_ipv4pool_ipip: "Off" - -# Change encapsulation mode, by default we enable vxlan which is the most mature and well tested mode -calico_ipip_mode: Never # valid values are 'Always', 'Never' and 'CrossSubnet' -calico_vxlan_mode: Always # valid values are 'Always', 'Never' and 'CrossSubnet' - -# add default ippool blockSize -calico_pool_blocksize: 26 - -# Calico doesn't support ipip 
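As an illustration of how the encapsulation defaults above were meant to be consumed, a cluster override would normally live in inventory group_vars rather than in the deleted role itself. A minimal sketch only; the file path and values are assumptions, not part of this patch:

    # group_vars/k8s_cluster/k8s-net-calico.yml (hypothetical override)
    calico_network_backend: bird   # BGP backend; needed once VXLAN is turned off
    calico_vxlan_mode: Never
    calico_ipip_mode: CrossSubnet  # encapsulate only when crossing an L3 subnet boundary
    calico_pool_blocksize: 24      # larger per-node address block than the /26 default
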
-# Calico doesn't support ipip tunneling for IPv6.
-calico_ipip_mode_ipv6: Never
-calico_vxlan_mode_ipv6: Always
-
-# add default ipv6 ippool blockSize
-calico_pool_blocksize_ipv6: 122
-
-# Calico network backend can be 'bird', 'vxlan' and 'none'
-calico_network_backend: vxlan
-
-calico_cert_dir: /etc/calico/certs
-
-# Global as_num (/calico/bgp/v1/global/as_num)
-global_as_num: "64512"
-
-# You can set MTU value here. If left undefined or empty, it will
-# not be specified in calico CNI config, so Calico will use built-in
-# defaults. The value should be a number, not a string.
-# calico_mtu: 1500
-
-# Advertise Service External IPs
-calico_advertise_service_external_ips: []
-
-# Advertise Service LoadBalancer IPs
-calico_advertise_service_loadbalancer_ips: []
-
-# Calico eBPF support
-calico_bpf_enabled: false
-calico_bpf_log_level: ""
-# Valid options for service mode: Tunnel (default), DSR=Direct Server Return
-calico_bpf_service_mode: Tunnel
-
-# Calico floatingIPs support
-# Valid options for floatingIPs: Disabled (default), Enabled
-calico_felix_floating_ips: Disabled
-
-# Limits for apps
-calico_node_memory_limit: 500M
-calico_node_cpu_limit: "0"
-calico_node_memory_requests: 64M
-calico_node_cpu_requests: 150m
-calico_felix_chaininsertmode: Insert
-
-# Calico daemonset nodeselector
-calico_ds_nodeselector: "kubernetes.io/os: linux"
-
-# Virtual network ID to use for VXLAN traffic. A value of 0 means “use the kernel default”.
-calico_vxlan_vni: 4096
-
-# Port to use for VXLAN traffic. A value of 0 means “use the kernel default”.
-calico_vxlan_port: 4789
-
-# Enable Prometheus metrics endpoint for felix
-calico_felix_prometheusmetricsenabled: false
-calico_felix_prometheusmetricsport: 9091
-calico_felix_prometheusgometricsenabled: true
-calico_felix_prometheusprocessmetricsenabled: true
-
-# Set the agent log level. Can be debug, warning, info or fatal
-calico_loglevel: info
-calico_node_startup_loglevel: error
-
-# Set log path for calico CNI plugin. Set to false to disable logging to disk.
-calico_cni_log_file_path: /var/log/calico/cni/cni.log
-
-# Enable or disable usage report to 'usage.projectcalico.org'
-calico_usage_reporting: false
-
-# Should calico ignore the kernel's RPF check setting,
-# see https://github.com/projectcalico/felix/blob/ab8799eaea66627e5db7717e62fca61fd9c08646/python/calico/felix/config.py#L198
-calico_node_ignorelooserpf: false
-
-# Define the address on which Felix will respond to health requests
-calico_healthhost: "localhost"
-
-# Configure the time in seconds that calico will wait for the iptables lock
-calico_iptables_lock_timeout_secs: 10
-
-# Choose Calico iptables backend: "Legacy", "Auto" or "NFT" (FELIX_IPTABLESBACKEND)
-calico_iptables_backend: "Auto"
-
-# Calico NFTables Mode Support (tech preview 3.29)
-# Valid options: Disabled (default), Enabled
-calico_nftable_mode: "Disabled"
-
-# Calico Wireguard support
-calico_wireguard_enabled: false
-calico_wireguard_packages: []
-calico_wireguard_repo: https://download.copr.fedorainfracloud.org/results/jdoss/wireguard/epel-{{ ansible_distribution_major_version }}-$basearch/
-
-# If you want to use a non-default IP_AUTODETECTION_METHOD or IP6_AUTODETECTION_METHOD for calico node, set this option to one of:
-# * can-reach=DESTINATION
-# * interface=INTERFACE-REGEX
-# see https://projectcalico.docs.tigera.io/reference/node/configuration#ip-autodetection-methods
-# calico_ip_auto_method: "interface=eth.*"
-# calico_ip6_auto_method: "interface=eth.*"
-
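A concrete illustration of the autodetection and WireGuard knobs above; the values are assumptions for the sketch, not defaults of this role:

    calico_ip_auto_method: "can-reach=10.0.0.1"  # pick the interface that routes to this probe IP
    calico_wireguard_enabled: true               # encrypt pod traffic using the packages/repo variables above
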
-# Set FELIX_MTUIFACEPATTERN, the pattern used to discover the host’s interface for MTU auto-detection.
-# see https://projectcalico.docs.tigera.io/reference/felix/configuration
-# calico_felix_mtu_iface_pattern: "^((en|wl|ww|sl|ib)[opsx].*|(eth|wlan|wwan).*)"
-
-calico_baremetal_nodename: "{{ kube_override_hostname | default(inventory_hostname) }}"
-
-kube_etcd_cacert_file: ca.pem
-kube_etcd_cert_file: node-{{ inventory_hostname }}.pem
-kube_etcd_key_file: node-{{ inventory_hostname }}-key.pem
-
-# Choose data store type for calico: "etcd" or "kdd" (kubernetes datastore)
-# The default value for calico_datastore is set in role kubespray-defaults
-
-# Use typha (only with kdd)
-typha_enabled: false
-typha_prometheusmetricsenabled: false
-typha_prometheusmetricsport: 9093
-
-# Scaling typha: 1 replica per 100 nodes is adequate
-# Number of typha replicas
-typha_replicas: 1
-
-# Set max typha connections
-typha_max_connections_lower_limit: 300
-
-# Generate certificates for typha<->calico-node communication
-typha_secure: false
-
-calico_feature_control: {}
-
-# Calico default BGP port
-calico_bgp_listen_port: 179
-
-# Calico FelixConfiguration options
-calico_felix_reporting_interval: 0s
-calico_felix_log_severity_screen: Info
-
-# Calico container settings
-calico_allow_ip_forwarding: false
-
-# Calico IPAM strictAffinity
-calico_ipam_strictaffinity: false
-
-# Calico IPAM autoAllocateBlocks
-calico_ipam_autoallocateblocks: true
-
-# Calico IPAM maxBlocksPerHost, default 0
-calico_ipam_maxblocksperhost: 0
-
-# Calico host-local IPAM (use the node's .spec.podCIDR)
-calico_ipam_host_local: false
-
-# Calico apiserver (only with kdd)
-calico_apiserver_enabled: false
-
-# Calico feature detect override
-calico_feature_detect_override: ""
-
-# Calico kubeconfig wait timeout in seconds
-calico_kubeconfig_wait_timeout: 300
diff --git a/roles/network_plugin/cni/defaults/main.yml b/roles/network_plugin/cni/defaults/main.yml
deleted file mode 100644
index 5d11edfa380..00000000000
--- a/roles/network_plugin/cni/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-cni_bin_owner: "{{ kube_owner }}"
diff --git a/roles/network_plugin/cni/tasks/main.yml b/roles/network_plugin/cni/tasks/main.yml
deleted file mode 100644
index 28376bd7605..00000000000
--- a/roles/network_plugin/cni/tasks/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- name: CNI | make sure /opt/cni/bin exists
-  file:
-    path: /opt/cni/bin
-    state: directory
-    mode: "0755"
-    owner: "{{ cni_bin_owner }}"
-    recurse: true
-
-- name: CNI | Copy cni plugins
-  unarchive:
-    src: "{{ downloads.cni.dest }}"
-    dest: "/opt/cni/bin"
-    mode: "0755"
-    owner: "{{ cni_bin_owner }}"
-    remote_src: true
diff --git a/roles/network_plugin/custom_cni/defaults/main.yml b/roles/network_plugin/custom_cni/defaults/main.yml
deleted file mode 100644
index 0eab14374e3..00000000000
--- a/roles/network_plugin/custom_cni/defaults/main.yml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-
-custom_cni_manifests: []
-
-custom_cni_chart_namespace: kube-system
-custom_cni_chart_release_name: ""
-custom_cni_chart_repository_name: ""
-custom_cni_chart_repository_url: ""
-custom_cni_chart_ref: ""
-custom_cni_chart_version: ""
-custom_cni_chart_values: {}
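The defaults above drive the role's two deployment paths: raw manifests (custom_cni_manifests) or a Helm chart, selected whenever custom_cni_chart_release_name is non-empty, as the meta file below wires up. A sketch of chart-based overrides, using Cilium purely as an example; the names, URL and version are assumptions:

    custom_cni_chart_release_name: cilium
    custom_cni_chart_repository_name: cilium
    custom_cni_chart_repository_url: https://helm.cilium.io
    custom_cni_chart_ref: cilium/cilium
    custom_cni_chart_version: "1.15.4"   # illustrative version only
    custom_cni_chart_values:
      ipam:
        mode: kubernetes
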
diff --git a/roles/network_plugin/custom_cni/meta/main.yml b/roles/network_plugin/custom_cni/meta/main.yml
deleted file mode 100644
index 361c406de2a..00000000000
--- a/roles/network_plugin/custom_cni/meta/main.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-dependencies:
-  - role: helm-apps
-    when:
-      - inventory_hostname == groups['kube_control_plane'][0]
-      - custom_cni_chart_release_name | length > 0
-    environment:
-      http_proxy: "{{ http_proxy | default('') }}"
-      https_proxy: "{{ https_proxy | default('') }}"
-    release_common_opts: {}
-    releases:
-      - name: "{{ custom_cni_chart_release_name }}"
-        namespace: "{{ custom_cni_chart_namespace }}"
-        chart_ref: "{{ custom_cni_chart_ref }}"
-        chart_version: "{{ custom_cni_chart_version }}"
-        wait: true
-        values: "{{ custom_cni_chart_values }}"
-    repositories:
-      - name: "{{ custom_cni_chart_repository_name }}"
-        url: "{{ custom_cni_chart_repository_url }}"
diff --git a/roles/network_plugin/custom_cni/tasks/main.yml b/roles/network_plugin/custom_cni/tasks/main.yml
deleted file mode 100644
index a1397c8281f..00000000000
--- a/roles/network_plugin/custom_cni/tasks/main.yml
+++ /dev/null
@@ -1,29 +0,0 @@
----
-- name: Custom CNI | Manifest deployment
-  when: not custom_cni_chart_release_name | length > 0
-  block:
-    - name: Custom CNI | Check Custom CNI Manifests
-      assert:
-        that:
-          - "custom_cni_manifests | length > 0"
-        msg: "custom_cni_manifests should not be empty"
-
-    - name: Custom CNI | Copy Custom manifests
-      template:
-        src: "{{ item }}"
-        dest: "{{ kube_config_dir }}/{{ item | basename | replace('.j2', '') }}"
-        mode: "0644"
-      loop: "{{ custom_cni_manifests }}"
-      delegate_to: "{{ groups['kube_control_plane'] | first }}"
-      run_once: true
-
-    - name: Custom CNI | Start Resources
-      kube:
-        namespace: "kube-system"
-        kubectl: "{{ bin_dir }}/kubectl"
-        filename: "{{ kube_config_dir }}/{{ item | basename | replace('.j2', '') }}"
-        state: "latest"
-        wait: true
-      loop: "{{ custom_cni_manifests }}"
-      delegate_to: "{{ groups['kube_control_plane'] | first }}"
-      run_once: true
diff --git a/roles/network_plugin/flannel/defaults/main.yml b/roles/network_plugin/flannel/defaults/main.yml
deleted file mode 100644
index 16ada70030f..00000000000
--- a/roles/network_plugin/flannel/defaults/main.yml
+++ /dev/null
@@ -1,28 +0,0 @@
----
-# Flannel public IP
-# The address that flannel should advertise for reaching this host
-# Disabled until https://github.com/coreos/flannel/issues/712 is fixed
-# flannel_public_ip: "{{ main_access_ip }}"
-
-## interface that should be used for flannel operations
-## This is actually an inventory cluster-level item
-# flannel_interface:
-
-## Select the interface that should be used for flannel operations by regexp on name or IP
-## This is actually an inventory cluster-level item
-## example: select interface with ip from net 10.0.0.0/23
-## (single-quote the value and escape backslashes)
-# flannel_interface_regexp: '10\\.0\\.[0-2]\\.\\d{1,3}'
-
-# You can choose what type of flannel backend to use
-# please refer to flannel's docs : https://github.com/coreos/flannel/blob/master/README.md
-flannel_backend_type: "vxlan"
-flannel_vxlan_vni: 1
-flannel_vxlan_port: 8472
-flannel_vxlan_direct_routing: false
-
-# Limits for apps
-flannel_memory_limit: 500M
-flannel_cpu_limit: 300m
-flannel_memory_requests: 64M
-flannel_cpu_requests: 150m
diff --git a/roles/network_plugin/flannel/meta/main.yml b/roles/network_plugin/flannel/meta/main.yml
deleted file mode 100644
index 9b7065f1854..00000000000
--- a/roles/network_plugin/flannel/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-dependencies:
-  - role: network_plugin/cni
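Before the deleted task file that follows: the flannel defaults above were typically overridden per cluster. A minimal sketch with hypothetical values, which also exercises the kernel check in the first task below:

    # Hypothetical group_vars snippet for the flannel defaults above.
    flannel_backend_type: wireguard   # requires kernel >= 5.6.0 (asserted below)
    flannel_interface_regexp: '10\\.0\\.[0-2]\\.\\d{1,3}'   # select the iface by IP; backslashes escaped
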
diff --git a/roles/network_plugin/flannel/tasks/main.yml b/roles/network_plugin/flannel/tasks/main.yml
deleted file mode 100644
index 8fea555e44f..00000000000
--- a/roles/network_plugin/flannel/tasks/main.yml
+++ /dev/null
@@ -1,38 +0,0 @@
----
-
-- name: Flannel | Stop if kernel version is too low for Flannel Wireguard encryption
-  assert:
-    that: ansible_kernel.split('-')[0] is version('5.6.0', '>=')
-  when:
-    - kube_network_plugin == 'flannel'
-    - flannel_backend_type == 'wireguard'
-    - not ignore_assert_errors
-
-- name: Flannel | Create Flannel manifests
-  template:
-    src: "{{ item.file }}.j2"
-    dest: "{{ kube_config_dir }}/{{ item.file }}"
-    mode: "0644"
-  with_items:
-    - {name: flannel, file: cni-flannel-rbac.yml, type: sa}
-    - {name: kube-flannel, file: cni-flannel.yml, type: ds}
-  register: flannel_node_manifests
-  when:
-    - inventory_hostname == groups['kube_control_plane'][0]
-
-- name: Flannel | Start Resources
-  kube:
-    name: "{{ item.item.name }}"
-    namespace: "kube-system"
-    kubectl: "{{ bin_dir }}/kubectl"
-    resource: "{{ item.item.type }}"
-    filename: "{{ kube_config_dir }}/{{ item.item.file }}"
-    state: "latest"
-  with_items: "{{ flannel_node_manifests.results }}"
-  when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped
-
-- name: Flannel | Wait for flannel subnet.env file presence
-  wait_for:
-    path: /run/flannel/subnet.env
-    delay: 5
-    timeout: 600
diff --git a/roles/network_plugin/flannel/tasks/reset.yml b/roles/network_plugin/flannel/tasks/reset.yml
deleted file mode 100644
index c4b1b881581..00000000000
--- a/roles/network_plugin/flannel/tasks/reset.yml
+++ /dev/null
@@ -1,24 +0,0 @@
----
-- name: Reset | check cni network device
-  stat:
-    path: /sys/class/net/cni0
-    get_attributes: false
-    get_checksum: false
-    get_mime: false
-  register: cni
-
-- name: Reset | remove the cni0 network device created by flannel
-  command: ip link del cni0
-  when: cni.stat.exists
-
-- name: Reset | check flannel network device
-  stat:
-    path: /sys/class/net/flannel.1
-    get_attributes: false
-    get_checksum: false
-    get_mime: false
-  register: flannel
-
-- name: Reset | remove the flannel.1 network device created by flannel
-  command: ip link del flannel.1
-  when: flannel.stat.exists
diff --git a/roles/network_plugin/flannel/templates/cni-flannel-rbac.yml.j2 b/roles/network_plugin/flannel/templates/cni-flannel-rbac.yml.j2
deleted file mode 100644
index 631ec5eb6c9..00000000000
--- a/roles/network_plugin/flannel/templates/cni-flannel-rbac.yml.j2
+++ /dev/null
@@ -1,52 +0,0 @@
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: flannel
-  namespace: kube-system
----
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: flannel
-rules:
-- apiGroups:
-  - ""
-  resources:
-  - pods
-  verbs:
-  - get
-- apiGroups:
-  - ""
-  resources:
-  - nodes
-  verbs:
-  - get
-  - list
-  - watch
-- apiGroups:
-  - ""
-  resources:
-  - nodes/status
-  verbs:
-  - patch
-- apiGroups:
-  - "networking.k8s.io"
-  resources:
-  - clustercidrs
-  verbs:
-  - list
-  - watch
----
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: flannel
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: flannel
-subjects:
-- kind: ServiceAccount
-  name: flannel
-  namespace: kube-system
diff --git a/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 b/roles/network_plugin/flannel/templates/cni-flannel.yml.j2
deleted file mode 100644
index da4cfcde5b0..00000000000
--- a/roles/network_plugin/flannel/templates/cni-flannel.yml.j2
+++ /dev/null
@@ -1,172 +0,0 @@
----
-kind: ConfigMap
-apiVersion: v1
-metadata:
-  name: kube-flannel-cfg
-  namespace: kube-system
-  labels:
-    tier: node
-    app: flannel
-data:
-  cni-conf.json: |
-    {
-      "name": "cbr0",
-      "cniVersion": "0.3.1",
-      "plugins": [
-        {
-          "type": "flannel",
-          "delegate": {
-            "hairpinMode": true,
-            "isDefaultGateway": true
-          }
-        },
-        {
-          "type":
"portmap", - "capabilities": { - "portMappings": true - } - } - ] - } - net-conf.json: | - { -{% if ipv4_stack %} - "Network": "{{ kube_pods_subnet }}", - "EnableIPv4": true, -{% endif %} -{% if ipv6_stack %} - "EnableIPv6": true, - "IPv6Network": "{{ kube_pods_subnet_ipv6 }}", -{% endif %} - "Backend": { - "Type": "{{ flannel_backend_type }}"{% if flannel_backend_type == "vxlan" %}, - "VNI": {{ flannel_vxlan_vni }}, - "Port": {{ flannel_vxlan_port }}, - "DirectRouting": {{ flannel_vxlan_direct_routing | to_json }} -{% endif %} - } - } -{% for arch in ['amd64', 'arm64', 'arm', 'ppc64le', 's390x'] %} ---- -apiVersion: apps/v1 -kind: DaemonSet -metadata: -{% if arch == 'amd64' %} - name: kube-flannel -{% else %} - name: kube-flannel-ds-{{ arch }} -{% endif %} - namespace: kube-system - labels: - tier: node - app: flannel -spec: - selector: - matchLabels: - app: flannel - template: - metadata: - labels: - tier: node - app: flannel - spec: - priorityClassName: system-node-critical - serviceAccountName: flannel - containers: - - name: kube-flannel - image: {{ flannel_image_repo }}:{{ flannel_image_tag }} - imagePullPolicy: {{ k8s_image_pull_policy }} - resources: - limits: - cpu: {{ flannel_cpu_limit }} - memory: {{ flannel_memory_limit }} - requests: - cpu: {{ flannel_cpu_requests }} - memory: {{ flannel_memory_requests }} - command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr"{% if flannel_interface is defined %}, "--iface={{ flannel_interface }}"{% endif %}{% if flannel_interface_regexp is defined %}, "--iface-regex={{ flannel_interface_regexp }}"{% endif %} ] - securityContext: - privileged: false - capabilities: - add: ["NET_ADMIN", "NET_RAW"] - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: EVENT_QUEUE_DEPTH - value: "5000" - volumeMounts: - - name: run - mountPath: /run/flannel - - name: flannel-cfg - mountPath: /etc/kube-flannel/ - - name: xtables-lock - mountPath: /run/xtables.lock - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/os - operator: In - values: - - linux - - key: kubernetes.io/arch - operator: In - values: - - {{ arch }} - initContainers: - - name: install-cni-plugin - image: {{ flannel_init_image_repo }}:{{ flannel_init_image_tag }} - command: - - cp - args: - - -f - - /flannel - - /opt/cni/bin/flannel - volumeMounts: - - name: cni-plugin - mountPath: /opt/cni/bin - - name: install-cni - image: {{ flannel_image_repo }}:{{ flannel_image_tag }} - command: - - cp - args: - - -f - - /etc/kube-flannel/cni-conf.json - - /etc/cni/net.d/10-flannel.conflist - volumeMounts: - - name: cni - mountPath: /etc/cni/net.d - - name: flannel-cfg - mountPath: /etc/kube-flannel/ - hostNetwork: true - dnsPolicy: ClusterFirstWithHostNet - tolerations: - - operator: Exists - volumes: - - name: run - hostPath: - path: /run/flannel - - name: cni - hostPath: - path: /etc/cni/net.d - - name: flannel-cfg - configMap: - name: kube-flannel-cfg - - name: xtables-lock - hostPath: - path: /run/xtables.lock - type: FileOrCreate - - name: cni-plugin - hostPath: - path: /opt/cni/bin - updateStrategy: - rollingUpdate: - maxUnavailable: {{ serial | default('20%') }} - type: RollingUpdate -{% endfor %} diff --git a/roles/network_plugin/kube-ovn/defaults/main.yml b/roles/network_plugin/kube-ovn/defaults/main.yml deleted file mode 100644 index 4262a775b27..00000000000 --- 
a/roles/network_plugin/kube-ovn/defaults/main.yml
+++ /dev/null
@@ -1,135 +0,0 @@
----
-kube_ovn_db_cpu_request: 500m
-kube_ovn_db_memory_request: 200Mi
-kube_ovn_db_cpu_limit: 3000m
-kube_ovn_db_memory_limit: 3000Mi
-kube_ovn_node_cpu_request: 200m
-kube_ovn_node_memory_request: 200Mi
-kube_ovn_node_cpu_limit: 1000m
-kube_ovn_node_memory_limit: 800Mi
-kube_ovn_cni_server_cpu_request: 200m
-kube_ovn_cni_server_memory_request: 200Mi
-kube_ovn_cni_server_cpu_limit: 1000m
-kube_ovn_cni_server_memory_limit: 1Gi
-kube_ovn_controller_cpu_request: 200m
-kube_ovn_controller_memory_request: 200Mi
-kube_ovn_controller_cpu_limit: 1000m
-kube_ovn_controller_memory_limit: 1Gi
-kube_ovn_pinger_cpu_request: 100m
-kube_ovn_pinger_memory_request: 200Mi
-kube_ovn_pinger_cpu_limit: 200m
-kube_ovn_pinger_memory_limit: 400Mi
-kube_ovn_monitor_memory_request: 200Mi
-kube_ovn_monitor_cpu_request: 200m
-kube_ovn_monitor_memory_limit: 200Mi
-kube_ovn_monitor_cpu_limit: 200m
-kube_ovn_dpdk_node_cpu_request: 1000m
-kube_ovn_dpdk_node_memory_request: 2Gi
-kube_ovn_dpdk_node_cpu_limit: 1000m
-kube_ovn_dpdk_node_memory_limit: 2Gi
-
-kube_ovn_central_hosts: "{{ groups['kube_control_plane'] }}"
-kube_ovn_central_replics: "{{ kube_ovn_central_hosts | length }}"
-kube_ovn_controller_replics: "{{ kube_ovn_central_hosts | length }}"
-kube_ovn_central_ips: |-
-  {% for item in kube_ovn_central_hosts -%}
-  {{ hostvars[item]['main_ip'] }}{% if not loop.last %},{% endif %}
-  {%- endfor %}
-
-kube_ovn_ic_enable: false
-kube_ovn_ic_autoroute: true
-kube_ovn_ic_dbhost: "127.0.0.1"
-kube_ovn_ic_zone: "kubernetes"
-
-# geneve or vlan
-kube_ovn_network_type: geneve
-
-# geneve, vxlan or stt. ATTENTION: some network policies cannot take effect when using vxlan, and stt requires a custom-compiled OVS kernel module
-kube_ovn_tunnel_type: geneve
-
-## The NIC carrying the container network; either a NIC name or a comma-separated group of regexes, e.g. 'enp6s0f0,eth.*'. If empty, the NIC used by the default route is selected.
-# kube_ovn_iface: eth1
-## The MTU used by the pod iface in overlay networks (default iface MTU - 100)
-# kube_ovn_mtu: 1333
-
-## Enable hw-offload, disable traffic mirror and set the iface to the physical port. Make sure that there is an IP address bound to the physical port.
-kube_ovn_hw_offload: false -# traffic mirror -kube_ovn_traffic_mirror: false - -# kube_ovn_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112 -# kube_ovn_default_interface_name: eth0 - -kube_ovn_external_address: 8.8.8.8 -kube_ovn_external_address_ipv6: 2400:3200::1 -kube_ovn_external_address_merged: >- - {%- if ipv4_stack and ipv6_stack -%} - {{ kube_ovn_external_address }},{{ kube_ovn_external_address_ipv6 }} - {%- elif ipv4_stack -%} - {{ kube_ovn_external_address }} - {%- else -%} - {{ kube_ovn_external_address_ipv6 }} - {%- endif -%} - -kube_ovn_external_dns: alauda.cn - -# kube_ovn_default_gateway: 10.233.64.1,fd85:ee78:d8a6:8607::1:0 -kube_ovn_default_gateway_check: true -kube_ovn_default_logical_gateway: false - -# u2o_interconnection -kube_ovn_u2o_interconnection: false - -# kube_ovn_default_exclude_ips: 10.16.0.1 -kube_ovn_node_switch_cidr: 100.64.0.0/16 -kube_ovn_node_switch_cidr_ipv6: fd00:100:64::/64 -kube_ovn_node_switch_cidr_merged: >- - {%- if ipv4_stack and ipv6_stack -%} - {{ kube_ovn_node_switch_cidr }},{{ kube_ovn_node_switch_cidr_ipv6 }} - {%- elif ipv4_stack -%} - {{ kube_ovn_node_switch_cidr }} - {%- else -%} - {{ kube_ovn_node_switch_cidr_ipv6 }} - {%- endif -%} - -## vlan config, set default interface name and vlan id -# kube_ovn_default_interface_name: eth0 -kube_ovn_default_vlan_id: 100 -kube_ovn_vlan_name: product - -## pod nic type, support: veth-pair or internal-port -kube_ovn_pod_nic_type: veth_pair - -## Enable load balancer -kube_ovn_enable_lb: true - -## Enable network policy support -kube_ovn_enable_np: true - -## Enable external vpc support -kube_ovn_enable_external_vpc: true - -## Enable checksum -kube_ovn_encap_checksum: true - -## enable ssl -kube_ovn_enable_ssl: false - -## dpdk -kube_ovn_dpdk_enabled: false -kube_ovn_dpdk_tunnel_iface: br-phy - -## bind local ip -kube_ovn_bind_local_ip_enabled: true - -## eip snat -kube_ovn_eip_snat_enabled: true - -# ls dnat mod dl dst -kube_ovn_ls_dnat_mod_dl_dst: true - -## keep vm ip -kube_ovn_keep_vm_ip: true - -## cni config priority, default: 01 -kube_ovn_cni_config_priority: '01' diff --git a/roles/network_plugin/kube-ovn/tasks/main.yml b/roles/network_plugin/kube-ovn/tasks/main.yml deleted file mode 100644 index 3d278462c89..00000000000 --- a/roles/network_plugin/kube-ovn/tasks/main.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -- name: Kube-OVN | Label ovn-db node - command: "{{ kubectl }} label --overwrite node {{ item }} kube-ovn/role=master" - loop: "{{ kube_ovn_central_hosts }}" - when: - - inventory_hostname == groups['kube_control_plane'][0] - -- name: Kube-OVN | Create Kube-OVN manifests - template: - src: "{{ item.file }}.j2" - dest: "{{ kube_config_dir }}/{{ item.file }}" - mode: "0644" - with_items: - - {name: kube-ovn-crd, file: cni-kube-ovn-crd.yml} - - {name: ovn, file: cni-ovn.yml} - - {name: kube-ovn, file: cni-kube-ovn.yml} - register: kube_ovn_node_manifests - -- name: Kube-OVN | Start Resources - kube: - name: "{{ item.item.name }}" - kubectl: "{{ bin_dir }}/kubectl" - filename: "{{ kube_config_dir }}/{{ item.item.file }}" - state: "latest" - with_items: "{{ kube_ovn_node_manifests.results }}" - when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped diff --git a/roles/network_plugin/kube-ovn/templates/cni-kube-ovn-crd.yml.j2 b/roles/network_plugin/kube-ovn/templates/cni-kube-ovn-crd.yml.j2 deleted file mode 100644 index c531ffcbb1a..00000000000 --- a/roles/network_plugin/kube-ovn/templates/cni-kube-ovn-crd.yml.j2 +++ /dev/null @@ -1,2587 +0,0 @@ -apiVersion: 
apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: vpc-dnses.kubeovn.io -spec: - group: kubeovn.io - names: - plural: vpc-dnses - singular: vpc-dns - shortNames: - - vpc-dns - kind: VpcDns - listKind: VpcDnsList - scope: Cluster - versions: - - additionalPrinterColumns: - - jsonPath: .status.active - name: Active - type: boolean - - jsonPath: .spec.vpc - name: Vpc - type: string - - jsonPath: .spec.subnet - name: Subnet - type: string - name: v1 - served: true - storage: true - subresources: - status: {} - schema: - openAPIV3Schema: - type: object - properties: - spec: - type: object - properties: - vpc: - type: string - subnet: - type: string - replicas: - type: integer - minimum: 1 - maximum: 3 - status: - type: object - properties: - active: - type: boolean - conditions: - type: array - items: - type: object - properties: - type: - type: string - status: - type: string - reason: - type: string - message: - type: string - lastUpdateTime: - type: string - lastTransitionTime: - type: string ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: switch-lb-rules.kubeovn.io -spec: - group: kubeovn.io - names: - plural: switch-lb-rules - singular: switch-lb-rule - shortNames: - - slr - kind: SwitchLBRule - listKind: SwitchLBRuleList - scope: Cluster - versions: - - additionalPrinterColumns: - - jsonPath: .spec.vip - name: vip - type: string - - jsonPath: .status.ports - name: port(s) - type: string - - jsonPath: .status.service - name: service - type: string - - jsonPath: .metadata.creationTimestamp - name: age - type: date - name: v1 - served: true - storage: true - subresources: - status: {} - schema: - openAPIV3Schema: - type: object - properties: - spec: - type: object - properties: - namespace: - type: string - vip: - type: string - sessionAffinity: - type: string - ports: - items: - properties: - name: - type: string - port: - type: integer - minimum: 1 - maximum: 65535 - protocol: - type: string - targetPort: - type: integer - minimum: 1 - maximum: 65535 - type: object - type: array - selector: - items: - type: string - type: array - endpoints: - items: - type: string - type: array - status: - type: object - properties: - ports: - type: string - service: - type: string ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: vpc-nat-gateways.kubeovn.io -spec: - group: kubeovn.io - names: - plural: vpc-nat-gateways - singular: vpc-nat-gateway - shortNames: - - vpc-nat-gw - kind: VpcNatGateway - listKind: VpcNatGatewayList - scope: Cluster - versions: - - additionalPrinterColumns: - - jsonPath: .spec.vpc - name: Vpc - type: string - - jsonPath: .spec.subnet - name: Subnet - type: string - - jsonPath: .spec.lanIp - name: LanIP - type: string - name: v1 - served: true - storage: true - subresources: - status: {} - schema: - openAPIV3Schema: - type: object - properties: - status: - type: object - properties: - externalSubnets: - items: - type: string - type: array - selector: - type: array - items: - type: string - qosPolicy: - type: string - tolerations: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - enum: - - Equal - - Exists - value: - type: string - effect: - type: string - enum: - - NoExecute - - NoSchedule - - PreferNoSchedule - tolerationSeconds: - type: integer - affinity: - properties: - nodeAffinity: - properties: - preferredDuringSchedulingIgnoredDuringExecution: - items: - properties: - preference: - properties: - 
matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - properties: - nodeSelectorTerms: - items: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - type: array - required: - - nodeSelectorTerms - type: object - type: object - podAffinity: - properties: - preferredDuringSchedulingIgnoredDuringExecution: - items: - properties: - podAffinityTerm: - properties: - labelSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - x-kubernetes-patch-strategy: merge - x-kubernetes-patch-merge-key: key - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - namespaces: - items: - type: string - type: array - topologyKey: - type: string - required: - - topologyKey - type: object - weight: - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - items: - properties: - labelSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - x-kubernetes-patch-strategy: merge - x-kubernetes-patch-merge-key: key - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - namespaces: - items: - type: string - type: array - topologyKey: - type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - properties: - preferredDuringSchedulingIgnoredDuringExecution: - items: - properties: - podAffinityTerm: - properties: - labelSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - x-kubernetes-patch-strategy: merge - x-kubernetes-patch-merge-key: key - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - namespaces: - items: - type: string - type: array - topologyKey: - type: string - required: - - topologyKey - type: object - weight: - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - items: - properties: - labelSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - x-kubernetes-patch-strategy: merge - x-kubernetes-patch-merge-key: key - operator: - type: string - values: - 
items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - namespaces: - items: - type: string - type: array - topologyKey: - type: string - required: - - topologyKey - type: object - type: array - type: object - type: object - spec: - type: object - properties: - lanIp: - type: string - subnet: - type: string - externalSubnets: - items: - type: string - type: array - vpc: - type: string - selector: - type: array - items: - type: string - qosPolicy: - type: string - tolerations: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - enum: - - Equal - - Exists - value: - type: string - effect: - type: string - enum: - - NoExecute - - NoSchedule - - PreferNoSchedule - tolerationSeconds: - type: integer - affinity: - properties: - nodeAffinity: - properties: - preferredDuringSchedulingIgnoredDuringExecution: - items: - properties: - preference: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - properties: - nodeSelectorTerms: - items: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - type: array - required: - - nodeSelectorTerms - type: object - type: object - podAffinity: - properties: - preferredDuringSchedulingIgnoredDuringExecution: - items: - properties: - podAffinityTerm: - properties: - labelSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - x-kubernetes-patch-strategy: merge - x-kubernetes-patch-merge-key: key - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - namespaces: - items: - type: string - type: array - topologyKey: - type: string - required: - - topologyKey - type: object - weight: - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - items: - properties: - labelSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - x-kubernetes-patch-strategy: merge - x-kubernetes-patch-merge-key: key - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - namespaces: - items: - type: string - type: array - topologyKey: - type: string - required: - - topologyKey - type: object - type: array - type: object - 
podAntiAffinity: - properties: - preferredDuringSchedulingIgnoredDuringExecution: - items: - properties: - podAffinityTerm: - properties: - labelSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - x-kubernetes-patch-strategy: merge - x-kubernetes-patch-merge-key: key - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - namespaces: - items: - type: string - type: array - topologyKey: - type: string - required: - - topologyKey - type: object - weight: - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - items: - properties: - labelSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - x-kubernetes-patch-strategy: merge - x-kubernetes-patch-merge-key: key - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - namespaces: - items: - type: string - type: array - topologyKey: - type: string - required: - - topologyKey - type: object - type: array - type: object - type: object ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: iptables-eips.kubeovn.io -spec: - group: kubeovn.io - names: - plural: iptables-eips - singular: iptables-eip - shortNames: - - eip - kind: IptablesEIP - listKind: IptablesEIPList - scope: Cluster - versions: - - name: v1 - served: true - storage: true - subresources: - status: {} - additionalPrinterColumns: - - jsonPath: .status.ip - name: IP - type: string - - jsonPath: .spec.macAddress - name: Mac - type: string - - jsonPath: .status.nat - name: Nat - type: string - - jsonPath: .spec.natGwDp - name: NatGwDp - type: string - - jsonPath: .status.ready - name: Ready - type: boolean - schema: - openAPIV3Schema: - type: object - properties: - status: - type: object - properties: - ready: - type: boolean - ip: - type: string - nat: - type: string - redo: - type: string - qosPolicy: - type: string - conditions: - type: array - items: - type: object - properties: - type: - type: string - status: - type: string - reason: - type: string - message: - type: string - lastUpdateTime: - type: string - lastTransitionTime: - type: string - spec: - type: object - properties: - v4ip: - type: string - v6ip: - type: string - macAddress: - type: string - natGwDp: - type: string - qosPolicy: - type: string - externalSubnet: - type: string ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: iptables-fip-rules.kubeovn.io -spec: - group: kubeovn.io - names: - plural: iptables-fip-rules - singular: iptables-fip-rule - shortNames: - - fip - kind: IptablesFIPRule - listKind: IptablesFIPRuleList - scope: Cluster - versions: - - name: v1 - served: true - storage: true - subresources: - status: {} - additionalPrinterColumns: - - jsonPath: .spec.eip - name: Eip - type: string - - jsonPath: .status.v4ip - name: V4ip - type: string - - jsonPath: .spec.internalIp - name: InternalIp - type: string - - jsonPath: .status.v6ip - name: V6ip - type: string - - jsonPath: .status.ready - name: Ready - type: boolean - - jsonPath: .status.natGwDp - name: NatGwDp - type: string - schema: - openAPIV3Schema: - type: object - 
properties: - status: - type: object - properties: - ready: - type: boolean - v4ip: - type: string - v6ip: - type: string - natGwDp: - type: string - redo: - type: string - internalIp: - type: string - conditions: - type: array - items: - type: object - properties: - type: - type: string - status: - type: string - reason: - type: string - message: - type: string - lastUpdateTime: - type: string - lastTransitionTime: - type: string - spec: - type: object - properties: - eip: - type: string - internalIp: - type: string ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: iptables-dnat-rules.kubeovn.io -spec: - group: kubeovn.io - names: - plural: iptables-dnat-rules - singular: iptables-dnat-rule - shortNames: - - dnat - kind: IptablesDnatRule - listKind: IptablesDnatRuleList - scope: Cluster - versions: - - name: v1 - served: true - storage: true - subresources: - status: {} - additionalPrinterColumns: - - jsonPath: .spec.eip - name: Eip - type: string - - jsonPath: .spec.protocol - name: Protocol - type: string - - jsonPath: .status.v4ip - name: V4ip - type: string - - jsonPath: .status.v6ip - name: V6ip - type: string - - jsonPath: .spec.internalIp - name: InternalIp - type: string - - jsonPath: .spec.externalPort - name: ExternalPort - type: string - - jsonPath: .spec.internalPort - name: InternalPort - type: string - - jsonPath: .status.natGwDp - name: NatGwDp - type: string - - jsonPath: .status.ready - name: Ready - type: boolean - schema: - openAPIV3Schema: - type: object - properties: - status: - type: object - properties: - ready: - type: boolean - v4ip: - type: string - v6ip: - type: string - natGwDp: - type: string - redo: - type: string - protocol: - type: string - internalIp: - type: string - internalPort: - type: string - externalPort: - type: string - conditions: - type: array - items: - type: object - properties: - type: - type: string - status: - type: string - reason: - type: string - message: - type: string - lastUpdateTime: - type: string - lastTransitionTime: - type: string - spec: - type: object - properties: - eip: - type: string - externalPort: - type: string - protocol: - type: string - internalIp: - type: string - internalPort: - type: string ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: iptables-snat-rules.kubeovn.io -spec: - group: kubeovn.io - names: - plural: iptables-snat-rules - singular: iptables-snat-rule - shortNames: - - snat - kind: IptablesSnatRule - listKind: IptablesSnatRuleList - scope: Cluster - versions: - - name: v1 - served: true - storage: true - subresources: - status: {} - additionalPrinterColumns: - - jsonPath: .spec.eip - name: EIP - type: string - - jsonPath: .status.v4ip - name: V4ip - type: string - - jsonPath: .status.v6ip - name: V6ip - type: string - - jsonPath: .spec.internalCIDR - name: InternalCIDR - type: string - - jsonPath: .status.natGwDp - name: NatGwDp - type: string - - jsonPath: .status.ready - name: Ready - type: boolean - schema: - openAPIV3Schema: - type: object - properties: - status: - type: object - properties: - ready: - type: boolean - v4ip: - type: string - v6ip: - type: string - natGwDp: - type: string - redo: - type: string - internalCIDR: - type: string - conditions: - type: array - items: - type: object - properties: - type: - type: string - status: - type: string - reason: - type: string - message: - type: string - lastUpdateTime: - type: string - lastTransitionTime: - type: string - spec: - type: object - properties: - eip: 
- type: string - internalCIDR: - type: string ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: ovn-eips.kubeovn.io -spec: - group: kubeovn.io - names: - plural: ovn-eips - singular: ovn-eip - shortNames: - - oeip - kind: OvnEip - listKind: OvnEipList - scope: Cluster - versions: - - name: v1 - served: true - storage: true - subresources: - status: {} - additionalPrinterColumns: - - jsonPath: .status.v4Ip - name: V4IP - type: string - - jsonPath: .status.v6Ip - name: V6IP - type: string - - jsonPath: .status.macAddress - name: Mac - type: string - - jsonPath: .status.type - name: Type - type: string - - jsonPath: .status.nat - name: Nat - type: string - - jsonPath: .status.ready - name: Ready - type: boolean - schema: - openAPIV3Schema: - type: object - properties: - status: - type: object - properties: - type: - type: string - nat: - type: string - ready: - type: boolean - v4Ip: - type: string - v6Ip: - type: string - macAddress: - type: string - conditions: - type: array - items: - type: object - properties: - type: - type: string - status: - type: string - reason: - type: string - message: - type: string - lastUpdateTime: - type: string - lastTransitionTime: - type: string - spec: - type: object - properties: - externalSubnet: - type: string - type: - type: string - v4Ip: - type: string - v6Ip: - type: string - macAddress: - type: string ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: ovn-fips.kubeovn.io -spec: - group: kubeovn.io - names: - plural: ovn-fips - singular: ovn-fip - shortNames: - - ofip - kind: OvnFip - listKind: OvnFipList - scope: Cluster - versions: - - name: v1 - served: true - storage: true - subresources: - status: {} - additionalPrinterColumns: - - jsonPath: .status.vpc - name: Vpc - type: string - - jsonPath: .status.v4Eip - name: V4Eip - type: string - - jsonPath: .status.v4Ip - name: V4Ip - type: string - - jsonPath: .status.ready - name: Ready - type: boolean - - jsonPath: .spec.ipType - name: IpType - type: string - - jsonPath: .spec.ipName - name: IpName - type: string - schema: - openAPIV3Schema: - type: object - properties: - status: - type: object - properties: - ready: - type: boolean - v4Eip: - type: string - v4Ip: - type: string - vpc: - type: string - conditions: - type: array - items: - type: object - properties: - type: - type: string - status: - type: string - reason: - type: string - message: - type: string - lastUpdateTime: - type: string - lastTransitionTime: - type: string - spec: - type: object - properties: - ovnEip: - type: string - ipType: - type: string - ipName: - type: string - vpc: - type: string - v4Ip: - type: string ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: ovn-snat-rules.kubeovn.io -spec: - group: kubeovn.io - names: - plural: ovn-snat-rules - singular: ovn-snat-rule - shortNames: - - osnat - kind: OvnSnatRule - listKind: OvnSnatRuleList - scope: Cluster - versions: - - name: v1 - served: true - storage: true - subresources: - status: {} - additionalPrinterColumns: - - jsonPath: .status.vpc - name: Vpc - type: string - - jsonPath: .status.v4Eip - name: V4Eip - type: string - - jsonPath: .status.v4IpCidr - name: V4IpCidr - type: string - - jsonPath: .status.ready - name: Ready - type: boolean - schema: - openAPIV3Schema: - type: object - properties: - status: - type: object - properties: - ready: - type: boolean - v4Eip: - type: string - v4IpCidr: - type: string - vpc: - type: string - conditions: - 
type: array - items: - type: object - properties: - type: - type: string - status: - type: string - reason: - type: string - message: - type: string - lastUpdateTime: - type: string - lastTransitionTime: - type: string - spec: - type: object - properties: - ovnEip: - type: string - vpcSubnet: - type: string - ipName: - type: string - vpc: - type: string - v4IpCidr: - type: string ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: ovn-dnat-rules.kubeovn.io -spec: - group: kubeovn.io - names: - plural: ovn-dnat-rules - singular: ovn-dnat-rule - shortNames: - - odnat - kind: OvnDnatRule - listKind: OvnDnatRuleList - scope: Cluster - versions: - - name: v1 - served: true - storage: true - subresources: - status: {} - additionalPrinterColumns: - - jsonPath: .status.vpc - name: Vpc - type: string - - jsonPath: .spec.ovnEip - name: Eip - type: string - - jsonPath: .status.protocol - name: Protocol - type: string - - jsonPath: .status.v4Eip - name: V4Eip - type: string - - jsonPath: .status.v4Ip - name: V4Ip - type: string - - jsonPath: .status.internalPort - name: InternalPort - type: string - - jsonPath: .status.externalPort - name: ExternalPort - type: string - - jsonPath: .spec.ipName - name: IpName - type: string - - jsonPath: .status.ready - name: Ready - type: boolean - schema: - openAPIV3Schema: - type: object - properties: - status: - type: object - properties: - ready: - type: boolean - v4Eip: - type: string - v4Ip: - type: string - vpc: - type: string - externalPort: - type: string - internalPort: - type: string - protocol: - type: string - ipName: - type: string - conditions: - type: array - items: - type: object - properties: - type: - type: string - status: - type: string - reason: - type: string - message: - type: string - lastUpdateTime: - type: string - lastTransitionTime: - type: string - spec: - type: object - properties: - ovnEip: - type: string - ipType: - type: string - ipName: - type: string - externalPort: - type: string - internalPort: - type: string - protocol: - type: string - vpc: - type: string - v4Ip: - type: string ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: vpcs.kubeovn.io -spec: - group: kubeovn.io - versions: - - additionalPrinterColumns: - - jsonPath: .status.enableExternal - name: EnableExternal - type: boolean - - jsonPath: .status.enableBfd - name: EnableBfd - type: boolean - - jsonPath: .status.standby - name: Standby - type: boolean - - jsonPath: .status.subnets - name: Subnets - type: string - - jsonPath: .status.extraExternalSubnets - name: ExtraExternalSubnets - type: string - - jsonPath: .spec.namespaces - name: Namespaces - type: string - name: v1 - schema: - openAPIV3Schema: - properties: - spec: - properties: - enableExternal: - type: boolean - enableBfd: - type: boolean - namespaces: - items: - type: string - type: array - extraExternalSubnets: - items: - type: string - type: array - staticRoutes: - items: - properties: - policy: - type: string - cidr: - type: string - nextHopIP: - type: string - ecmpMode: - type: string - bfdId: - type: string - routeTable: - type: string - type: object - type: array - policyRoutes: - items: - properties: - priority: - type: integer - action: - type: string - match: - type: string - nextHopIP: - type: string - type: object - type: array - vpcPeerings: - items: - properties: - remoteVpc: - type: string - localConnectIP: - type: string - type: object - type: array - type: object - status: - properties: - conditions: - items: - 
properties: - lastTransitionTime: - type: string - lastUpdateTime: - type: string - message: - type: string - reason: - type: string - status: - type: string - type: - type: string - type: object - type: array - default: - type: boolean - defaultLogicalSwitch: - type: string - router: - type: string - standby: - type: boolean - enableExternal: - type: boolean - enableBfd: - type: boolean - subnets: - items: - type: string - type: array - extraExternalSubnets: - items: - type: string - type: array - vpcPeerings: - items: - type: string - type: array - tcpLoadBalancer: - type: string - tcpSessionLoadBalancer: - type: string - udpLoadBalancer: - type: string - udpSessionLoadBalancer: - type: string - sctpLoadBalancer: - type: string - sctpSessionLoadBalancer: - type: string - type: object - type: object - served: true - storage: true - subresources: - status: {} - names: - kind: Vpc - listKind: VpcList - plural: vpcs - shortNames: - - vpc - singular: vpc - scope: Cluster ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: ips.kubeovn.io -spec: - group: kubeovn.io - versions: - - name: v1 - served: true - storage: true - additionalPrinterColumns: - - name: V4IP - type: string - jsonPath: .spec.v4IpAddress - - name: V6IP - type: string - jsonPath: .spec.v6IpAddress - - name: Mac - type: string - jsonPath: .spec.macAddress - - name: Node - type: string - jsonPath: .spec.nodeName - - name: Subnet - type: string - jsonPath: .spec.subnet - schema: - openAPIV3Schema: - type: object - properties: - spec: - type: object - properties: - podName: - type: string - namespace: - type: string - subnet: - type: string - attachSubnets: - type: array - items: - type: string - nodeName: - type: string - ipAddress: - type: string - v4IpAddress: - type: string - v6IpAddress: - type: string - attachIps: - type: array - items: - type: string - macAddress: - type: string - attachMacs: - type: array - items: - type: string - containerID: - type: string - podType: - type: string - scope: Cluster - names: - plural: ips - singular: ip - kind: IP - shortNames: - - ip ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: vips.kubeovn.io -spec: - group: kubeovn.io - names: - plural: vips - singular: vip - shortNames: - - vip - kind: Vip - listKind: VipList - scope: Cluster - versions: - - name: v1 - served: true - storage: true - additionalPrinterColumns: - - name: V4IP - type: string - jsonPath: .status.v4ip - - name: V6IP - type: string - jsonPath: .status.v6ip - - name: Mac - type: string - jsonPath: .status.mac - - name: PMac - type: string - jsonPath: .spec.parentMac - - name: Subnet - type: string - jsonPath: .spec.subnet - - jsonPath: .status.ready - name: Ready - type: boolean - - jsonPath: .status.type - name: Type - type: string - schema: - openAPIV3Schema: - type: object - properties: - status: - type: object - properties: - type: - type: string - ready: - type: boolean - v4ip: - type: string - v6ip: - type: string - mac: - type: string - pv4ip: - type: string - pv6ip: - type: string - pmac: - type: string - conditions: - type: array - items: - type: object - properties: - type: - type: string - status: - type: string - reason: - type: string - message: - type: string - lastUpdateTime: - type: string - lastTransitionTime: - type: string - spec: - type: object - properties: - namespace: - type: string - subnet: - type: string - type: - type: string - attachSubnets: - type: array - items: - type: string - v4ip: - type: string - 
macAddress: - type: string - v6ip: - type: string - parentV4ip: - type: string - parentMac: - type: string - parentV6ip: - type: string ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: subnets.kubeovn.io -spec: - group: kubeovn.io - versions: - - name: v1 - served: true - storage: true - subresources: - status: {} - additionalPrinterColumns: - - name: Provider - type: string - jsonPath: .spec.provider - - name: Vpc - type: string - jsonPath: .spec.vpc - - name: Protocol - type: string - jsonPath: .spec.protocol - - name: CIDR - type: string - jsonPath: .spec.cidrBlock - - name: Private - type: boolean - jsonPath: .spec.private - - name: NAT - type: boolean - jsonPath: .spec.natOutgoing - - name: Default - type: boolean - jsonPath: .spec.default - - name: GatewayType - type: string - jsonPath: .spec.gatewayType - - name: V4Used - type: number - jsonPath: .status.v4usingIPs - - name: V4Available - type: number - jsonPath: .status.v4availableIPs - - name: V6Used - type: number - jsonPath: .status.v6usingIPs - - name: V6Available - type: number - jsonPath: .status.v6availableIPs - - name: ExcludeIPs - type: string - jsonPath: .spec.excludeIps - - name: U2OInterconnectionIP - type: string - jsonPath: .status.u2oInterconnectionIP - schema: - openAPIV3Schema: - type: object - properties: - metadata: - type: object - properties: - name: - type: string - pattern: ^[^0-9] - status: - type: object - properties: - v4availableIPs: - type: number - v4usingIPs: - type: number - v6availableIPs: - type: number - v6usingIPs: - type: number - activateGateway: - type: string - dhcpV4OptionsUUID: - type: string - dhcpV6OptionsUUID: - type: string - u2oInterconnectionIP: - type: string - u2oInterconnectionVPC: - type: string - v4usingIPrange: - type: string - v4availableIPrange: - type: string - v6usingIPrange: - type: string - v6availableIPrange: - type: string - natOutgoingPolicyRules: - type: array - items: - type: object - properties: - ruleID: - type: string - action: - type: string - enum: - - nat - - forward - match: - type: object - properties: - srcIPs: - type: string - dstIPs: - type: string - conditions: - type: array - items: - type: object - properties: - type: - type: string - status: - type: string - reason: - type: string - message: - type: string - lastUpdateTime: - type: string - lastTransitionTime: - type: string - spec: - type: object - properties: - vpc: - type: string - default: - type: boolean - protocol: - type: string - enum: - - IPv4 - - IPv6 - - Dual - cidrBlock: - type: string - namespaces: - type: array - items: - type: string - gateway: - type: string - provider: - type: string - excludeIps: - type: array - items: - type: string - vips: - type: array - items: - type: string - gatewayType: - type: string - allowSubnets: - type: array - items: - type: string - gatewayNode: - type: string - natOutgoing: - type: boolean - externalEgressGateway: - type: string - policyRoutingPriority: - type: integer - minimum: 1 - maximum: 32765 - policyRoutingTableID: - type: integer - minimum: 1 - maximum: 2147483647 - not: - enum: - - 252 # compat - - 253 # default - - 254 # main - - 255 # local - mtu: - type: integer - minimum: 68 - maximum: 65535 - private: - type: boolean - vlan: - type: string - logicalGateway: - type: boolean - disableGatewayCheck: - type: boolean - disableInterConnection: - type: boolean - enableDHCP: - type: boolean - dhcpV4Options: - type: string - dhcpV6Options: - type: string - enableIPv6RA: - type: boolean - ipv6RAConfigs: - 
type: string - acls: - type: array - items: - type: object - properties: - direction: - type: string - enum: - - from-lport - - to-lport - priority: - type: integer - minimum: 0 - maximum: 32767 - match: - type: string - action: - type: string - enum: - - allow-related - - allow-stateless - - allow - - drop - - reject - natOutgoingPolicyRules: - type: array - items: - type: object - properties: - action: - type: string - enum: - - nat - - forward - match: - type: object - properties: - srcIPs: - type: string - dstIPs: - type: string - u2oInterconnection: - type: boolean - u2oInterconnectionIP: - type: string - enableLb: - type: boolean - enableEcmp: - type: boolean - enableMulticastSnoop: - type: boolean - routeTable: - type: string - scope: Cluster - names: - plural: subnets - singular: subnet - kind: Subnet - shortNames: - - subnet ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: ippools.kubeovn.io -spec: - group: kubeovn.io - versions: - - name: v1 - served: true - storage: true - subresources: - status: {} - additionalPrinterColumns: - - name: Subnet - type: string - jsonPath: .spec.subnet - - name: IPs - type: string - jsonPath: .spec.ips - - name: V4Used - type: number - jsonPath: .status.v4UsingIPs - - name: V4Available - type: number - jsonPath: .status.v4AvailableIPs - - name: V6Used - type: number - jsonPath: .status.v6UsingIPs - - name: V6Available - type: number - jsonPath: .status.v6AvailableIPs - schema: - openAPIV3Schema: - type: object - properties: - spec: - type: object - properties: - subnet: - type: string - x-kubernetes-validations: - - rule: "self == oldSelf" - message: "This field is immutable." - namespaces: - type: array - x-kubernetes-list-type: set - items: - type: string - ips: - type: array - minItems: 1 - x-kubernetes-list-type: set - items: - type: string - anyOf: - - format: ipv4 - - format: ipv6 - - format: cidr - - pattern: ^(?:(?:[01]?\d{1,2}|2[0-4]\d|25[0-5])\.){3}(?:[01]?\d{1,2}|2[0-4]\d|25[0-5])\.\.(?:(?:[01]?\d{1,2}|2[0-4]\d|25[0-5])\.){3}(?:[01]?\d{1,2}|2[0-4]\d|25[0-5])$ - - pattern: ^((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|:)))\.\.((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|:)))$ - required: - - subnet - - ips - status: - type: object - properties: - v4AvailableIPs: - type: number - v4UsingIPs: - type: number - v6AvailableIPs: - type: number - v6UsingIPs: - type: number - v4AvailableIPRange: - type: string - v4UsingIPRange: - type: string - v6AvailableIPRange: - type: string - v6UsingIPRange: - type: string - conditions: - type: array - items: - type: object - properties: - type: - type: string - status: - type: string - reason: - type: string - message: - type: string - lastUpdateTime: - type: string - lastTransitionTime: - type: string - scope: Cluster - names: - plural: ippools - singular: ippool - kind: 
IPPool - shortNames: - - ippool ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: vlans.kubeovn.io -spec: - group: kubeovn.io - versions: - - name: v1 - served: true - storage: true - subresources: - status: {} - schema: - openAPIV3Schema: - type: object - properties: - spec: - type: object - properties: - id: - type: integer - minimum: 0 - maximum: 4095 - provider: - type: string - vlanId: - type: integer - description: Deprecated in favor of id - providerInterfaceName: - type: string - description: Deprecated in favor of provider - required: - - provider - status: - type: object - properties: - subnets: - type: array - items: - type: string - additionalPrinterColumns: - - name: ID - type: string - jsonPath: .spec.id - - name: Provider - type: string - jsonPath: .spec.provider - scope: Cluster - names: - plural: vlans - singular: vlan - kind: Vlan - shortNames: - - vlan ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: provider-networks.kubeovn.io -spec: - group: kubeovn.io - versions: - - name: v1 - served: true - storage: true - subresources: - status: {} - schema: - openAPIV3Schema: - type: object - properties: - metadata: - type: object - properties: - name: - type: string - maxLength: 12 - not: - enum: - - int - spec: - type: object - properties: - defaultInterface: - type: string - maxLength: 15 - pattern: '^[^/\s]+$' - customInterfaces: - type: array - items: - type: object - properties: - interface: - type: string - maxLength: 15 - pattern: '^[^/\s]+$' - nodes: - type: array - items: - type: string - exchangeLinkName: - type: boolean - excludeNodes: - type: array - items: - type: string - required: - - defaultInterface - status: - type: object - properties: - ready: - type: boolean - readyNodes: - type: array - items: - type: string - notReadyNodes: - type: array - items: - type: string - vlans: - type: array - items: - type: string - conditions: - type: array - items: - type: object - properties: - node: - type: string - type: - type: string - status: - type: string - reason: - type: string - message: - type: string - lastUpdateTime: - type: string - lastTransitionTime: - type: string - additionalPrinterColumns: - - name: DefaultInterface - type: string - jsonPath: .spec.defaultInterface - - name: Ready - type: boolean - jsonPath: .status.ready - scope: Cluster - names: - plural: provider-networks - singular: provider-network - kind: ProviderNetwork - listKind: ProviderNetworkList ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: security-groups.kubeovn.io -spec: - group: kubeovn.io - names: - plural: security-groups - singular: security-group - shortNames: - - sg - kind: SecurityGroup - listKind: SecurityGroupList - scope: Cluster - versions: - - name: v1 - served: true - storage: true - schema: - openAPIV3Schema: - type: object - properties: - spec: - type: object - properties: - ingressRules: - type: array - items: - type: object - properties: - ipVersion: - type: string - protocol: - type: string - priority: - type: integer - remoteType: - type: string - remoteAddress: - type: string - remoteSecurityGroup: - type: string - portRangeMin: - type: integer - portRangeMax: - type: integer - policy: - type: string - egressRules: - type: array - items: - type: object - properties: - ipVersion: - type: string - protocol: - type: string - priority: - type: integer - remoteType: - type: string - remoteAddress: - type: string - remoteSecurityGroup: - type: 
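Putting the subnets.kubeovn.io schema above to work, a minimal Subnet object could look like the following sketch; every value is illustrative. Note that the schema's metadata.name pattern (^[^0-9]) forbids names that start with a digit, and protocol must be one of IPv4, IPv6 or Dual.

    apiVersion: kubeovn.io/v1
    kind: Subnet
    metadata:
      name: demo-subnet              # must not start with a digit
    spec:
      protocol: IPv4
      cidrBlock: 10.66.0.0/16        # illustrative CIDR
      gateway: 10.66.0.1
      excludeIps:
        - 10.66.0.1..10.66.0.10      # assumed range syntax, as in the ippool patterns above
      natOutgoing: true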
string - portRangeMin: - type: integer - portRangeMax: - type: integer - policy: - type: string - allowSameGroupTraffic: - type: boolean - status: - type: object - properties: - portGroup: - type: string - allowSameGroupTraffic: - type: boolean - ingressMd5: - type: string - egressMd5: - type: string - ingressLastSyncSuccess: - type: boolean - egressLastSyncSuccess: - type: boolean - subresources: - status: {} - conversion: - strategy: None ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: qos-policies.kubeovn.io -spec: - group: kubeovn.io - names: - plural: qos-policies - singular: qos-policy - shortNames: - - qos - kind: QoSPolicy - listKind: QoSPolicyList - scope: Cluster - versions: - - name: v1 - served: true - storage: true - subresources: - status: {} - additionalPrinterColumns: - - jsonPath: .spec.shared - name: Shared - type: string - - jsonPath: .spec.bindingType - name: BindingType - type: string - schema: - openAPIV3Schema: - type: object - properties: - status: - type: object - properties: - shared: - type: boolean - bindingType: - type: string - bandwidthLimitRules: - type: array - items: - type: object - properties: - name: - type: string - interface: - type: string - rateMax: - type: string - burstMax: - type: string - priority: - type: integer - direction: - type: string - matchType: - type: string - matchValue: - type: string - conditions: - type: array - items: - type: object - properties: - type: - type: string - status: - type: string - reason: - type: string - message: - type: string - lastUpdateTime: - type: string - lastTransitionTime: - type: string - spec: - type: object - properties: - shared: - type: boolean - bindingType: - type: string - bandwidthLimitRules: - type: array - items: - type: object - properties: - name: - type: string - interface: - type: string - rateMax: - type: string - burstMax: - type: string - priority: - type: integer - direction: - type: string - matchType: - type: string - matchValue: - type: string - required: - - name - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map diff --git a/roles/network_plugin/kube-ovn/templates/cni-kube-ovn.yml.j2 b/roles/network_plugin/kube-ovn/templates/cni-kube-ovn.yml.j2 deleted file mode 100644 index b0fad2ff550..00000000000 --- a/roles/network_plugin/kube-ovn/templates/cni-kube-ovn.yml.j2 +++ /dev/null @@ -1,912 +0,0 @@ ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: ovn-vpc-nat-config - namespace: kube-system - annotations: - kubernetes.io/description: | - kube-ovn vpc-nat common config -data: - image: {{ kube_ovn_vpc_container_image_repo }}:{{ kube_ovn_vpc_container_image_tag }} ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: ovn-vpc-nat-gw-config - namespace: kube-system -data: - enable-vpc-nat-gw: "true" ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: kube-ovn-cni - namespace: kube-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - annotations: - rbac.authorization.k8s.io/system-only: "true" - name: system:kube-ovn-cni -rules: - - apiGroups: - - "kubeovn.io" - resources: - - subnets - - vlans - - provider-networks - verbs: - - get - - list - - watch - - apiGroups: - - "" - - "kubeovn.io" - resources: - - ovn-eips - - ovn-eips/status - - nodes - - pods - - vlans - verbs: - - get - - list - - patch - - watch - - apiGroups: - - "kubeovn.io" - resources: - - ips - verbs: - - get - - update - - apiGroups: - - "" - resources: - - events - verbs: - - create - - patch - - update - - 
apiGroups: - - "" - resources: - - configmaps - verbs: - - get - - list - - watch - - apiGroups: - - authentication.k8s.io - resources: - - tokenreviews - verbs: - - create - - apiGroups: - - authorization.k8s.io - resources: - - subjectaccessreviews - verbs: - - create ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: kube-ovn-cni -roleRef: - name: system:kube-ovn-cni - kind: ClusterRole - apiGroup: rbac.authorization.k8s.io -subjects: - - kind: ServiceAccount - name: kube-ovn-cni - namespace: kube-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: kube-ovn-cni - namespace: kube-system -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: extension-apiserver-authentication-reader -subjects: - - kind: ServiceAccount - name: kube-ovn-cni - namespace: kube-system ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: kube-ovn-app - namespace: kube-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - annotations: - rbac.authorization.k8s.io/system-only: "true" - name: system:kube-ovn-app -rules: - - apiGroups: - - "" - resources: - - pods - - nodes - verbs: - - get - - list - - apiGroups: - - apps - resources: - - daemonsets - verbs: - - get - - apiGroups: - - authentication.k8s.io - resources: - - tokenreviews - verbs: - - create - - apiGroups: - - authorization.k8s.io - resources: - - subjectaccessreviews - verbs: - - create ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: kube-ovn-app -roleRef: - name: system:kube-ovn-app - kind: ClusterRole - apiGroup: rbac.authorization.k8s.io -subjects: - - kind: ServiceAccount - name: kube-ovn-app - namespace: kube-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: kube-ovn-app - namespace: kube-system -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: extension-apiserver-authentication-reader -subjects: - - kind: ServiceAccount - name: kube-ovn-app - namespace: kube-system ---- -kind: Deployment -apiVersion: apps/v1 -metadata: - name: kube-ovn-controller - namespace: kube-system - annotations: - kubernetes.io/description: | - kube-ovn controller -spec: - replicas: {{ kube_ovn_controller_replics }} - selector: - matchLabels: - app: kube-ovn-controller - strategy: - rollingUpdate: - maxSurge: 0% - maxUnavailable: 100% - type: RollingUpdate - template: - metadata: - labels: - app: kube-ovn-controller - component: network - type: infra - spec: - tolerations: - - effect: NoSchedule - operator: Exists - - key: CriticalAddonsOnly - operator: Exists - affinity: - nodeAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - preference: - matchExpressions: - - key: "ovn.kubernetes.io/ic-gw" - operator: NotIn - values: - - "true" - weight: 100 - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - app: kube-ovn-controller - topologyKey: kubernetes.io/hostname - priorityClassName: system-cluster-critical - serviceAccountName: ovn - hostNetwork: true - containers: - - name: kube-ovn-controller - image: {{ kube_ovn_container_image_repo }}:{{ kube_ovn_container_image_tag }} - imagePullPolicy: {{ k8s_image_pull_policy }} - args: - - /kube-ovn/start-controller.sh - - --default-cidr={{ kube_pods_subnets }} - - --default-gateway={% if kube_ovn_default_gateway is defined %}{{ kube_ovn_default_gateway }}{% endif %}{{ '' }} - - --default-gateway-check={{ 
kube_ovn_default_gateway_check | string }} - - --default-logical-gateway={{ kube_ovn_default_logical_gateway | string }} - - --default-u2o-interconnection={{ kube_ovn_u2o_interconnection }} - - --default-exclude-ips={% if kube_ovn_default_exclude_ips is defined %}{{ kube_ovn_default_exclude_ips }}{% endif %}{{ '' }} - - --node-switch-cidr={{ kube_ovn_node_switch_cidr_merged }} - - --service-cluster-ip-range={{ kube_service_subnets }} - - --network-type={{ kube_ovn_network_type }} - - --default-interface-name={{ kube_ovn_default_interface_name | default('') }} - - --default-vlan-id={{ kube_ovn_default_vlan_id }} - - --ls-dnat-mod-dl-dst={{ kube_ovn_ls_dnat_mod_dl_dst }} - - --pod-nic-type={{ kube_ovn_pod_nic_type }} - - --enable-lb={{ kube_ovn_enable_lb | string }} - - --enable-np={{ kube_ovn_enable_np | string }} - - --enable-eip-snat={{ kube_ovn_eip_snat_enabled }} - - --enable-external-vpc={{ kube_ovn_enable_external_vpc | string }} - - --logtostderr=false - - --alsologtostderr=true - - --gc-interval=360 - - --inspect-interval=20 - - --log_file=/var/log/kube-ovn/kube-ovn-controller.log - - --log_file_max_size=0 - - --enable-lb-svc=false - - --keep-vm-ip={{ kube_ovn_keep_vm_ip }} - securityContext: - runAsUser: 0 - privileged: false - capabilities: - add: - - NET_BIND_SERVICE - env: - - name: ENABLE_SSL - value: "{{ kube_ovn_enable_ssl | lower }}" - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: KUBE_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: KUBE_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: OVN_DB_IPS - value: "{{ kube_ovn_central_ips }}" - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: POD_IPS - valueFrom: - fieldRef: - fieldPath: status.podIPs - - name: ENABLE_BIND_LOCAL_IP - value: "{{ kube_ovn_bind_local_ip_enabled }}" - volumeMounts: - - mountPath: /etc/localtime - name: localtime - - mountPath: /var/log/kube-ovn - name: kube-ovn-log - - mountPath: /var/log/ovn - name: ovn-log - - mountPath: /var/run/tls - name: kube-ovn-tls - readinessProbe: - exec: - command: - - /kube-ovn/kube-ovn-healthcheck - - --port=10660 - - --tls=false - periodSeconds: 3 - timeoutSeconds: 45 - livenessProbe: - exec: - command: - - /kube-ovn/kube-ovn-healthcheck - - --port=10660 - - --tls=false - initialDelaySeconds: 300 - periodSeconds: 7 - failureThreshold: 5 - timeoutSeconds: 45 - resources: - requests: - cpu: {{ kube_ovn_controller_cpu_request }} - memory: {{ kube_ovn_controller_memory_request }} - limits: - cpu: {{ kube_ovn_controller_cpu_limit }} - memory: {{ kube_ovn_controller_memory_limit }} - nodeSelector: - kubernetes.io/os: "linux" - volumes: - - name: localtime - hostPath: - path: /etc/localtime - - name: kube-ovn-log - hostPath: - path: /var/log/kube-ovn - - name: ovn-log - hostPath: - path: /var/log/ovn - - name: kube-ovn-tls - secret: - optional: true - secretName: kube-ovn-tls - ---- -kind: DaemonSet -apiVersion: apps/v1 -metadata: - name: kube-ovn-cni - namespace: kube-system - annotations: - kubernetes.io/description: | - This daemon set launches the kube-ovn cni daemon. 
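Every flag in the kube-ovn-controller args above is driven by a role variable of the same name. A group_vars sketch pinning a few of them might look like this; the file path is hypothetical and the values are illustrative, not the role's defaults.

    # inventory/mycluster/group_vars/k8s_cluster/k8s-net-kube-ovn.yml (hypothetical path)
    kube_ovn_default_gateway_check: true
    kube_ovn_enable_lb: true
    kube_ovn_enable_np: true
    kube_ovn_controller_cpu_request: 200m
    kube_ovn_controller_memory_request: 200Mi
    kube_ovn_controller_cpu_limit: "1"
    kube_ovn_controller_memory_limit: 1Gi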
-spec: - selector: - matchLabels: - app: kube-ovn-cni - template: - metadata: - labels: - app: kube-ovn-cni - component: network - type: infra - spec: - tolerations: - - effect: NoSchedule - operator: Exists - - effect: NoExecute - operator: Exists - - key: CriticalAddonsOnly - operator: Exists - priorityClassName: system-node-critical - serviceAccountName: kube-ovn-cni - hostNetwork: true - hostPID: true - initContainers: - - name: install-cni - image: {{ kube_ovn_container_image_repo }}:{{ kube_ovn_container_image_tag }} - imagePullPolicy: {{ k8s_image_pull_policy }} - command: ["/kube-ovn/install-cni.sh"] - securityContext: - runAsUser: 0 - privileged: true - volumeMounts: - - mountPath: /opt/cni/bin - name: cni-bin - - mountPath: /usr/local/bin - name: local-bin - containers: - - name: cni-server - image: {{ kube_ovn_container_image_repo }}:{{ kube_ovn_container_image_tag }} - imagePullPolicy: {{ k8s_image_pull_policy }} - command: - - bash - - /kube-ovn/start-cniserver.sh - args: - - --enable-mirror={{ kube_ovn_traffic_mirror | lower }} - - --encap-checksum={{ kube_ovn_encap_checksum | lower }} - - --service-cluster-ip-range={{ kube_service_subnets }} - - --iface={{ kube_ovn_iface | default('') }} - - --dpdk-tunnel-iface={{ kube_ovn_dpdk_tunnel_iface }} - - --network-type={{ kube_ovn_network_type }} - - --default-interface-name={{ kube_ovn_default_interface_name | default('') }} - {% if kube_ovn_mtu is defined %} - - --mtu={{ kube_ovn_mtu }} -{% endif %} - - --cni-conf-name={{ kube_ovn_cni_config_priority }}-kube-ovn.conflist - - --logtostderr=false - - --alsologtostderr=true - - --log_file=/var/log/kube-ovn/kube-ovn-cni.log - - --log_file_max_size=0 - securityContext: - runAsUser: 0 - privileged: false - capabilities: - add: - - NET_ADMIN - - NET_BIND_SERVICE - - NET_RAW - - SYS_ADMIN - env: - - name: ENABLE_SSL - value: "{{ kube_ovn_enable_ssl | lower }}" - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: KUBE_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: MODULES - value: kube_ovn_fastpath.ko - - name: RPMS - value: openvswitch-kmod - - name: POD_IPS - valueFrom: - fieldRef: - fieldPath: status.podIPs - - name: ENABLE_BIND_LOCAL_IP - value: "{{ kube_ovn_bind_local_ip_enabled }}" - - name: DBUS_SYSTEM_BUS_ADDRESS - value: "unix:path=/host/var/run/dbus/system_bus_socket" - volumeMounts: - - name: host-modules - mountPath: /lib/modules - readOnly: true - - name: shared-dir - mountPath: $KUBELET_DIR/pods - - mountPath: /etc/openvswitch - name: systemid - readOnly: true - - mountPath: /etc/cni/net.d - name: cni-conf - - mountPath: /run/openvswitch - name: host-run-ovs - mountPropagation: HostToContainer - - mountPath: /run/ovn - name: host-run-ovn - - mountPath: /host/var/run/dbus - name: host-dbus - mountPropagation: HostToContainer - - mountPath: /var/run/netns - name: host-ns - mountPropagation: HostToContainer - - mountPath: /var/log/kube-ovn - name: kube-ovn-log - - mountPath: /var/log/openvswitch - name: host-log-ovs - - mountPath: /var/log/ovn - name: host-log-ovn - - mountPath: /etc/localtime - name: localtime - readOnly: true - - mountPath: /tmp - name: tmp - livenessProbe: - failureThreshold: 3 - initialDelaySeconds: 30 - periodSeconds: 7 - successThreshold: 1 - exec: - command: - - /kube-ovn/kube-ovn-healthcheck - - --port=10665 - - --tls=false - timeoutSeconds: 5 - readinessProbe: - failureThreshold: 3 - periodSeconds: 7 - successThreshold: 1 - exec: - command: - - /kube-ovn/kube-ovn-healthcheck - - --port=10665 - - 
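The cni-server arguments above follow the same pattern; a sketch of the matching inventory variables, with illustrative values:

    kube_ovn_iface: eth1                 # NIC used for tunnel traffic; hypothetical name
    kube_ovn_mtu: 1400                   # only rendered when defined, per the template above
    kube_ovn_traffic_mirror: false
    kube_ovn_encap_checksum: true
    kube_ovn_cni_config_priority: "01"   # yields 01-kube-ovn.conflist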
--tls=false - timeoutSeconds: 5 - resources: - requests: - cpu: {{ kube_ovn_cni_server_cpu_request }} - memory: {{ kube_ovn_cni_server_memory_request }} - limits: - cpu: {{ kube_ovn_cni_server_cpu_limit }} - memory: {{ kube_ovn_cni_server_memory_limit }} - nodeSelector: - kubernetes.io/os: "linux" - volumes: - - name: host-modules - hostPath: - path: /lib/modules - - name: shared-dir - hostPath: - path: /var/lib/kubelet/pods - - name: systemid - hostPath: - path: /etc/origin/openvswitch - - name: host-run-ovs - hostPath: - path: /run/openvswitch - - name: host-run-ovn - hostPath: - path: /run/ovn - - name: cni-conf - hostPath: - path: /etc/cni/net.d - - name: cni-bin - hostPath: - path: /opt/cni/bin - - name: host-ns - hostPath: - path: /var/run/netns - - name: host-dbus - hostPath: - path: /var/run/dbus - - name: host-log-ovs - hostPath: - path: /var/log/openvswitch - - name: kube-ovn-log - hostPath: - path: /var/log/kube-ovn - - name: host-log-ovn - hostPath: - path: /var/log/ovn - - name: localtime - hostPath: - path: /etc/localtime - - name: tmp - hostPath: - path: /tmp - - name: local-bin - hostPath: - path: /usr/local/bin ---- -kind: DaemonSet -apiVersion: apps/v1 -metadata: - name: kube-ovn-pinger - namespace: kube-system - annotations: - kubernetes.io/description: | - This daemon set launches the openvswitch daemon. -spec: - selector: - matchLabels: - app: kube-ovn-pinger - updateStrategy: - type: RollingUpdate - template: - metadata: - labels: - app: kube-ovn-pinger - component: network - type: infra - spec: - priorityClassName: system-node-critical - serviceAccountName: ovn - hostPID: true - containers: - - name: pinger - image: {{ kube_ovn_container_image_repo }}:{{ kube_ovn_container_image_tag }} - command: - - /kube-ovn/kube-ovn-pinger - args: - - --external-address={{ kube_ovn_external_address_merged }} - - --external-dns={{ kube_ovn_external_dns }} - - --logtostderr=false - - --alsologtostderr=true - - --log_file=/var/log/kube-ovn/kube-ovn-pinger.log - - --log_file_max_size=0 - imagePullPolicy: {{ k8s_image_pull_policy }} - securityContext: - runAsUser: 0 - privileged: false - env: - - name: ENABLE_SSL - value: "{{ kube_ovn_enable_ssl | lower }}" - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: HOST_IP - valueFrom: - fieldRef: - fieldPath: status.hostIP - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - volumeMounts: - - mountPath: /var/run/openvswitch - name: host-run-ovs - - mountPath: /var/run/ovn - name: host-run-ovn - - mountPath: /etc/openvswitch - name: host-config-openvswitch - - mountPath: /var/log/openvswitch - name: host-log-ovs - readOnly: true - - mountPath: /var/log/ovn - name: host-log-ovn - readOnly: true - - mountPath: /var/log/kube-ovn - name: kube-ovn-log - - mountPath: /etc/localtime - name: localtime - readOnly: true - - mountPath: /var/run/tls - name: kube-ovn-tls - resources: - requests: - cpu: {{ kube_ovn_pinger_cpu_request }} - memory: {{ kube_ovn_pinger_memory_request }} - limits: - cpu: {{ kube_ovn_pinger_cpu_limit }} - memory: {{ kube_ovn_pinger_memory_limit }} - nodeSelector: - kubernetes.io/os: "linux" - volumes: - - name: host-run-ovs - hostPath: - path: /run/openvswitch - - name: host-run-ovn - hostPath: - path: /run/ovn - - name: host-config-openvswitch - hostPath: - path: /etc/origin/openvswitch - - name: host-log-ovs - hostPath: - path: /var/log/openvswitch - - name: kube-ovn-log - hostPath: - path: /var/log/kube-ovn - 
- name: host-log-ovn - hostPath: - path: /var/log/ovn - - name: localtime - hostPath: - path: /etc/localtime - - name: kube-ovn-tls - secret: - optional: true - secretName: kube-ovn-tls ---- -kind: Deployment -apiVersion: apps/v1 -metadata: - name: kube-ovn-monitor - namespace: kube-system - annotations: - kubernetes.io/description: | - Metrics for OVN components: northd, nb and sb. -spec: - replicas: 1 - strategy: - rollingUpdate: - maxSurge: 1 - maxUnavailable: 1 - type: RollingUpdate - selector: - matchLabels: - app: kube-ovn-monitor - template: - metadata: - labels: - app: kube-ovn-monitor - component: network - type: infra - spec: - tolerations: - - effect: NoSchedule - operator: Exists - - key: CriticalAddonsOnly - operator: Exists - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - app: kube-ovn-monitor - topologyKey: kubernetes.io/hostname - priorityClassName: system-cluster-critical - serviceAccountName: ovn - hostNetwork: true - containers: - - name: kube-ovn-monitor - image: {{ kube_ovn_container_image_repo }}:{{ kube_ovn_container_image_tag }} - imagePullPolicy: {{ k8s_image_pull_policy }} - command: ["/kube-ovn/start-ovn-monitor.sh"] - args: - - --secure-serving=false - - --log_file=/var/log/kube-ovn/kube-ovn-monitor.log - - --logtostderr=false - - --alsologtostderr=true - - --log_file_max_size=200 - securityContext: - runAsUser: 0 - privileged: false - env: - - name: ENABLE_SSL - value: "{{ kube_ovn_enable_ssl | lower }}" - - name: KUBE_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: POD_IPS - valueFrom: - fieldRef: - fieldPath: status.podIPs - - name: ENABLE_BIND_LOCAL_IP - value: "{{ kube_ovn_bind_local_ip_enabled }}" - resources: - requests: - cpu: {{ kube_ovn_monitor_cpu_request }} - memory: {{ kube_ovn_monitor_memory_request }} - limits: - cpu: {{ kube_ovn_monitor_cpu_limit }} - memory: {{ kube_ovn_monitor_memory_limit }} - volumeMounts: - - mountPath: /var/run/openvswitch - name: host-run-ovs - - mountPath: /var/run/ovn - name: host-run-ovn - - mountPath: /etc/openvswitch - name: host-config-openvswitch - - mountPath: /etc/ovn - name: host-config-ovn - - mountPath: /var/log/ovn - name: host-log-ovn - readOnly: true - - mountPath: /etc/localtime - name: localtime - readOnly: true - - mountPath: /var/run/tls - name: kube-ovn-tls - - mountPath: /var/log/kube-ovn - name: kube-ovn-log - livenessProbe: - failureThreshold: 3 - initialDelaySeconds: 30 - periodSeconds: 7 - successThreshold: 1 - exec: - command: - - /kube-ovn/kube-ovn-healthcheck - - --port=10661 - - --tls=false - timeoutSeconds: 5 - readinessProbe: - failureThreshold: 3 - initialDelaySeconds: 30 - periodSeconds: 7 - successThreshold: 1 - exec: - command: - - /kube-ovn/kube-ovn-healthcheck - - --port=10661 - - --tls=false - timeoutSeconds: 5 - nodeSelector: - kubernetes.io/os: "linux" - kube-ovn/role: "master" - volumes: - - name: host-run-ovs - hostPath: - path: /run/openvswitch - - name: host-run-ovn - hostPath: - path: /run/ovn - - name: host-config-openvswitch - hostPath: - path: /etc/origin/openvswitch - - name: host-config-ovn - hostPath: - path: /etc/origin/ovn - - name: host-log-ovs - hostPath: - path: /var/log/openvswitch - - name: host-log-ovn - hostPath: - path: /var/log/ovn - - name: localtime - hostPath: - path: /etc/localtime - - name: kube-ovn-tls - secret: - optional: true - secretName: kube-ovn-tls - - name: kube-ovn-log - hostPath: - path: 
/var/log/kube-ovn ---- -kind: Service -apiVersion: v1 -metadata: - name: kube-ovn-monitor - namespace: kube-system - labels: - app: kube-ovn-monitor -spec: - ports: - - name: metrics - port: 10661 - type: ClusterIP -{% if ipv6_stack %} - ipFamilyPolicy: PreferDualStack -{% endif %} - selector: - app: kube-ovn-monitor - sessionAffinity: None ---- -kind: Service -apiVersion: v1 -metadata: - name: kube-ovn-pinger - namespace: kube-system - labels: - app: kube-ovn-pinger -spec: -{% if ipv6_stack %} - ipFamilyPolicy: PreferDualStack -{% endif %} - selector: - app: kube-ovn-pinger - ports: - - port: 8080 - name: metrics ---- -kind: Service -apiVersion: v1 -metadata: - name: kube-ovn-controller - namespace: kube-system - labels: - app: kube-ovn-controller -spec: -{% if ipv6_stack %} - ipFamilyPolicy: PreferDualStack -{% endif %} - selector: - app: kube-ovn-controller - ports: - - port: 10660 - name: metrics ---- -kind: Service -apiVersion: v1 -metadata: - name: kube-ovn-cni - namespace: kube-system - labels: - app: kube-ovn-cni -spec: -{% if ipv6_stack %} - ipFamilyPolicy: PreferDualStack -{% endif %} - selector: - app: kube-ovn-cni - ports: - - port: 10665 - name: metrics -{% if kube_ovn_ic_enable %} ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: ovn-ic-config - namespace: kube-system -data: - enable-ic: "{{ kube_ovn_ic_enable | lower }}" - az-name: "{{ kube_ovn_ic_zone }}" - ic-db-host: "{{ kube_ovn_ic_dbhost }}" - ic-nb-port: "6645" - ic-sb-port: "6646" - gw-nodes: "{{ kube_ovn_central_hosts | join(',') }}" - auto-route: "{{ kube_ovn_ic_autoroute | lower }}" -{% endif %} diff --git a/roles/network_plugin/kube-ovn/templates/cni-ovn.yml.j2 b/roles/network_plugin/kube-ovn/templates/cni-ovn.yml.j2 deleted file mode 100644 index 09f0b291ae2..00000000000 --- a/roles/network_plugin/kube-ovn/templates/cni-ovn.yml.j2 +++ /dev/null @@ -1,674 +0,0 @@ ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: ovn-ovs - namespace: kube-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - annotations: - rbac.authorization.k8s.io/system-only: "true" - name: system:ovn-ovs -rules: - - apiGroups: - - "" - resources: - - pods - verbs: - - get - - patch - - apiGroups: - - "" - resources: - - services - - endpoints - verbs: - - get - - apiGroups: - - apps - resources: - - controllerrevisions - verbs: - - get - - list ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: ovn-ovs -roleRef: - name: system:ovn-ovs - kind: ClusterRole - apiGroup: rbac.authorization.k8s.io -subjects: - - kind: ServiceAccount - name: ovn-ovs - namespace: kube-system ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: ovn - namespace: kube-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - annotations: - rbac.authorization.k8s.io/system-only: "true" - name: system:ovn -rules: - - apiGroups: - - "kubeovn.io" - resources: - - vpcs - - vpcs/status - - vpc-nat-gateways - - vpc-nat-gateways/status - - subnets - - subnets/status - - ippools - - ippools/status - - ips - - vips - - vips/status - - vlans - - vlans/status - - provider-networks - - provider-networks/status - - security-groups - - security-groups/status - - iptables-eips - - iptables-fip-rules - - iptables-dnat-rules - - iptables-snat-rules - - iptables-eips/status - - iptables-fip-rules/status - - iptables-dnat-rules/status - - iptables-snat-rules/status - - ovn-eips - - ovn-fips - - ovn-snat-rules - - ovn-eips/status - - ovn-fips/status - - 
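The ovn-ic-config ConfigMap above is only rendered when OVN interconnection is turned on. A minimal inventory sketch, assuming an interconnection database is already reachable (names and addresses illustrative):

    kube_ovn_ic_enable: true
    kube_ovn_ic_zone: az1               # availability-zone name for this cluster
    kube_ovn_ic_dbhost: 192.0.2.10      # host running the OVN IC databases
    kube_ovn_ic_autoroute: true

The gw-nodes key is filled from kube_ovn_central_hosts by the template above, so it needs no separate setting.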
ovn-snat-rules/status - - ovn-dnat-rules - - ovn-dnat-rules/status - - switch-lb-rules - - switch-lb-rules/status - - vpc-dnses - - vpc-dnses/status - - qos-policies - - qos-policies/status - verbs: - - "*" - - apiGroups: - - "" - resources: - - pods - - namespaces - verbs: - - get - - list - - patch - - watch - - apiGroups: - - "" - resources: - - nodes - verbs: - - get - - list - - patch - - update - - watch - - apiGroups: - - "" - resources: - - pods/exec - verbs: - - create - - apiGroups: - - "k8s.cni.cncf.io" - resources: - - network-attachment-definitions - verbs: - - get - - apiGroups: - - "" - - networking.k8s.io - resources: - - networkpolicies - - configmaps - verbs: - - get - - list - - watch - - apiGroups: - - apps - resources: - - daemonsets - verbs: - - get - - apiGroups: - - "" - resources: - - services - - services/status - verbs: - - get - - list - - update - - create - - delete - - watch - - apiGroups: - - "" - resources: - - endpoints - verbs: - - create - - update - - get - - list - - watch - - apiGroups: - - apps - resources: - - statefulsets - - deployments - - deployments/scale - verbs: - - get - - list - - create - - delete - - update - - apiGroups: - - "" - resources: - - events - verbs: - - create - - patch - - update - - apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - "*" - - apiGroups: - - "kubevirt.io" - resources: - - virtualmachines - - virtualmachineinstances - verbs: - - get - - list - - apiGroups: - - authentication.k8s.io - resources: - - tokenreviews - verbs: - - create - - apiGroups: - - authorization.k8s.io - resources: - - subjectaccessreviews - verbs: - - create ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: ovn -roleRef: - name: system:ovn - kind: ClusterRole - apiGroup: rbac.authorization.k8s.io -subjects: - - kind: ServiceAccount - name: ovn - namespace: kube-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: ovn - namespace: kube-system -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: extension-apiserver-authentication-reader -subjects: - - kind: ServiceAccount - name: ovn - namespace: kube-system ---- -kind: Service -apiVersion: v1 -metadata: - name: ovn-nb - namespace: kube-system -spec: - ports: - - name: ovn-nb - protocol: TCP - port: 6641 - targetPort: 6641 - type: ClusterIP -{% if ipv6_stack %} - ipFamilyPolicy: PreferDualStack -{% endif %} - selector: - app: ovn-central - ovn-nb-leader: "true" - sessionAffinity: None ---- -kind: Service -apiVersion: v1 -metadata: - name: ovn-sb - namespace: kube-system -spec: - ports: - - name: ovn-sb - protocol: TCP - port: 6642 - targetPort: 6642 - type: ClusterIP -{% if ipv6_stack %} - ipFamilyPolicy: PreferDualStack -{% endif %} - selector: - app: ovn-central - ovn-sb-leader: "true" - sessionAffinity: None ---- -kind: Service -apiVersion: v1 -metadata: - name: ovn-northd - namespace: kube-system -spec: - ports: - - name: ovn-northd - protocol: TCP - port: 6643 - targetPort: 6643 - type: ClusterIP -{% if ipv6_stack %} - ipFamilyPolicy: PreferDualStack -{% endif %} - selector: - app: ovn-central - ovn-northd-leader: "true" - sessionAffinity: None ---- -kind: Deployment -apiVersion: apps/v1 -metadata: - name: ovn-central - namespace: kube-system - annotations: - kubernetes.io/description: | - OVN components: northd, nb and sb. 
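The ovn-central Deployment that follows is sized entirely through variables; a sketch with illustrative values (the pod anti-affinity and the kube-ovn/role=master nodeSelector in the spec below cap usable replicas at the number of labelled nodes):

    kube_ovn_central_replics: 3          # number of ovn-central replicas
    kube_ovn_db_cpu_request: 300m
    kube_ovn_db_memory_request: 300Mi
    kube_ovn_db_cpu_limit: "3"
    kube_ovn_db_memory_limit: 4Gi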
-spec: - replicas: {{ kube_ovn_central_replics }} - strategy: - rollingUpdate: - maxSurge: 0 - maxUnavailable: 1 - type: RollingUpdate - selector: - matchLabels: - app: ovn-central - template: - metadata: - labels: - app: ovn-central - component: network - type: infra - spec: - tolerations: - - effect: NoSchedule - operator: Exists - - effect: NoExecute - operator: Exists - - key: CriticalAddonsOnly - operator: Exists - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - app: ovn-central - topologyKey: kubernetes.io/hostname - priorityClassName: system-cluster-critical - serviceAccountName: ovn-ovs - hostNetwork: true - containers: - - name: ovn-central - image: {{ kube_ovn_container_image_repo }}:{{ kube_ovn_container_image_tag }} - imagePullPolicy: {{ k8s_image_pull_policy }} - command: ["/kube-ovn/start-db.sh"] - securityContext: - capabilities: - add: - - NET_BIND_SERVICE - - SYS_NICE - env: - - name: ENABLE_SSL - value: "{{ kube_ovn_enable_ssl | lower }}" - - name: NODE_IPS - value: "{{ kube_ovn_central_ips }}" - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_IPS - valueFrom: - fieldRef: - fieldPath: status.podIPs - - name: ENABLE_BIND_LOCAL_IP - value: "{{ kube_ovn_bind_local_ip_enabled }}" - - name: PROBE_INTERVAL - value: "180000" - - name: OVN_NORTHD_PROBE_INTERVAL - value: "5000" - - name: OVN_LEADER_PROBE_INTERVAL - value: "5" - resources: - requests: - cpu: {{ kube_ovn_db_cpu_request }} - memory: {{ kube_ovn_db_memory_request }} - limits: - cpu: {{ kube_ovn_db_cpu_limit }} - memory: {{ kube_ovn_db_memory_limit }} - volumeMounts: - - mountPath: /var/run/openvswitch - name: host-run-ovs - - mountPath: /var/run/ovn - name: host-run-ovn - - mountPath: /sys - name: host-sys - readOnly: true - - mountPath: /etc/openvswitch - name: host-config-openvswitch - - mountPath: /etc/ovn - name: host-config-ovn - - mountPath: /var/log/openvswitch - name: host-log-ovs - - mountPath: /var/log/ovn - name: host-log-ovn - - mountPath: /etc/localtime - name: localtime - - mountPath: /var/run/tls - name: kube-ovn-tls - readinessProbe: - exec: - command: - - bash - - /kube-ovn/ovn-healthcheck.sh - periodSeconds: 15 - timeoutSeconds: 45 - livenessProbe: - exec: - command: - - bash - - /kube-ovn/ovn-healthcheck.sh - initialDelaySeconds: 30 - periodSeconds: 15 - failureThreshold: 5 - timeoutSeconds: 45 - nodeSelector: - kubernetes.io/os: "linux" - kube-ovn/role: "master" - volumes: - - name: host-run-ovs - hostPath: - path: /run/openvswitch - - name: host-run-ovn - hostPath: - path: /run/ovn - - name: host-sys - hostPath: - path: /sys - - name: host-config-openvswitch - hostPath: - path: /etc/origin/openvswitch - - name: host-config-ovn - hostPath: - path: /etc/origin/ovn - - name: host-log-ovs - hostPath: - path: /var/log/openvswitch - - name: host-log-ovn - hostPath: - path: /var/log/ovn - - name: localtime - hostPath: - path: /etc/localtime - - name: kube-ovn-tls - secret: - optional: true - secretName: kube-ovn-tls ---- -kind: DaemonSet -apiVersion: apps/v1 -metadata: - name: ovs-ovn - namespace: kube-system - annotations: - kubernetes.io/description: | - This daemon set launches the openvswitch daemon. 
-spec: - selector: - matchLabels: - app: ovs - updateStrategy: - type: RollingUpdate - rollingUpdate: - maxSurge: 1 - maxUnavailable: 0 - template: - metadata: - labels: - app: ovs - component: network - type: infra - spec: - tolerations: - - effect: NoSchedule - operator: Exists - - effect: NoExecute - operator: Exists - - key: CriticalAddonsOnly - operator: Exists - priorityClassName: system-node-critical - serviceAccountName: ovn-ovs - hostNetwork: true - hostPID: true - containers: - - name: openvswitch - image: {% if kube_ovn_dpdk_enabled %}{{ kube_ovn_dpdk_container_image_repo }}:{{ kube_ovn_dpdk_container_image_tag }}{% else %}{{ kube_ovn_container_image_repo }}:{{ kube_ovn_container_image_tag }}{% endif %} - - imagePullPolicy: {{ k8s_image_pull_policy }} - command: [{% if kube_ovn_dpdk_enabled %}"/kube-ovn/start-ovs-dpdk.sh"{% else %}"/kube-ovn/start-ovs.sh"{% endif %}] - securityContext: - runAsUser: 0 - privileged: false - capabilities: - add: - - NET_ADMIN - - NET_BIND_SERVICE - - SYS_MODULE - - SYS_NICE - env: - - name: ENABLE_SSL - value: "{{ kube_ovn_enable_ssl | lower }}" - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace -{% if not kube_ovn_dpdk_enabled %} - - name: HW_OFFLOAD - value: "{{ kube_ovn_hw_offload | string | lower }}" - - name: TUNNEL_TYPE - value: "{{ kube_ovn_tunnel_type }}" -{% endif %} - - name: KUBE_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: OVN_DB_IPS - value: "{{ kube_ovn_central_ips }}" - volumeMounts: - - mountPath: /var/run/netns - name: host-ns - mountPropagation: HostToContainer - - mountPath: /lib/modules - name: host-modules - readOnly: true - - mountPath: /var/run/openvswitch - name: host-run-ovs - - mountPath: /var/run/ovn - name: host-run-ovn - - mountPath: /sys - name: host-sys - readOnly: true - - mountPath: /etc/cni/net.d - name: cni-conf - - mountPath: /etc/openvswitch - name: host-config-openvswitch - - mountPath: /etc/ovn - name: host-config-ovn - - mountPath: /var/log/openvswitch - name: host-log-ovs - - mountPath: /var/log/ovn - name: host-log-ovn -{% if kube_ovn_dpdk_enabled %} - - mountPath: /opt/ovs-config - name: host-config-ovs - - mountPath: /dev/hugepages - name: hugepage -{% endif %} - - mountPath: /etc/localtime - name: localtime - - mountPath: /var/run/tls - name: kube-ovn-tls - - mountPath: /var/run/containerd - name: cruntime - readOnly: true - readinessProbe: - exec: - command: - - bash -{% if kube_ovn_dpdk_enabled %} - - /kube-ovn/ovs-dpdk-healthcheck.sh -{% else %} - - /kube-ovn/ovs-healthcheck.sh -{% endif %} - periodSeconds: 5 - timeoutSeconds: 45 - livenessProbe: - exec: - command: - - bash -{% if kube_ovn_dpdk_enabled %} - - /kube-ovn/ovs-dpdk-healthcheck.sh -{% else %} - - /kube-ovn/ovs-healthcheck.sh -{% endif %} - initialDelaySeconds: 60 - periodSeconds: 5 - failureThreshold: 5 - timeoutSeconds: 45 - resources: -{% if kube_ovn_dpdk_enabled %} - requests: - cpu: {{ kube_ovn_dpdk_node_cpu_request }} - memory: {{ kube_ovn_dpdk_node_memory_request }} - limits: - cpu: {{ kube_ovn_dpdk_node_cpu_limit }} - memory: {{ kube_ovn_dpdk_node_memory_limit }} - hugepages-1Gi: 1Gi -{% else %} - requests: - cpu: {{ kube_ovn_node_cpu_request }} - memory: {{ kube_ovn_node_memory_request }} - limits: - cpu: {{ kube_ovn_node_cpu_limit }} - memory: {{ kube_ovn_node_memory_limit }} -{% endif %} - nodeSelector: - kubernetes.io/os: "linux" - 
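As the template above shows, a single kube_ovn_dpdk_enabled switch swaps the ovs-ovn image, entrypoint, mounts and resources. A sketch of the extra variables involved (values illustrative):

    kube_ovn_dpdk_enabled: true
    kube_ovn_dpdk_node_cpu_request: "1"
    kube_ovn_dpdk_node_memory_request: 1Gi
    kube_ovn_dpdk_node_cpu_limit: "1"
    kube_ovn_dpdk_node_memory_limit: 1Gi

With DPDK enabled each ovs-ovn pod additionally requests hugepages-1Gi: 1Gi, per the resources block above, so nodes must have 1 GiB hugepages configured.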
volumes: - - name: host-modules - hostPath: - path: /lib/modules - - name: host-run-ovs - hostPath: - path: /run/openvswitch - - name: host-run-ovn - hostPath: - path: /run/ovn - - name: host-sys - hostPath: - path: /sys - - name: host-ns - hostPath: - path: /var/run/netns - - name: cni-conf - hostPath: - path: /etc/cni/net.d - - name: host-config-openvswitch - hostPath: - path: /etc/origin/openvswitch - - name: host-config-ovn - hostPath: - path: /etc/origin/ovn - - name: host-log-ovs - hostPath: - path: /var/log/openvswitch - - name: host-log-ovn - hostPath: - path: /var/log/ovn -{% if kube_ovn_dpdk_enabled %} - - name: host-config-ovs - hostPath: - path: /opt/ovs-config - type: DirectoryOrCreate - - name: hugepage - emptyDir: - medium: HugePages -{% endif %} - - name: localtime - hostPath: - path: /etc/localtime - - name: cruntime - hostPath: - path: /var/run/containerd - - name: kube-ovn-tls - secret: - optional: true - secretName: kube-ovn-tls diff --git a/roles/network_plugin/kube-router/defaults/main.yml b/roles/network_plugin/kube-router/defaults/main.yml deleted file mode 100644 index c01a3532bd8..00000000000 --- a/roles/network_plugin/kube-router/defaults/main.yml +++ /dev/null @@ -1,69 +0,0 @@ ---- -# Enables Pod Networking -- Advertises and learns the routes to Pods via iBGP -kube_router_run_router: true - -# Enables Network Policy -- sets up iptables to provide ingress firewall for pods -kube_router_run_firewall: true - -# Enables Service Proxy -- sets up IPVS for Kubernetes Services -# see docs/kube-router.md "Caveats" section -kube_router_run_service_proxy: false - -# Add the Cluster IP of the service to the RIB so that it gets advertised to the BGP peers. -kube_router_advertise_cluster_ip: false - -# Add the External IP of the service to the RIB so that it gets advertised to the BGP peers. -kube_router_advertise_external_ip: false - -# Add the LoadBalancer IP of the service status, as set by the LB provider, to the RIB so that it gets advertised to the BGP peers. -kube_router_advertise_loadbalancer_ip: false - -# Enables BGP graceful restarts -kube_router_bgp_graceful_restart: true - -# Adjust the kube-router daemonset manifest with the changes needed for DSR -kube_router_enable_dsr: false - -# Array of arbitrary extra arguments to kube-router, see -# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md -kube_router_extra_args: [] - -# ASN number of the cluster, used when communicating with external BGP routers -kube_router_cluster_asn: ~ - -# ASN numbers of the BGP peers to which cluster nodes will advertise the cluster IP and each node's pod CIDR. -kube_router_peer_router_asns: ~ - -# The IP address of the external router to which all nodes will peer and advertise the cluster IP and pod CIDRs. -kube_router_peer_router_ips: ~ - -# The remote port of the external BGP peer. If not set, the default BGP port (179) will be used. -kube_router_peer_router_ports: ~ - -# Sets up node CNI to allow hairpin mode; requires node reboots, see -# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md#hairpin-mode -kube_router_support_hairpin_mode: false - -# Select the DNS policy: ClusterFirstWithHostNet, ClusterFirst, etc. -kube_router_dns_policy: ClusterFirstWithHostNet - -# Adds annotations to kubernetes nodes for advanced configuration of BGP Peers.
-# https://github.com/cloudnativelabs/kube-router/blob/master/docs/bgp.md - -# Array of annotations for master -kube_router_annotations_master: [] - -# Array of annotations for every node -kube_router_annotations_node: [] - -# Array of common annotations for every node -kube_router_annotations_all: [] - -# Enables scraping kube-router metrics with Prometheus -kube_router_enable_metrics: false - -# Path to serve Prometheus metrics on -kube_router_metrics_path: /metrics - -# Prometheus metrics port to use -kube_router_metrics_port: 9255 diff --git a/roles/network_plugin/kube-router/handlers/main.yml b/roles/network_plugin/kube-router/handlers/main.yml deleted file mode 100644 index ad5eb21401d..00000000000 --- a/roles/network_plugin/kube-router/handlers/main.yml +++ /dev/null @@ -1,20 +0,0 @@ ---- -- name: Kube-router | delete kube-router docker containers - shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -af name=k8s_POD_kube-router* -q | xargs --no-run-if-empty docker rm -f" - args: - executable: /bin/bash - register: docker_kube_router_remove - until: docker_kube_router_remove is succeeded - retries: 5 - when: container_manager in ["docker"] - listen: Reset_kube_router - -- name: Kube-router | delete kube-router crio/containerd containers - shell: 'set -o pipefail && {{ bin_dir }}/crictl pods --name kube-router* -q | xargs -I% --no-run-if-empty bash -c "{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %"' - args: - executable: /bin/bash - register: crictl_kube_router_remove - until: crictl_kube_router_remove is succeeded - retries: 5 - when: container_manager in ["crio", "containerd"] - listen: Reset_kube_router diff --git a/roles/network_plugin/kube-router/meta/main.yml b/roles/network_plugin/kube-router/meta/main.yml deleted file mode 100644 index 9b7065f1854..00000000000 --- a/roles/network_plugin/kube-router/meta/main.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -dependencies: - - role: network_plugin/cni diff --git a/roles/network_plugin/kube-router/tasks/annotate.yml b/roles/network_plugin/kube-router/tasks/annotate.yml deleted file mode 100644 index 9cb7f6e7c43..00000000000 --- a/roles/network_plugin/kube-router/tasks/annotate.yml +++ /dev/null @@ -1,21 +0,0 @@ ---- -- name: Kube-router | Add annotations on kube_control_plane - command: "{{ kubectl }} annotate --overwrite node {{ ansible_hostname }} {{ item }}" - with_items: - - "{{ kube_router_annotations_master }}" - delegate_to: "{{ groups['kube_control_plane'][0] }}" - when: kube_router_annotations_master is defined and 'kube_control_plane' in group_names - -- name: Kube-router | Add annotations on kube_node - command: "{{ kubectl }} annotate --overwrite node {{ ansible_hostname }} {{ item }}" - with_items: - - "{{ kube_router_annotations_node }}" - delegate_to: "{{ groups['kube_control_plane'][0] }}" - when: kube_router_annotations_node is defined and 'kube_node' in group_names - -- name: Kube-router | Add common annotations on all servers - command: "{{ kubectl }} annotate --overwrite node {{ ansible_hostname }} {{ item }}" - with_items: - - "{{ kube_router_annotations_all }}" - delegate_to: "{{ groups['kube_control_plane'][0] }}" - when: kube_router_annotations_all is defined and 'k8s_cluster' in group_names diff --git a/roles/network_plugin/kube-router/tasks/main.yml b/roles/network_plugin/kube-router/tasks/main.yml deleted file mode 100644 index 561ed688734..00000000000 --- a/roles/network_plugin/kube-router/tasks/main.yml +++ /dev/null @@ -1,84 +0,0 @@ ---- -- name: Kube-router | Create annotations - 
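Taken together with the defaults and the annotate tasks above, a typical BGP-peering setup reduces to a handful of group_vars; the ASNs, address and annotation value below are illustrative only:

    kube_router_cluster_asn: 64512
    kube_router_peer_router_asns: 64513
    kube_router_peer_router_ips: 192.0.2.1
    kube_router_advertise_cluster_ip: true
    kube_router_annotations_master:
      - kube-router.io/bgp-local-addresses=192.0.2.11   # hypothetical annotation value

The annotation arrays feed the kubectl annotate loops shown above, which apply them to the matching node groups.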
import_tasks: annotate.yml - tags: annotate - -- name: Kube-router | Create config directory - file: - path: /var/lib/kube-router - state: directory - owner: "{{ kube_owner }}" - recurse: true - mode: "0755" - -- name: Kube-router | Create kubeconfig - template: - src: kubeconfig.yml.j2 - dest: /var/lib/kube-router/kubeconfig - mode: "0644" - owner: "{{ kube_owner }}" - notify: - - Reset_kube_router - -- name: Kube-router | Slurp cni config - slurp: - src: /etc/cni/net.d/10-kuberouter.conflist - register: cni_config_slurp - ignore_errors: true # noqa ignore-errors - -- name: Kube-router | Set cni_config variable - set_fact: - cni_config: "{{ cni_config_slurp.content | b64decode | from_json }}" - when: - - not cni_config_slurp.failed - -- name: Kube-router | Set host_subnet variable - when: - - cni_config is defined - - cni_config | json_query('plugins[?bridge==`kube-bridge`].ipam.subnet') | length > 0 - set_fact: - host_subnet: "{{ cni_config | json_query('plugins[?bridge==`kube-bridge`].ipam.subnet') | first }}" - -- name: Kube-router | Create cni config - template: - src: cni-conf.json.j2 - dest: /etc/cni/net.d/10-kuberouter.conflist - mode: "0644" - owner: "{{ kube_owner }}" - notify: - - Reset_kube_router - -- name: Kube-router | Delete old configuration - file: - path: /etc/cni/net.d/10-kuberouter.conf - state: absent - -- name: Kube-router | Create manifest - template: - src: kube-router.yml.j2 - dest: "{{ kube_config_dir }}/kube-router.yml" - mode: "0644" - delegate_to: "{{ groups['kube_control_plane'] | first }}" - run_once: true - -- name: Kube-router | Start Resources - kube: - name: "kube-router" - kubectl: "{{ bin_dir }}/kubectl" - filename: "{{ kube_config_dir }}/kube-router.yml" - resource: "ds" - namespace: "kube-system" - state: "latest" - delegate_to: "{{ groups['kube_control_plane'] | first }}" - run_once: true - -- name: Kube-router | Wait for kube-router pods to be ready - command: "{{ kubectl }} -n kube-system get pods -l k8s-app=kube-router -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" # noqa ignore-errors - register: pods_not_ready - until: pods_not_ready.stdout.find("kube-router")==-1 - retries: 30 - delay: 10 - ignore_errors: true - delegate_to: "{{ groups['kube_control_plane'] | first }}" - run_once: true - changed_when: false diff --git a/roles/network_plugin/kube-router/tasks/reset.yml b/roles/network_plugin/kube-router/tasks/reset.yml deleted file mode 100644 index 32f707591e4..00000000000 --- a/roles/network_plugin/kube-router/tasks/reset.yml +++ /dev/null @@ -1,28 +0,0 @@ ---- -- name: Reset | check kube-dummy-if network device - stat: - path: /sys/class/net/kube-dummy-if - get_attributes: false - get_checksum: false - get_mime: false - register: kube_dummy_if - -- name: Reset | remove the network device created by kube-router - command: ip link del kube-dummy-if - when: kube_dummy_if.stat.exists - -- name: Reset | check kube-bridge exists - stat: - path: /sys/class/net/kube-bridge - get_attributes: false - get_checksum: false - get_mime: false - register: kube_bridge_if - -- name: Reset | down the network bridge created by kube-router - command: ip link set kube-bridge down - when: kube_bridge_if.stat.exists - -- name: Reset | remove the network bridge created by kube-router - command: ip link del kube-bridge - when: kube_bridge_if.stat.exists diff --git a/roles/network_plugin/kube-router/templates/cni-conf.json.j2 b/roles/network_plugin/kube-router/templates/cni-conf.json.j2 deleted file mode 100644 index
91fafacc4fc..00000000000 --- a/roles/network_plugin/kube-router/templates/cni-conf.json.j2 +++ /dev/null @@ -1,27 +0,0 @@ -{ - "cniVersion":"0.3.0", - "name":"kubernetes", - "plugins":[ - { - "name":"kubernetes", - "type":"bridge", - "bridge":"kube-bridge", - "isDefaultGateway":true, -{% if kube_router_support_hairpin_mode %} - "hairpinMode":true, -{% endif %} - "ipam":{ -{% if host_subnet is defined %} - "subnet": "{{ host_subnet }}", -{% endif %} - "type":"host-local" - } - }, - { - "type":"portmap", - "capabilities":{ - "portMappings":true - } - } - ] -} diff --git a/roles/network_plugin/kube-router/templates/kube-router.yml.j2 b/roles/network_plugin/kube-router/templates/kube-router.yml.j2 deleted file mode 100644 index d868287d481..00000000000 --- a/roles/network_plugin/kube-router/templates/kube-router.yml.j2 +++ /dev/null @@ -1,228 +0,0 @@ -apiVersion: apps/v1 -kind: DaemonSet -metadata: - labels: - k8s-app: kube-router - tier: node - name: kube-router - namespace: kube-system -spec: - minReadySeconds: 3 - updateStrategy: - rollingUpdate: - maxUnavailable: 1 - type: RollingUpdate - selector: - matchLabels: - k8s-app: kube-router - tier: node - template: - metadata: - labels: - k8s-app: kube-router - tier: node - annotations: -{% if kube_router_enable_metrics %} - prometheus.io/path: {{ kube_router_metrics_path }} - prometheus.io/port: "{{ kube_router_metrics_port }}" - prometheus.io/scrape: "true" -{% endif %} - spec: - priorityClassName: system-node-critical - serviceAccountName: kube-router - containers: - - name: kube-router - image: {{ kube_router_image_repo }}:{{ kube_router_image_tag }} - imagePullPolicy: {{ k8s_image_pull_policy }} - args: - - --run-router={{ kube_router_run_router | bool }} - - --run-firewall={{ kube_router_run_firewall | bool }} - - --run-service-proxy={{ kube_router_run_service_proxy | bool }} - - --kubeconfig=/var/lib/kube-router/kubeconfig - - --bgp-graceful-restart={{ kube_router_bgp_graceful_restart }} -{% if kube_router_advertise_cluster_ip %} - - --advertise-cluster-ip -{% endif %} -{% if kube_router_advertise_external_ip %} - - --advertise-external-ip -{% endif %} -{% if kube_router_advertise_loadbalancer_ip %} - - --advertise-loadbalancer-ip -{% endif %} -{% if kube_router_cluster_asn %} - - --cluster-asn={{ kube_router_cluster_asn }} -{% endif %} -{% if kube_router_peer_router_asns %} - - --peer-router-asns={{ kube_router_peer_router_asns }} -{% endif %} -{% if kube_router_peer_router_ips %} - - --peer-router-ips={{ kube_router_peer_router_ips }} -{% endif %} -{% if kube_router_peer_router_ports %} - - --peer-router-ports={{ kube_router_peer_router_ports }} -{% endif %} -{% if kube_router_enable_metrics %} - - --metrics-path={{ kube_router_metrics_path }} - - --metrics-port={{ kube_router_metrics_port }} -{% endif %} -{% if kube_router_enable_dsr %} -{% if container_manager == "docker" %} - - --runtime-endpoint=unix:///var/run/docker.sock -{% endif %} -{% if container_manager == "containerd" %} - - --runtime-endpoint=unix:///run/containerd/containerd.sock -{% endif %} -{% endif %} -{% for arg in kube_router_extra_args %} - - "{{ arg }}" -{% endfor %} - env: - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: KUBE_ROUTER_CNI_CONF_FILE - value: /etc/cni/net.d/10-kuberouter.conflist - livenessProbe: - httpGet: - path: /healthz - port: 20244 - initialDelaySeconds: 10 - periodSeconds: 3 - resources: - requests: - cpu: 250m - memory: 250Mi - securityContext: - privileged: true - volumeMounts: -{% if kube_router_enable_dsr
%} -{% if container_manager == "docker" %} - - name: docker-socket - mountPath: /var/run/docker.sock - readOnly: true -{% endif %} -{% if container_manager == "containerd" %} - - name: containerd-socket - mountPath: /run/containerd/containerd.sock - readOnly: true -{% endif %} -{% endif %} - - name: lib-modules - mountPath: /lib/modules - readOnly: true - - name: cni-conf-dir - mountPath: /etc/cni/net.d - - name: kubeconfig - mountPath: /var/lib/kube-router - readOnly: true - - name: xtables-lock - mountPath: /run/xtables.lock - readOnly: false -{% if kube_router_enable_metrics %} - ports: - - containerPort: {{ kube_router_metrics_port }} - hostPort: {{ kube_router_metrics_port }} - name: metrics - protocol: TCP -{% endif %} - hostNetwork: true - dnsPolicy: {{ kube_router_dns_policy }} -{% if kube_router_enable_dsr %} - hostIPC: true - hostPID: true -{% endif %} - tolerations: - - operator: Exists - volumes: -{% if kube_router_enable_dsr %} -{% if container_manager == "docker" %} - - name: docker-socket - hostPath: - path: /var/run/docker.sock - type: Socket -{% endif %} -{% if container_manager == "containerd" %} - - name: containerd-socket - hostPath: - path: /run/containerd/containerd.sock - type: Socket -{% endif %} -{% endif %} - - name: lib-modules - hostPath: - path: /lib/modules - - name: cni-conf-dir - hostPath: - path: /etc/cni/net.d - - name: kubeconfig - hostPath: - path: /var/lib/kube-router - - name: xtables-lock - hostPath: - path: /run/xtables.lock - type: FileOrCreate - ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: kube-router - namespace: kube-system - ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: kube-router - namespace: kube-system -rules: - - apiGroups: - - "" - resources: - - namespaces - - pods - - services - - nodes - - endpoints - verbs: - - list - - get - - watch - - apiGroups: - - "networking.k8s.io" - resources: - - networkpolicies - verbs: - - list - - get - - watch - - apiGroups: - - extensions - resources: - - networkpolicies - verbs: - - get - - list - - watch - - apiGroups: - - discovery.k8s.io - resources: - - endpointslices - verbs: - - get - - list - - watch ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: kube-router -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: kube-router -subjects: -- kind: ServiceAccount - name: kube-router - namespace: kube-system diff --git a/roles/network_plugin/kube-router/templates/kubeconfig.yml.j2 b/roles/network_plugin/kube-router/templates/kubeconfig.yml.j2 deleted file mode 100644 index 470885111d9..00000000000 --- a/roles/network_plugin/kube-router/templates/kubeconfig.yml.j2 +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: v1 -kind: Config -clusterCIDR: {{ kube_pods_subnets }} -clusters: -- name: cluster - cluster: - certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - server: {{ kube_apiserver_endpoint }} -users: -- name: kube-router - user: - tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token -contexts: -- context: - cluster: cluster - user: kube-router - name: kube-router-context -current-context: kube-router-context diff --git a/roles/network_plugin/macvlan/defaults/main.yml b/roles/network_plugin/macvlan/defaults/main.yml deleted file mode 100644 index 70a8dd02826..00000000000 --- a/roles/network_plugin/macvlan/defaults/main.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -macvlan_interface: eth0 -enable_nat_default_gateway: true - -# sysctl_file_path to add sysctl 
conf to -sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf" diff --git a/roles/network_plugin/macvlan/files/ifdown-local b/roles/network_plugin/macvlan/files/ifdown-local deleted file mode 100644 index 003b8a1b41f..00000000000 --- a/roles/network_plugin/macvlan/files/ifdown-local +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -POSTDOWNNAME="/etc/sysconfig/network-scripts/post-down-$1" -if [ -x $POSTDOWNNAME ]; then - exec $POSTDOWNNAME -fi diff --git a/roles/network_plugin/macvlan/files/ifdown-macvlan b/roles/network_plugin/macvlan/files/ifdown-macvlan deleted file mode 100755 index b79b9c11ec8..00000000000 --- a/roles/network_plugin/macvlan/files/ifdown-macvlan +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/bash -# -# initscripts-macvlan -# Copyright (C) 2014 Lars Kellogg-Stedman -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see <http://www.gnu.org/licenses/>. - -. /etc/init.d/functions - -cd /etc/sysconfig/network-scripts -. ./network-functions - -[ -f ../network ] && . ../network - -CONFIG=${1} - -need_config ${CONFIG} - -source_config - -OTHERSCRIPT="/etc/sysconfig/network-scripts/ifdown-${REAL_DEVICETYPE}" - -if [ ! -x ${OTHERSCRIPT} ]; then - OTHERSCRIPT="/etc/sysconfig/network-scripts/ifdown-eth" -fi - -${OTHERSCRIPT} ${CONFIG} - -ip link del ${DEVICE} type ${TYPE:-macvlan} diff --git a/roles/network_plugin/macvlan/files/ifup-local b/roles/network_plugin/macvlan/files/ifup-local deleted file mode 100755 index 3b6891eb996..00000000000 --- a/roles/network_plugin/macvlan/files/ifup-local +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -POSTUPNAME="/etc/sysconfig/network-scripts/post-up-$1" -if [ -x $POSTUPNAME ]; then - exec $POSTUPNAME -fi diff --git a/roles/network_plugin/macvlan/files/ifup-macvlan b/roles/network_plugin/macvlan/files/ifup-macvlan deleted file mode 100755 index 97daec0c4d3..00000000000 --- a/roles/network_plugin/macvlan/files/ifup-macvlan +++ /dev/null @@ -1,43 +0,0 @@ -#!/bin/bash -# -# initscripts-macvlan -# Copyright (C) 2014 Lars Kellogg-Stedman -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see <http://www.gnu.org/licenses/>. - -. /etc/init.d/functions - -cd /etc/sysconfig/network-scripts -. ./network-functions - -[ -f ../network ] && . ../network - -CONFIG=${1} - -need_config ${CONFIG} - -source_config - -OTHERSCRIPT="/etc/sysconfig/network-scripts/ifup-${REAL_DEVICETYPE}" - -if [ !
-x ${OTHERSCRIPT} ]; then - OTHERSCRIPT="/etc/sysconfig/network-scripts/ifup-eth" -fi - -ip link add \ - link ${MACVLAN_PARENT} \ - name ${DEVICE} \ - type ${TYPE:-macvlan} mode ${MACVLAN_MODE:-private} - -${OTHERSCRIPT} ${CONFIG} diff --git a/roles/network_plugin/macvlan/handlers/main.yml b/roles/network_plugin/macvlan/handlers/main.yml deleted file mode 100644 index e4844c22174..00000000000 --- a/roles/network_plugin/macvlan/handlers/main.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -- name: Macvlan | reload network - service: - # noqa: jinja[spacing] - name: >- - {% if ansible_os_family == "RedHat" -%} - network - {%- elif ansible_distribution == "Ubuntu" and ansible_distribution_release == "bionic" -%} - systemd-networkd - {%- elif ansible_os_family == "Debian" -%} - networking - {%- endif %} - state: restarted - when: not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] and kube_network_plugin not in ['calico'] - listen: Macvlan | restart network diff --git a/roles/network_plugin/macvlan/meta/main.yml b/roles/network_plugin/macvlan/meta/main.yml deleted file mode 100644 index 9b7065f1854..00000000000 --- a/roles/network_plugin/macvlan/meta/main.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -dependencies: - - role: network_plugin/cni diff --git a/roles/network_plugin/macvlan/tasks/main.yml b/roles/network_plugin/macvlan/tasks/main.yml deleted file mode 100644 index 6ffe3348cd0..00000000000 --- a/roles/network_plugin/macvlan/tasks/main.yml +++ /dev/null @@ -1,110 +0,0 @@ ---- -- name: Macvlan | Retrieve Pod Cidr - command: "{{ kubectl }} get nodes {{ kube_override_hostname | default(inventory_hostname) }} -o jsonpath='{.spec.podCIDR}'" - changed_when: false - register: node_pod_cidr_cmd - delegate_to: "{{ groups['kube_control_plane'][0] }}" - -- name: Macvlan | set node_pod_cidr - set_fact: - node_pod_cidr: "{{ node_pod_cidr_cmd.stdout }}" - -- name: Macvlan | Retrieve default gateway network interface - become: false - raw: ip -4 route list 0/0 | sed 's/.*dev \([[:alnum:]]*\).*/\1/' - changed_when: false - register: node_default_gateway_interface_cmd - -- name: Macvlan | set node_default_gateway_interface - set_fact: - node_default_gateway_interface: "{{ node_default_gateway_interface_cmd.stdout | trim }}" - -- name: Macvlan | Install network gateway interface on debian - template: - src: debian-network-macvlan.cfg.j2 - dest: /etc/network/interfaces.d/60-mac0.cfg - mode: "0644" - notify: Macvlan | restart network - when: ansible_os_family in ["Debian"] - -- name: Install macvlan config on RH distros - when: ansible_os_family == "RedHat" - block: - - name: Macvlan | Install macvlan script on centos - copy: - src: "{{ item }}" - dest: /etc/sysconfig/network-scripts/ - owner: root - group: root - mode: "0755" - with_fileglob: - - files/* - - - name: Macvlan | Install post-up script on centos - copy: - src: "files/ifup-local" - dest: /sbin/ - owner: root - group: root - mode: "0755" - when: enable_nat_default_gateway - - - name: Macvlan | Install network gateway interface on centos - template: - src: "{{ item.src }}.j2" - dest: "/etc/sysconfig/network-scripts/{{ item.dst }}" - mode: "0644" - with_items: - - {src: centos-network-macvlan.cfg, dst: ifcfg-mac0 } - - {src: centos-routes-macvlan.cfg, dst: route-mac0 } - - {src: centos-postup-macvlan.cfg, dst: post-up-mac0 } - notify: Macvlan | restart network - -- name: Install macvlan config on Flatcar - when: ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] - block: - - name: Macvlan | Install service nat via 
gateway on Flatcar Container Linux - template: - src: coreos-service-nat_ouside.j2 - dest: /etc/systemd/system/enable_nat_ouside.service - mode: "0644" - when: enable_nat_default_gateway - - - name: Macvlan | Enable service nat via gateway on Flatcar Container Linux - command: "{{ item }}" - with_items: - - systemctl daemon-reload - - systemctl enable enable_nat_ouside.service - when: enable_nat_default_gateway - - - name: Macvlan | Install network gateway interface on Flatcar Container Linux - template: - src: "{{ item.src }}.j2" - dest: "/etc/systemd/network/{{ item.dst }}" - mode: "0644" - with_items: - - {src: coreos-device-macvlan.cfg, dst: macvlan.netdev } - - {src: coreos-interface-macvlan.cfg, dst: output.network } - - {src: coreos-network-macvlan.cfg, dst: macvlan.network } - notify: Macvlan | restart network - -- name: Macvlan | Install cni definition for Macvlan - template: - src: 10-macvlan.conf.j2 - dest: /etc/cni/net.d/10-macvlan.conf - mode: "0644" - -- name: Macvlan | Install loopback definition for Macvlan - template: - src: 99-loopback.conf.j2 - dest: /etc/cni/net.d/99-loopback.conf - mode: "0644" - -- name: Enable net.ipv4.conf.all.arp_notify in sysctl - ansible.posix.sysctl: - name: net.ipv4.conf.all.arp_notify - value: 1 - sysctl_set: true - sysctl_file: "{{ sysctl_file_path }}" - state: present - reload: true diff --git a/roles/network_plugin/macvlan/templates/10-macvlan.conf.j2 b/roles/network_plugin/macvlan/templates/10-macvlan.conf.j2 deleted file mode 100644 index 8924547600b..00000000000 --- a/roles/network_plugin/macvlan/templates/10-macvlan.conf.j2 +++ /dev/null @@ -1,15 +0,0 @@ -{ - "cniVersion": "0.4.0", - "name": "mynet", - "type": "macvlan", - "master": "{{ macvlan_interface }}", - "hairpinMode": true, - "ipam": { - "type": "host-local", - "subnet": "{{ node_pod_cidr }}", - "routes": [ - { "dst": "0.0.0.0/0" } - ], - "gateway": "{{ node_pod_cidr|ansible.utils.ipaddr('net')|ansible.utils.ipaddr(1)|ansible.utils.ipaddr('address') }}" - } -} diff --git a/roles/network_plugin/macvlan/templates/99-loopback.conf.j2 b/roles/network_plugin/macvlan/templates/99-loopback.conf.j2 deleted file mode 100644 index b41ab65841e..00000000000 --- a/roles/network_plugin/macvlan/templates/99-loopback.conf.j2 +++ /dev/null @@ -1,5 +0,0 @@ -{ - "cniVersion": "0.2.0", - "name": "lo", - "type": "loopback" -} diff --git a/roles/network_plugin/macvlan/templates/centos-network-macvlan.cfg.j2 b/roles/network_plugin/macvlan/templates/centos-network-macvlan.cfg.j2 deleted file mode 100644 index 1e6c0aab6a3..00000000000 --- a/roles/network_plugin/macvlan/templates/centos-network-macvlan.cfg.j2 +++ /dev/null @@ -1,13 +0,0 @@ -DEVICE=mac0 -DEVICETYPE=macvlan -TYPE=macvlan -BOOTPROTO=none -ONBOOT=yes -NM_CONTROLLED=no - -MACVLAN_PARENT={{ macvlan_interface }} -MACVLAN_MODE=bridge - -IPADDR={{ node_pod_cidr|ansible.utils.ipaddr('net')|ansible.utils.ipaddr(1)|ansible.utils.ipaddr('address') }} -NETMASK={{ node_pod_cidr|ansible.utils.ipaddr('netmask') }} -NETWORK={{ node_pod_cidr|ansible.utils.ipaddr('network') }} diff --git a/roles/network_plugin/macvlan/templates/centos-postdown-macvlan.cfg.j2 b/roles/network_plugin/macvlan/templates/centos-postdown-macvlan.cfg.j2 deleted file mode 100644 index 87f1f56a39f..00000000000 --- a/roles/network_plugin/macvlan/templates/centos-postdown-macvlan.cfg.j2 +++ /dev/null @@ -1,3 +0,0 @@ -{% if enable_nat_default_gateway %} -iptables -t nat -D POSTROUTING -s {{ node_pod_cidr|ansible.utils.ipaddr('net') }} -o {{ node_default_gateway_interface }} -j 
MASQUERADE -{% endif %} diff --git a/roles/network_plugin/macvlan/templates/centos-postup-macvlan.cfg.j2 b/roles/network_plugin/macvlan/templates/centos-postup-macvlan.cfg.j2 deleted file mode 100644 index 254827e4358..00000000000 --- a/roles/network_plugin/macvlan/templates/centos-postup-macvlan.cfg.j2 +++ /dev/null @@ -1,3 +0,0 @@ -{% if enable_nat_default_gateway %} -iptables -t nat -I POSTROUTING -s {{ node_pod_cidr|ansible.utils.ipaddr('net') }} -o {{ node_default_gateway_interface }} -j MASQUERADE -{% endif %} diff --git a/roles/network_plugin/macvlan/templates/centos-routes-macvlan.cfg.j2 b/roles/network_plugin/macvlan/templates/centos-routes-macvlan.cfg.j2 deleted file mode 100644 index 60400dd4917..00000000000 --- a/roles/network_plugin/macvlan/templates/centos-routes-macvlan.cfg.j2 +++ /dev/null @@ -1,7 +0,0 @@ -{% for host in groups['kube_node'] %} -{% if hostvars[host]['access_ip'] is defined %} -{% if hostvars[host]['node_pod_cidr'] != node_pod_cidr %} -{{ hostvars[host]['node_pod_cidr'] }} via {{ hostvars[host]['access_ip'] }} -{% endif %} -{% endif %} -{% endfor %} diff --git a/roles/network_plugin/macvlan/templates/coreos-device-macvlan.cfg.j2 b/roles/network_plugin/macvlan/templates/coreos-device-macvlan.cfg.j2 deleted file mode 100644 index 2418dacfebb..00000000000 --- a/roles/network_plugin/macvlan/templates/coreos-device-macvlan.cfg.j2 +++ /dev/null @@ -1,6 +0,0 @@ -[NetDev] -Name=mac0 -Kind=macvlan - -[MACVLAN] -Mode=bridge diff --git a/roles/network_plugin/macvlan/templates/coreos-interface-macvlan.cfg.j2 b/roles/network_plugin/macvlan/templates/coreos-interface-macvlan.cfg.j2 deleted file mode 100644 index 342f68081fb..00000000000 --- a/roles/network_plugin/macvlan/templates/coreos-interface-macvlan.cfg.j2 +++ /dev/null @@ -1,6 +0,0 @@ -[Match] -Name={{ macvlan_interface }} - -[Network] -MACVLAN=mac0 -DHCP=yes diff --git a/roles/network_plugin/macvlan/templates/coreos-network-macvlan.cfg.j2 b/roles/network_plugin/macvlan/templates/coreos-network-macvlan.cfg.j2 deleted file mode 100644 index 0c4c33b0a67..00000000000 --- a/roles/network_plugin/macvlan/templates/coreos-network-macvlan.cfg.j2 +++ /dev/null @@ -1,17 +0,0 @@ -[Match] -Name=mac0 - -[Network] -Address={{ node_pod_cidr|ansible.utils.ipaddr('net')|ansible.utils.ipaddr(1)|ansible.utils.ipaddr('address') }}/{{ node_pod_cidr|ansible.utils.ipaddr('prefix') }} - -{% for host in groups['kube_node'] %} -{% if hostvars[host]['access_ip'] is defined %} -{% if hostvars[host]['node_pod_cidr'] != node_pod_cidr %} -[Route] -Gateway={{ hostvars[host]['access_ip'] }} -Destination={{ hostvars[host]['node_pod_cidr'] }} -GatewayOnlink=yes - -{% endif %} -{% endif %} -{% endfor %} diff --git a/roles/network_plugin/macvlan/templates/coreos-service-nat_ouside.j2 b/roles/network_plugin/macvlan/templates/coreos-service-nat_ouside.j2 deleted file mode 100644 index 1d8df03191d..00000000000 --- a/roles/network_plugin/macvlan/templates/coreos-service-nat_ouside.j2 +++ /dev/null @@ -1,6 +0,0 @@ -[Service] -Type=oneshot -ExecStart=/bin/bash -c "iptables -t nat -I POSTROUTING -s {{ node_pod_cidr|ansible.utils.ipaddr('net') }} -o {{ node_default_gateway_interface }} -j MASQUERADE" - -[Install] -WantedBy=sys-subsystem-net-devices-mac0.device diff --git a/roles/network_plugin/macvlan/templates/debian-network-macvlan.cfg.j2 b/roles/network_plugin/macvlan/templates/debian-network-macvlan.cfg.j2 deleted file mode 100644 index cbd4325c9da..00000000000 --- a/roles/network_plugin/macvlan/templates/debian-network-macvlan.cfg.j2 +++ /dev/null @@ 
-1,26 +0,0 @@ -auto mac0 -iface mac0 inet static - address {{ node_pod_cidr|ansible.utils.ipaddr('net')|ansible.utils.ipaddr(1)|ansible.utils.ipaddr('address') }} - network {{ node_pod_cidr|ansible.utils.ipaddr('network') }} - netmask {{ node_pod_cidr|ansible.utils.ipaddr('netmask') }} - broadcast {{ node_pod_cidr|ansible.utils.ipaddr('broadcast') }} - pre-up ip link add link {{ macvlan_interface }} mac0 type macvlan mode bridge -{% for host in groups['kube_node'] %} -{% if hostvars[host]['access_ip'] is defined %} -{% if hostvars[host]['node_pod_cidr'] != node_pod_cidr %} - post-up ip route add {{ hostvars[host]['node_pod_cidr'] }} via {{ hostvars[host]['access_ip'] }} -{% endif %} -{% endif %} -{% endfor %} -{% if enable_nat_default_gateway %} - post-up iptables -t nat -I POSTROUTING -s {{ node_pod_cidr|ansible.utils.ipaddr('net') }} -o {{ node_default_gateway_interface }} -j MASQUERADE -{% endif %} -{% for host in groups['kube_node'] %} -{% if hostvars[host]['access_ip'] is defined %} -{% if hostvars[host]['node_pod_cidr'] != node_pod_cidr %} - post-down ip route del {{ hostvars[host]['node_pod_cidr'] }} via {{ hostvars[host]['access_ip'] }} -{% endif %} -{% endif %} -{% endfor %} - post-down iptables -t nat -D POSTROUTING -s {{ node_pod_cidr|ansible.utils.ipaddr('net') }} -o {{ node_default_gateway_interface }} -j MASQUERADE - post-down ip link delete mac0 diff --git a/roles/network_plugin/meta/main.yml b/roles/network_plugin/meta/main.yml index da0cf75efa1..27f37df014c 100644 --- a/roles/network_plugin/meta/main.yml +++ b/roles/network_plugin/meta/main.yml @@ -1,44 +1,6 @@ --- dependencies: - - role: network_plugin/cni - when: kube_network_plugin != 'none' - - role: network_plugin/cilium when: kube_network_plugin == 'cilium' or cilium_deploy_additionally tags: - cilium - - - role: network_plugin/calico - when: kube_network_plugin == 'calico' - tags: - - calico - - - role: network_plugin/flannel - when: kube_network_plugin == 'flannel' - tags: - - flannel - - - role: network_plugin/macvlan - when: kube_network_plugin == 'macvlan' - tags: - - macvlan - - - role: network_plugin/kube-ovn - when: kube_network_plugin == 'kube-ovn' - tags: - - kube-ovn - - - role: network_plugin/kube-router - when: kube_network_plugin == 'kube-router' - tags: - - kube-router - - - role: network_plugin/custom_cni - when: kube_network_plugin == 'custom_cni' - tags: - - custom_cni - - - role: network_plugin/multus - when: kube_network_plugin_multus - tags: - - multus diff --git a/roles/network_plugin/multus/defaults/main.yml b/roles/network_plugin/multus/defaults/main.yml deleted file mode 100644 index a982ba6ba31..00000000000 --- a/roles/network_plugin/multus/defaults/main.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -multus_conf_file: "auto" -multus_cni_conf_dir_host: "/etc/cni/net.d" -multus_cni_bin_dir_host: "/opt/cni/bin" -multus_cni_run_dir_host: "/run" -multus_cni_conf_dir: "{{ ('/host', multus_cni_conf_dir_host) | join }}" -multus_cni_bin_dir: "{{ ('/host', multus_cni_bin_dir_host) | join }}" -multus_cni_run_dir: "{{ ('/host', multus_cni_run_dir_host) | join }}" -multus_kubeconfig_file_host: "{{ (multus_cni_conf_dir_host, '/multus.d/multus.kubeconfig') | join }}" -multus_namespace_isolation: false diff --git a/roles/network_plugin/multus/files/multus-clusterrole.yml b/roles/network_plugin/multus/files/multus-clusterrole.yml deleted file mode 100644 index b574069cd9b..00000000000 --- a/roles/network_plugin/multus/files/multus-clusterrole.yml +++ /dev/null @@ -1,28 +0,0 @@ ---- -kind: ClusterRole -apiVersion: 
rbac.authorization.k8s.io/v1 -metadata: - name: multus -rules: - - apiGroups: ["k8s.cni.cncf.io"] - resources: - - '*' - verbs: - - '*' - - apiGroups: - - "" - resources: - - pods - - pods/status - verbs: - - get - - update - - apiGroups: - - "" - - events.k8s.io - resources: - - events - verbs: - - create - - patch - - update diff --git a/roles/network_plugin/multus/files/multus-clusterrolebinding.yml b/roles/network_plugin/multus/files/multus-clusterrolebinding.yml deleted file mode 100644 index 2d1e1a4f41a..00000000000 --- a/roles/network_plugin/multus/files/multus-clusterrolebinding.yml +++ /dev/null @@ -1,13 +0,0 @@ ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: multus -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: multus -subjects: -- kind: ServiceAccount - name: multus - namespace: kube-system diff --git a/roles/network_plugin/multus/files/multus-crd.yml b/roles/network_plugin/multus/files/multus-crd.yml deleted file mode 100644 index 24b2c58fca4..00000000000 --- a/roles/network_plugin/multus/files/multus-crd.yml +++ /dev/null @@ -1,45 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: network-attachment-definitions.k8s.cni.cncf.io -spec: - group: k8s.cni.cncf.io - scope: Namespaced - names: - plural: network-attachment-definitions - singular: network-attachment-definition - kind: NetworkAttachmentDefinition - shortNames: - - net-attach-def - versions: - - name: v1 - served: true - storage: true - schema: - openAPIV3Schema: - description: 'NetworkAttachmentDefinition is a CRD schema specified by the Network Plumbing - Working Group to express the intent for attaching pods to one or more logical or physical - networks. More information available at: https://github.com/k8snetworkplumbingwg/multi-net-spec' - type: object - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this represen - tation of an object. Servers should convert recognized schemas to the - latest internal value, and may reject unrecognized values. More info: - https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: 'NetworkAttachmentDefinition spec defines the desired state of a network attachment' - type: object - properties: - config: - description: 'NetworkAttachmentDefinition config is a JSON-formatted CNI configuration' - type: string diff --git a/roles/network_plugin/multus/files/multus-serviceaccount.yml b/roles/network_plugin/multus/files/multus-serviceaccount.yml deleted file mode 100644 index 62423082ca0..00000000000 --- a/roles/network_plugin/multus/files/multus-serviceaccount.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: multus - namespace: kube-system diff --git a/roles/network_plugin/multus/meta/main.yml b/roles/network_plugin/multus/meta/main.yml deleted file mode 100644 index 9b7065f1854..00000000000 --- a/roles/network_plugin/multus/meta/main.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -dependencies: - - role: network_plugin/cni diff --git a/roles/network_plugin/multus/tasks/main.yml b/roles/network_plugin/multus/tasks/main.yml deleted file mode 100644 index 3b0819d81d9..00000000000 --- a/roles/network_plugin/multus/tasks/main.yml +++ /dev/null @@ -1,54 +0,0 @@ ---- -- name: Multus | Copy manifest files - copy: - src: "{{ item.file }}" - dest: "{{ kube_config_dir }}" - mode: "0644" - with_items: - - {name: multus-crd, file: multus-crd.yml, type: customresourcedefinition} - - {name: multus-serviceaccount, file: multus-serviceaccount.yml, type: serviceaccount} - - {name: multus-clusterrole, file: multus-clusterrole.yml, type: clusterrole} - - {name: multus-clusterrolebinding, file: multus-clusterrolebinding.yml, type: clusterrolebinding} - register: multus_manifest_1 - when: inventory_hostname == groups['kube_control_plane'][0] - -- name: Multus | Check container engine type - set_fact: - container_manager_types: "{{ ansible_play_hosts_all | map('extract', hostvars, ['container_manager']) | list | unique }}" - -- name: Multus | Copy manifest templates - template: - src: multus-daemonset.yml.j2 - dest: "{{ kube_config_dir }}/{{ item.file }}" - mode: "0644" - with_items: - - {name: multus-daemonset-containerd, file: multus-daemonset-containerd.yml, type: daemonset, engine: containerd } - - {name: multus-daemonset-docker, file: multus-daemonset-docker.yml, type: daemonset, engine: docker } - - {name: multus-daemonset-crio, file: multus-daemonset-crio.yml, type: daemonset, engine: crio } - register: multus_manifest_2 - vars: - host_query: "*|[?container_manager=='{{ container_manager }}']|[0].inventory_hostname" - vars_from_node: "{{ hostvars | json_query(host_query) }}" - delegate_to: "{{ groups['kube_control_plane'][0] }}" - when: - - item.engine in container_manager_types - - hostvars[inventory_hostname].container_manager == item.engine - - inventory_hostname == vars_from_node - -- name: Multus | Start resources - kube: - name: "{{ item.item.name }}" - namespace: "kube-system" - kubectl: "{{ bin_dir }}/kubectl" - resource: "{{ item.item.type }}" - filename: "{{ kube_config_dir }}/{{ item.item.file }}" - state: "latest" - delegate_to: "{{ groups['kube_control_plane'][0] }}" - run_once: true - with_items: "{{ (multus_manifest_1.results | default([])) + (multus_nodes_list | map('extract', hostvars, 'multus_manifest_2') | map('default', []) | list | json_query('[].results')) }}" - loop_control: - label: "{{ item.item.name if item != None else 'skipped' }}" - vars: - 
multus_nodes_list: "{{ groups['k8s_cluster'] if ansible_play_batch | length == ansible_play_hosts_all | length else ansible_play_batch }}" - when: - - not item is skipped diff --git a/roles/network_plugin/multus/templates/multus-daemonset.yml.j2 b/roles/network_plugin/multus/templates/multus-daemonset.yml.j2 deleted file mode 100644 index 43d1193a92f..00000000000 --- a/roles/network_plugin/multus/templates/multus-daemonset.yml.j2 +++ /dev/null @@ -1,100 +0,0 @@ ---- -kind: DaemonSet -apiVersion: apps/v1 -metadata: -{% if container_manager_types | length >= 2 %} - name: kube-multus-{{ container_manager }}-{{ image_arch }} -{% else %} - name: kube-multus-ds-{{ image_arch }} -{% endif %} - namespace: kube-system - labels: - tier: node - app: multus -spec: - selector: - matchLabels: - tier: node - app: multus - template: - metadata: - labels: - tier: node - app: multus - spec: - hostNetwork: true - dnsPolicy: ClusterFirstWithHostNet - priorityClassName: system-node-critical - nodeSelector: - kubernetes.io/arch: {{ image_arch }} -{% if container_manager_types | length >= 2 %} - kubespray.io/container_manager: {{ container_manager }} -{% endif %} - tolerations: - - operator: Exists - serviceAccountName: multus - initContainers: - - name: install-multus-binary - image: {{ multus_image_repo }}:{{ multus_image_tag }} - command: ["/install_multus"] - args: - - "--type" - - "thin" - resources: - requests: - cpu: "10m" - memory: "15Mi" - securityContext: - privileged: true - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - name: cnibin - mountPath: {{ multus_cni_bin_dir }} - mountPropagation: Bidirectional - containers: - - name: kube-multus - image: {{ multus_image_repo }}:{{ multus_image_tag }} - command: ["/thin_entrypoint"] - args: - - "--cni-conf-dir={{ multus_cni_conf_dir }}" - - "--multus-autoconfig-dir={{ multus_cni_conf_dir }}" - - "--cni-bin-dir={{ multus_cni_bin_dir }}" - - "--multus-conf-file={{ multus_conf_file }}" - - "--multus-kubeconfig-file-host={{ multus_kubeconfig_file_host }}" - - "--namespace-isolation={{ multus_namespace_isolation | string | lower }}" - resources: - requests: - cpu: "100m" - memory: "90Mi" - limits: - cpu: "100m" - memory: "90Mi" - securityContext: - privileged: true -{% if container_manager == 'crio' %} - capabilities: - add: ["SYS_ADMIN"] -{% endif %} - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: -{% if container_manager == 'crio' %} - - name: run - mountPath: {{ multus_cni_run_dir }} - mountPropagation: HostToContainer -{% endif %} - - name: cni - mountPath: {{ multus_cni_conf_dir }} - - name: cnibin - mountPath: {{ multus_cni_bin_dir }} - volumes: -{% if container_manager == 'crio' %} - - name: run - hostPath: - path: {{ multus_cni_run_dir_host }} -{% endif %} - - name: cni - hostPath: - path: {{ multus_cni_conf_dir_host }} - - name: cnibin - hostPath: - path: {{ multus_cni_bin_dir_host }} diff --git a/roles/network_plugin/ovn4nfv/tasks/main.yml b/roles/network_plugin/ovn4nfv/tasks/main.yml deleted file mode 100644 index a16f3ec6f00..00000000000 --- a/roles/network_plugin/ovn4nfv/tasks/main.yml +++ /dev/null @@ -1,16 +0,0 @@ ---- -- name: Ovn4nfv | Label control-plane node - command: >- - {{ kubectl }} label --overwrite node {{ groups['kube_control_plane'] | first }} ovn4nfv-k8s-plugin=ovn-control-plane - when: - - inventory_hostname == groups['kube_control_plane'][0] - -- name: Ovn4nfv | Create ovn4nfv-k8s manifests - template: - src: "{{ item.file }}.j2" - dest: "{{ kube_config_dir }}/{{ item.file }}" - mode: "0644" - 
with_items: - - {name: ovn-daemonset, file: ovn-daemonset.yml} - - {name: ovn4nfv-k8s-plugin, file: ovn4nfv-k8s-plugin.yml} - register: ovn4nfv_node_manifests From 5e7b23c73ad99ad871d8a2f0c87be19c7ced880a Mon Sep 17 00:00:00 2001 From: Brian Vo Date: Wed, 10 Sep 2025 00:18:02 +0700 Subject: [PATCH 03/10] update kube config --- .gitignore | 3 +- .../2SpeedLab/group_vars/all/containerd.yml | 10 ++--- .../group_vars/k8s_cluster/addons.yml | 28 ++++++------- .../group_vars/k8s_cluster/k8s-cluster.yml | 2 + .../container-engine/containerd/meta/main.yml | 1 + .../nerdctl/handlers/main.yml | 12 ++++++ roles/container-engine/nerdctl/tasks/main.yml | 36 ++++++++++++++++ .../nerdctl/templates/nerdctl.toml.j2 | 9 ++++ roles/network_plugin/cilium/defaults/main.yml | 42 +++++++++---------- roles/network_plugin/cilium/tasks/apply.yml | 24 +++++------ roles/network_plugin/cilium/tasks/install.yml | 6 +-- 11 files changed, 117 insertions(+), 56 deletions(-) create mode 100644 roles/container-engine/nerdctl/handlers/main.yml create mode 100644 roles/container-engine/nerdctl/tasks/main.yml create mode 100644 roles/container-engine/nerdctl/templates/nerdctl.toml.j2 diff --git a/.gitignore b/.gitignore index 62b9bf87dba..969166853bd 100644 --- a/.gitignore +++ b/.gitignore @@ -10,7 +10,7 @@ contrib/offline/offline-files.tar.gz .idea .vscode .tox -.cache +.cacheheme *.bak *.tfstate *.tfstate*backup @@ -119,3 +119,4 @@ tmp.md # Ansible collection files kubernetes_sigs-kubespray*tar.gz ansible_collections +inventory/2SpeedLab/inventory.ini diff --git a/inventory/2SpeedLab/group_vars/all/containerd.yml b/inventory/2SpeedLab/group_vars/all/containerd.yml index efa1769fc2c..871a222edcd 100644 --- a/inventory/2SpeedLab/group_vars/all/containerd.yml +++ b/inventory/2SpeedLab/group_vars/all/containerd.yml @@ -1,12 +1,12 @@ --- # Please see roles/container-engine/containerd/defaults/main.yml for more configuration options -# containerd_storage_dir: "/var/lib/containerd" -# containerd_state_dir: "/run/containerd" -# containerd_oom_score: 0 +containerd_storage_dir: "/var/lib/containerd" +containerd_state_dir: "/run/containerd" +containerd_oom_score: 0 -# containerd_default_runtime: "runc" -# containerd_snapshotter: "native" +containerd_default_runtime: "runc" +containerd_snapshotter: "native" # containerd_runc_runtime: # name: runc diff --git a/inventory/2SpeedLab/group_vars/k8s_cluster/addons.yml b/inventory/2SpeedLab/group_vars/k8s_cluster/addons.yml index cad27909de9..10ec18e2ac1 100644 --- a/inventory/2SpeedLab/group_vars/k8s_cluster/addons.yml +++ b/inventory/2SpeedLab/group_vars/k8s_cluster/addons.yml @@ -223,20 +223,20 @@ argocd_enabled: false # The plugin manager for kubectl # Kube VIP -kube_vip_enabled: true -kube_vip_arp_enabled: true -kube_vip_controlplane_enabled: true -kube_vip_address: 100.10.0.2 -loadbalancer_apiserver: - address: "{{ kube_vip_address }}" - port: 6443 -kube_vip_interface: eth0 -kube_vip_services_enabled: true -kube_vip_dns_mode: first -kube_vip_cp_detect: false -kube_vip_leasename: plndr-cp-lock -kube_vip_enable_node_labeling: false -kube_vip_lb_fwdmethod: local +kube_vip_enabled: false +#kube_vip_arp_enabled: true +#kube_vip_controlplane_enabled: true +#kube_vip_address: 100.10.0.2 +#loadbalancer_apiserver: +# address: "{{ kube_vip_address }}" +# port: 6443 +#kube_vip_interface: eth0 +#kube_vip_services_enabled: true +#kube_vip_dns_mode: first +#kube_vip_cp_detect: false +#kube_vip_leasename: plndr-cp-lock +#kube_vip_enable_node_labeling: false +#kube_vip_lb_fwdmethod: local # Node 
Feature Discovery node_feature_discovery_enabled: false diff --git a/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-cluster.yml b/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-cluster.yml index 8e0d99f65ff..089d18de88e 100644 --- a/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-cluster.yml +++ b/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-cluster.yml @@ -39,6 +39,8 @@ credentials_dir: "{{ inventory_dir }}/credentials" # kube_oidc_auth: false # kube_token_auth: false +# disable kubeproxy +kube_proxy_remove: true ## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/ ## To use OpenID you have to deploy additional an OpenID Provider (e.g Dex, Keycloak, ...) diff --git a/roles/container-engine/containerd/meta/main.yml b/roles/container-engine/containerd/meta/main.yml index 5629567722a..41c5b6a9749 100644 --- a/roles/container-engine/containerd/meta/main.yml +++ b/roles/container-engine/containerd/meta/main.yml @@ -3,3 +3,4 @@ dependencies: - role: container-engine/containerd-common - role: container-engine/runc - role: container-engine/crictl + - role: container-engine/nerdctl diff --git a/roles/container-engine/nerdctl/handlers/main.yml b/roles/container-engine/nerdctl/handlers/main.yml new file mode 100644 index 00000000000..1744706075c --- /dev/null +++ b/roles/container-engine/nerdctl/handlers/main.yml @@ -0,0 +1,12 @@ +--- +- name: Get nerdctl completion + command: "{{ bin_dir }}/nerdctl completion bash" + changed_when: false + register: nerdctl_completion + check_mode: false + +- name: Install nerdctl completion + copy: + dest: /etc/bash_completion.d/nerdctl + content: "{{ nerdctl_completion.stdout }}" + mode: "0644" diff --git a/roles/container-engine/nerdctl/tasks/main.yml b/roles/container-engine/nerdctl/tasks/main.yml new file mode 100644 index 00000000000..d3cd0070cac --- /dev/null +++ b/roles/container-engine/nerdctl/tasks/main.yml @@ -0,0 +1,36 @@ +--- +- name: Nerdctl | Download nerdctl + include_tasks: "../../../download/tasks/download_file.yml" + vars: + download: "{{ download_defaults | combine(downloads.nerdctl) }}" + +- name: Nerdctl | Copy nerdctl binary from download dir + copy: + src: "{{ local_release_dir }}/nerdctl" + dest: "{{ bin_dir }}/nerdctl" + mode: "0755" + remote_src: true + owner: root + group: root + become: true + notify: + - Get nerdctl completion + - Install nerdctl completion + +- name: Nerdctl | Create configuration dir + file: + path: /etc/nerdctl + state: directory + mode: "0755" + owner: root + group: root + become: true + +- name: Nerdctl | Install nerdctl configuration + template: + src: nerdctl.toml.j2 + dest: /etc/nerdctl/nerdctl.toml + mode: "0644" + owner: root + group: root + become: true diff --git a/roles/container-engine/nerdctl/templates/nerdctl.toml.j2 b/roles/container-engine/nerdctl/templates/nerdctl.toml.j2 new file mode 100644 index 00000000000..e9bf6006184 --- /dev/null +++ b/roles/container-engine/nerdctl/templates/nerdctl.toml.j2 @@ -0,0 +1,9 @@ +debug = false +debug_full = false +address = "{{ cri_socket }}" +namespace = "k8s.io" +snapshotter = "{{ nerdctl_snapshotter | default('overlayfs') }}" +cni_path = "/opt/cni/bin" +cni_netconfpath = "/etc/cni/net.d" +cgroup_manager = "{{ kubelet_cgroup_driver | default('systemd') }}" +hosts_dir = ["{{ containerd_cfg_dir }}/certs.d"] \ No newline at end of file diff --git a/roles/network_plugin/cilium/defaults/main.yml b/roles/network_plugin/cilium/defaults/main.yml index 42c54e5af6b..a081a2c89db 100644 --- 
a/roles/network_plugin/cilium/defaults/main.yml +++ b/roles/network_plugin/cilium/defaults/main.yml @@ -50,7 +50,7 @@ cilium_tunnel_mode: vxlan cilium_loadbalancer_mode: snat # -- Configure Loadbalancer IP Pools -cilium_loadbalancer_ip_pools: [] +cilium_loadbalancer_ip_pools: [ ] # Optional features cilium_enable_prometheus: false @@ -153,7 +153,7 @@ cilium_enable_hubble_ui: "{{ cilium_enable_hubble }}" ### Enable Hubble Metrics (deprecated) cilium_enable_hubble_metrics: false ### if cilium_enable_hubble_metrics: true -cilium_hubble_metrics: [] +cilium_hubble_metrics: [ ] # - dns # - drop # - tcp @@ -171,9 +171,9 @@ cilium_hubble_export_file_max_size_mb: "10" cilium_hubble_export_dynamic_enabled: false cilium_hubble_export_dynamic_config_content: - name: all - fieldMask: [] - includeFilters: [] - excludeFilters: [] + fieldMask: [ ] + includeFilters: [ ] + excludeFilters: [ ] filePath: "/var/run/cilium/hubble/events.log" # Override the DNS suffix that Hubble-Relay uses to resolve its peer service. @@ -212,14 +212,14 @@ cilium_ipam_mode: cluster-pool # Extra arguments for the Cilium agent -cilium_agent_custom_args: [] # deprecated -cilium_agent_extra_args: [] +cilium_agent_custom_args: [ ] # deprecated +cilium_agent_extra_args: [ ] # For adding and mounting extra volumes to the cilium agent -cilium_agent_extra_volumes: [] -cilium_agent_extra_volume_mounts: [] +cilium_agent_extra_volumes: [ ] +cilium_agent_extra_volume_mounts: [ ] -cilium_agent_extra_env_vars: [] +cilium_agent_extra_env_vars: [ ] cilium_operator_replicas: 2 @@ -230,15 +230,15 @@ cilium_operator_api_serve_addr: "127.0.0.1:9234" ## cilium_config_extra_vars: ## var1: "value1" ## var2: "value2" -cilium_config_extra_vars: {} +cilium_config_extra_vars: { } # For adding and mounting extra volumes to the cilium operator -cilium_operator_extra_volumes: [] -cilium_operator_extra_volume_mounts: [] +cilium_operator_extra_volumes: [ ] +cilium_operator_extra_volume_mounts: [ ] # Extra arguments for the Cilium Operator -cilium_operator_custom_args: [] # deprecated -cilium_operator_extra_args: [] +cilium_operator_custom_args: [ ] # deprecated +cilium_operator_extra_args: [ ] # Tolerations of the cilium operator cilium_operator_tolerations: @@ -314,19 +314,19 @@ cilium_enable_bgp_control_plane: false # -- Configure BGP Instances (New bgpv2 API v1.16+) -cilium_bgp_cluster_configs: [] +cilium_bgp_cluster_configs: [ ] # -- Configure BGP Peers (New bgpv2 API v1.16+) -cilium_bgp_peer_configs: [] +cilium_bgp_peer_configs: [ ] # -- Configure BGP Advertisements (New bgpv2 API v1.16+) -cilium_bgp_advertisements: [] +cilium_bgp_advertisements: [ ] # -- Configure BGP Node Config Overrides (New bgpv2 API v1.16+) -cilium_bgp_node_config_overrides: [] +cilium_bgp_node_config_overrides: [ ] # -- Configure BGP Peers (Legacy < v1.16) -cilium_bgp_peering_policies: [] +cilium_bgp_peering_policies: [ ] # -- Whether to enable CNP status updates. 
cilium_disable_cnp_status_updates: true @@ -365,4 +365,4 @@ cilium_install_extra_flags: "" # Cilium extra values, use any values from cilium Helm Chart # ref: https://docs.cilium.io/en/stable/helm-reference/ -cilium_extra_values: {} +cilium_extra_values: { } diff --git a/roles/network_plugin/cilium/tasks/apply.yml b/roles/network_plugin/cilium/tasks/apply.yml index 546a0a4920a..18860610709 100644 --- a/roles/network_plugin/cilium/tasks/apply.yml +++ b/roles/network_plugin/cilium/tasks/apply.yml @@ -40,7 +40,7 @@ dest: "{{ kube_config_dir }}/{{ item.name }}-{{ item.file }}" mode: "0644" with_items: - - {name: cilium, file: cilium-loadbalancer-ip-pool.yml, type: CiliumLoadBalancerIPPool} + - { name: cilium, file: cilium-loadbalancer-ip-pool.yml, type: CiliumLoadBalancerIPPool } when: - inventory_hostname == groups['kube_control_plane'][0] - cillium_lbippool_crd_ready is defined and cillium_lbippool_crd_ready.rc is defined and cillium_lbippool_crd_ready.rc == 0 @@ -54,7 +54,7 @@ filename: "{{ kube_config_dir }}/{{ item.name }}-{{ item.file }}" state: "latest" loop: - - {name: cilium, file: cilium-loadbalancer-ip-pool.yml, type: CiliumLoadBalancerIPPool} + - { name: cilium, file: cilium-loadbalancer-ip-pool.yml, type: CiliumLoadBalancerIPPool } when: - inventory_hostname == groups['kube_control_plane'][0] - cillium_lbippool_crd_ready is defined and cillium_lbippool_crd_ready.rc is defined and cillium_lbippool_crd_ready.rc == 0 @@ -76,7 +76,7 @@ dest: "{{ kube_config_dir }}/{{ item.name }}-{{ item.file }}" mode: "0644" with_items: - - {name: cilium, file: cilium-bgp-peering-policy.yml, type: CiliumBGPPeeringPolicy} + - { name: cilium, file: cilium-bgp-peering-policy.yml, type: CiliumBGPPeeringPolicy } when: - inventory_hostname == groups['kube_control_plane'][0] - cillium_bgpppolicy_crd_ready is defined and cillium_bgpppolicy_crd_ready.rc is defined and cillium_bgpppolicy_crd_ready.rc == 0 @@ -90,7 +90,7 @@ filename: "{{ kube_config_dir }}/{{ item.name }}-{{ item.file }}" state: "latest" loop: - - {name: cilium, file: cilium-bgp-peering-policy.yml, type: CiliumBGPPeeringPolicy} + - { name: cilium, file: cilium-bgp-peering-policy.yml, type: CiliumBGPPeeringPolicy } when: - inventory_hostname == groups['kube_control_plane'][0] - cillium_bgpppolicy_crd_ready is defined and cillium_bgpppolicy_crd_ready.rc is defined and cillium_bgpppolicy_crd_ready.rc == 0 @@ -112,7 +112,7 @@ dest: "{{ kube_config_dir }}/{{ item.name }}-{{ item.file }}" mode: "0644" with_items: - - {name: cilium, file: cilium-bgp-cluster-config.yml, type: CiliumBGPClusterConfig} + - { name: cilium, file: cilium-bgp-cluster-config.yml, type: CiliumBGPClusterConfig } when: - inventory_hostname == groups['kube_control_plane'][0] - cillium_bgpcconfig_crd_ready is defined and cillium_bgpcconfig_crd_ready.rc is defined and cillium_bgpcconfig_crd_ready.rc == 0 @@ -126,7 +126,7 @@ filename: "{{ kube_config_dir }}/{{ item.name }}-{{ item.file }}" state: "latest" loop: - - {name: cilium, file: cilium-bgp-cluster-config.yml, type: CiliumBGPClusterConfig} + - { name: cilium, file: cilium-bgp-cluster-config.yml, type: CiliumBGPClusterConfig } when: - inventory_hostname == groups['kube_control_plane'][0] - cillium_bgpcconfig_crd_ready is defined and cillium_bgpcconfig_crd_ready.rc is defined and cillium_bgpcconfig_crd_ready.rc == 0 @@ -148,7 +148,7 @@ dest: "{{ kube_config_dir }}/{{ item.name }}-{{ item.file }}" mode: "0644" with_items: - - {name: cilium, file: cilium-bgp-peer-config.yml, type: CiliumBGPPeerConfig} + - { name: cilium, file: 
cilium-bgp-peer-config.yml, type: CiliumBGPPeerConfig } when: - inventory_hostname == groups['kube_control_plane'][0] - cillium_bgppconfig_crd_ready is defined and cillium_bgppconfig_crd_ready.rc is defined and cillium_bgppconfig_crd_ready.rc == 0 @@ -162,7 +162,7 @@ filename: "{{ kube_config_dir }}/{{ item.name }}-{{ item.file }}" state: "latest" loop: - - {name: cilium, file: cilium-bgp-peer-config.yml, type: CiliumBGPPeerConfig} + - { name: cilium, file: cilium-bgp-peer-config.yml, type: CiliumBGPPeerConfig } when: - inventory_hostname == groups['kube_control_plane'][0] - cillium_bgppconfig_crd_ready is defined and cillium_bgppconfig_crd_ready.rc is defined and cillium_bgppconfig_crd_ready.rc == 0 @@ -184,7 +184,7 @@ dest: "{{ kube_config_dir }}/{{ item.name }}-{{ item.file }}" mode: "0644" with_items: - - {name: cilium, file: cilium-bgp-advertisement.yml, type: CiliumBGPAdvertisement} + - { name: cilium, file: cilium-bgp-advertisement.yml, type: CiliumBGPAdvertisement } when: - inventory_hostname == groups['kube_control_plane'][0] - cillium_bgpadvert_crd_ready is defined and cillium_bgpadvert_crd_ready.rc is defined and cillium_bgpadvert_crd_ready.rc == 0 @@ -198,7 +198,7 @@ filename: "{{ kube_config_dir }}/{{ item.name }}-{{ item.file }}" state: "latest" loop: - - {name: cilium, file: cilium-bgp-advertisement.yml, type: CiliumBGPAdvertisement} + - { name: cilium, file: cilium-bgp-advertisement.yml, type: CiliumBGPAdvertisement } when: - inventory_hostname == groups['kube_control_plane'][0] - cillium_bgpadvert_crd_ready is defined and cillium_bgpadvert_crd_ready.rc is defined and cillium_bgpadvert_crd_ready.rc == 0 @@ -220,7 +220,7 @@ dest: "{{ kube_config_dir }}/{{ item.name }}-{{ item.file }}" mode: "0644" with_items: - - {name: cilium, file: cilium-bgp-node-config-override.yml, type: CiliumBGPNodeConfigOverride} + - { name: cilium, file: cilium-bgp-node-config-override.yml, type: CiliumBGPNodeConfigOverride } when: - inventory_hostname == groups['kube_control_plane'][0] - cilium_bgp_node_config_crd_ready is defined and cilium_bgp_node_config_crd_ready.rc is defined and cilium_bgp_node_config_crd_ready.rc == 0 @@ -234,7 +234,7 @@ filename: "{{ kube_config_dir }}/{{ item.name }}-{{ item.file }}" state: "latest" loop: - - {name: cilium, file: cilium-bgp-node-config-override.yml, type: CiliumBGPNodeConfigOverride} + - { name: cilium, file: cilium-bgp-node-config-override.yml, type: CiliumBGPNodeConfigOverride } when: - inventory_hostname == groups['kube_control_plane'][0] - cilium_bgp_node_config_crd_ready is defined and cilium_bgp_node_config_crd_ready.rc is defined and cilium_bgp_node_config_crd_ready.rc == 0 diff --git a/roles/network_plugin/cilium/tasks/install.yml b/roles/network_plugin/cilium/tasks/install.yml index 252ba093f4a..3819e7e80ff 100644 --- a/roles/network_plugin/cilium/tasks/install.yml +++ b/roles/network_plugin/cilium/tasks/install.yml @@ -24,9 +24,9 @@ state: hard force: true loop: - - {s: "{{ kube_etcd_cacert_file }}", d: "ca_cert.crt"} - - {s: "{{ kube_etcd_cert_file }}", d: "cert.crt"} - - {s: "{{ kube_etcd_key_file }}", d: "key.pem"} + - { s: "{{ kube_etcd_cacert_file }}", d: "ca_cert.crt" } + - { s: "{{ kube_etcd_cert_file }}", d: "cert.crt" } + - { s: "{{ kube_etcd_key_file }}", d: "key.pem" } when: - cilium_identity_allocation_mode == "kvstore" From 2c6a43d26d593d4ede7cdafd96f04deab4cab596 Mon Sep 17 00:00:00 2001 From: Brian Vo Date: Fri, 12 Sep 2025 20:54:25 +0700 Subject: [PATCH 04/10] update kube config --- .../group_vars/k8s_cluster/k8s-net-cilium.yml | 
14 +++++++------- roles/network_plugin/cilium/defaults/main.yml | 9 ++++++--- 2 files changed, 13 insertions(+), 10 deletions(-) diff --git a/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-cilium.yml b/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-cilium.yml index 78b7dc4c8ca..dce3b829892 100644 --- a/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-cilium.yml +++ b/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-cilium.yml @@ -34,13 +34,13 @@ cilium_identity_allocation_mode: crd # kube_etcd_key_file: cert-key.pem # Limits for apps -# cilium_memory_limit: 500M -# cilium_cpu_limit: 500m -# cilium_memory_requests: 64M -# cilium_cpu_requests: 100m +cilium_memory_limit: 500M +cilium_cpu_limit: 500m +cilium_memory_requests: 64M +cilium_cpu_requests: 100m # Overlay Network Mode -# cilium_tunnel_mode: vxlan +cilium_tunnel_mode: vxlan # LoadBalancer Mode (snat/dsr/hybrid) Ref: https://docs.cilium.io/en/stable/network/kubernetes/kubeproxy-free/#dsr-mode # cilium_loadbalancer_mode: snat @@ -57,7 +57,7 @@ cilium_identity_allocation_mode: crd # Only effective when monitor aggregation is set to "medium" or higher. # cilium_monitor_aggregation_flags: "all" # Kube Proxy Replacement mode (strict/partial) -# cilium_kube_proxy_replacement: partial +cilium_kube_proxy_replacement: strict # If upgrading from Cilium < 1.5, you may want to override some of these options # to prevent service disruptions. See also: @@ -241,7 +241,7 @@ cilium_ipam_mode: kubernetes # -- Enables masquerading of IPv4 traffic leaving the node from endpoints. # Available for Cilium v1.10 and up -# cilium_enable_ipv4_masquerade: true +cilium_enable_ipv4_masquerade: true # -- Enables masquerading of IPv6 traffic leaving the node from endpoints. # Available for Cilium v1.10 and up # cilium_enable_ipv6_masquerade: true diff --git a/roles/network_plugin/cilium/defaults/main.yml b/roles/network_plugin/cilium/defaults/main.yml index a081a2c89db..5381033d6f7 100644 --- a/roles/network_plugin/cilium/defaults/main.yml +++ b/roles/network_plugin/cilium/defaults/main.yml @@ -213,7 +213,10 @@ cilium_ipam_mode: cluster-pool # Extra arguments for the Cilium agent cilium_agent_custom_args: [ ] # deprecated -cilium_agent_extra_args: [ ] +cilium_agent_extra_args: + - --agent-health-port=9879 + - --disable-envoy-version-check + - --hostbin-path=/usr/local/bin # Alternative path # For adding and mounting extra volumes to the cilium agent cilium_agent_extra_volumes: [ ] @@ -272,8 +275,8 @@ cilium_cni_log_file: "/var/run/cilium/cilium-cni.log" # Available for Cilium v1.11 and up cilium_cgroup_auto_mount: true # -- Configure cgroup root where cgroup2 filesystem is mounted on the host -cilium_cgroup_host_root: "/run/cilium/cgroupv2" - +# cilium_cgroup_host_root: "/run/cilium/cgroupv2" +cilium_cgroup_host_root: /sys/fs/cgroup # Specifies the ratio (0.0-1.0) of total system memory to use for dynamic # sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps. 
cilium_bpf_map_dynamic_size_ratio: "0.0025" From 63280ba210b7a4e963b1be3112669d04f91d279b Mon Sep 17 00:00:00 2001 From: Brian Vo Date: Sun, 14 Sep 2025 01:39:52 +0700 Subject: [PATCH 05/10] update kube config --- .../group_vars/k8s_cluster/k8s-cluster.yml | 4 ++-- .../group_vars/k8s_cluster/k8s-net-cilium.yml | 13 +++++++------ .../sample/group_vars/k8s_cluster/k8s-cluster.yml | 2 +- roles/kubespray_defaults/defaults/main/download.yml | 2 +- roles/kubespray_defaults/defaults/main/main.yml | 2 +- roles/network_plugin/cilium/defaults/main.yml | 4 ++-- 6 files changed, 14 insertions(+), 13 deletions(-) diff --git a/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-cluster.yml b/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-cluster.yml index 089d18de88e..2ff454bb9ae 100644 --- a/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-cluster.yml +++ b/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-cluster.yml @@ -23,7 +23,7 @@ local_release_dir: "/tmp/releases" retry_stagger: 5 # This is the user that owns tha cluster installation. -kube_owner: kube +kube_owner: root # This is the group that the cert creation scripts chgrp the # cert files to. Not really changeable... @@ -168,7 +168,7 @@ dns_mode: coredns # Enable nodelocal dns cache enable_nodelocaldns: true enable_nodelocaldns_secondary: false -nodelocaldns_ip: 169.254.25.10 +#nodelocaldns_ip: 169.254.25.10 nodelocaldns_health_port: 9254 nodelocaldns_second_health_port: 9256 nodelocaldns_bind_metrics_host_ip: false diff --git a/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-cilium.yml b/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-cilium.yml index dce3b829892..52644d3c75a 100644 --- a/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-cilium.yml +++ b/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-cilium.yml @@ -10,7 +10,7 @@ cilium_l2announcements: false # Cilium agent health port -# cilium_agent_health_port: "9879" +cilium_agent_health_port: "9879" # Identity allocation mode selects how identities are shared between cilium # nodes by setting how they are stored. The options are "crd" or "kvstore". @@ -57,7 +57,7 @@ cilium_tunnel_mode: vxlan # Only effective when monitor aggregation is set to "medium" or higher. # cilium_monitor_aggregation_flags: "all" # Kube Proxy Replacement mode (strict/partial) -cilium_kube_proxy_replacement: strict +cilium_kube_proxy_replacement: true # If upgrading from Cilium < 1.5, you may want to override some of these options # to prevent service disruptions. See also: @@ -103,11 +103,11 @@ cilium_kube_proxy_replacement: strict # cilium_native_routing_cidr_ipv6: "" # Enable transparent network encryption. -cilium_encryption_enabled: true +cilium_encryption_enabled: false # Encryption method. Can be either ipsec or wireguard. # Only effective when `cilium_encryption_enabled` is set to true. -cilium_encryption_type: "wireguard" +#cilium_encryption_type: "wireguard" # Enable encryption for pure node to node traffic. # This option is only effective when `cilium_encryption_type` is set to `ipsec`. @@ -149,7 +149,7 @@ cilium_encryption_type: "wireguard" # Hubble ### Enable Hubble without install -cilium_enable_hubble: true +cilium_enable_hubble: false ### Enable Hubble-ui ### Installed by default when hubble is enabled. 
To disable set to false # cilium_enable_hubble_ui: "{{ cilium_enable_hubble }}" @@ -184,7 +184,8 @@ cilium_hubble_metrics: cilium_ipam_mode: kubernetes # Extra arguments for the Cilium agent -# cilium_agent_custom_args: [] +cilium_agent_custom_args: + - --write-cni-conf-when-ready=/host/etc/cni/net.d/05-cilium.conflist # For adding and mounting extra volumes to the cilium agent # cilium_agent_extra_volumes: [] diff --git a/inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml b/inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml index cb9fa2438e7..57b5884131e 100644 --- a/inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml +++ b/inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml @@ -23,7 +23,7 @@ local_release_dir: "/tmp/releases" retry_stagger: 5 # This is the user that owns tha cluster installation. -kube_owner: kube +kube_owner: root # This is the group that the cert creation scripts chgrp the # cert files to. Not really changeable... diff --git a/roles/kubespray_defaults/defaults/main/download.yml b/roles/kubespray_defaults/defaults/main/download.yml index 91745660e8d..82436003e1e 100644 --- a/roles/kubespray_defaults/defaults/main/download.yml +++ b/roles/kubespray_defaults/defaults/main/download.yml @@ -114,7 +114,7 @@ flannel_version: 0.26.7 flannel_cni_version: 1.7.1-flannel1 cni_version: "{{ (cni_binary_checksums['amd64'] | dict2items)[0].key }}" -cilium_version: "1.17.7" +cilium_version: "1.18.1" cilium_cli_version: "{{ (ciliumcli_binary_checksums['amd64'] | dict2items)[0].key }}" cilium_enable_hubble: false diff --git a/roles/kubespray_defaults/defaults/main/main.yml b/roles/kubespray_defaults/defaults/main/main.yml index b6795ff40ab..2e3cbadfad4 100644 --- a/roles/kubespray_defaults/defaults/main/main.yml +++ b/roles/kubespray_defaults/defaults/main/main.yml @@ -194,7 +194,7 @@ kube_cert_compat_dir: "/etc/kubernetes/pki" kube_token_dir: "{{ kube_config_dir }}/tokens" # This is the user that owns the cluster installation. -kube_owner: kube +kube_owner: root # This is the group that the cert creation scripts chgrp the # cert files to. Not really changeable... diff --git a/roles/network_plugin/cilium/defaults/main.yml b/roles/network_plugin/cilium/defaults/main.yml index 5381033d6f7..3a2267a5336 100644 --- a/roles/network_plugin/cilium/defaults/main.yml +++ b/roles/network_plugin/cilium/defaults/main.yml @@ -59,7 +59,7 @@ cilium_enable_portmap: false # Monitor aggregation level (none/low/medium/maximum) cilium_monitor_aggregation: medium # Kube Proxy Replacement mode (true/false) -cilium_kube_proxy_replacement: false +cilium_kube_proxy_replacement: true # If not defined `cilium_dns_proxy_enable_transparent_mode`, it will following the Cilium behavior. # When Cilium is configured to replace kube-proxy, it automatically enables dnsProxy, which will conflict with nodelocaldns. 
@@ -289,7 +289,7 @@ cilium_enable_ipv4_masquerade: true cilium_enable_ipv6_masquerade: true # -- Enable native IP masquerade support in eBPF -cilium_enable_bpf_masquerade: false +cilium_enable_bpf_masquerade: true # -- Configure whether direct routing mode should route traffic via # host stack (true) or directly and more efficiently out of BPF (false) if From b2d1ccdb5c6cfd8965b5526ee400441bb48d58fd Mon Sep 17 00:00:00 2001 From: Brian Vo Date: Sun, 14 Sep 2025 18:08:45 +0700 Subject: [PATCH 06/10] add cilium version 1.18.1 --- tests/files/custom_cni/cilium.yaml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/tests/files/custom_cni/cilium.yaml b/tests/files/custom_cni/cilium.yaml index c89ae15ebf9..a9b2069609c 100644 --- a/tests/files/custom_cni/cilium.yaml +++ b/tests/files/custom_cni/cilium.yaml @@ -1034,7 +1034,7 @@ spec: type: Unconfined containers: - name: cilium-agent - image: "quay.io/cilium/cilium:v1.16.3@sha256:62d2a09bbef840a46099ac4c69421c90f84f28d018d479749049011329aa7f28" + image: "quay.io/cilium/cilium:v1.18.1@sha256:65ab17c052d8758b2ad157ce766285e04173722df59bdee1ea6d5fda7149f0e9" imagePullPolicy: IfNotPresent command: - cilium-agent @@ -1185,7 +1185,7 @@ spec: mountPath: /tmp initContainers: - name: config - image: "quay.io/cilium/cilium:v1.16.3@sha256:62d2a09bbef840a46099ac4c69421c90f84f28d018d479749049011329aa7f28" + image: "quay.io/cilium/cilium:v1.18.1@sha256:65ab17c052d8758b2ad157ce766285e04173722df59bdee1ea6d5fda7149f0e9" imagePullPolicy: IfNotPresent command: - cilium-dbg @@ -1208,7 +1208,7 @@ spec: # Required to mount cgroup2 filesystem on the underlying Kubernetes node. # We use nsenter command with host's cgroup and mount namespaces enabled. - name: mount-cgroup - image: "quay.io/cilium/cilium:v1.16.3@sha256:62d2a09bbef840a46099ac4c69421c90f84f28d018d479749049011329aa7f28" + image: "quay.io/cilium/cilium:v1.18.1@sha256:65ab17c052d8758b2ad157ce766285e04173722df59bdee1ea6d5fda7149f0e9" imagePullPolicy: IfNotPresent env: - name: CGROUP_ROOT @@ -1245,7 +1245,7 @@ spec: drop: - ALL - name: apply-sysctl-overwrites - image: "quay.io/cilium/cilium:v1.16.3@sha256:62d2a09bbef840a46099ac4c69421c90f84f28d018d479749049011329aa7f28" + image: "quay.io/cilium/cilium:v1.18.1@sha256:65ab17c052d8758b2ad157ce766285e04173722df59bdee1ea6d5fda7149f0e9" imagePullPolicy: IfNotPresent env: - name: BIN_PATH @@ -1283,7 +1283,7 @@ spec: # from a privileged container because the mount propagation bidirectional # only works from privileged containers. 
- name: mount-bpf-fs - image: "quay.io/cilium/cilium:v1.16.3@sha256:62d2a09bbef840a46099ac4c69421c90f84f28d018d479749049011329aa7f28" + image: "quay.io/cilium/cilium:v1.18.1@sha256:65ab17c052d8758b2ad157ce766285e04173722df59bdee1ea6d5fda7149f0e9" imagePullPolicy: IfNotPresent args: - 'mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf' @@ -1299,7 +1299,7 @@ spec: mountPath: /sys/fs/bpf mountPropagation: Bidirectional - name: clean-cilium-state - image: "quay.io/cilium/cilium:v1.16.3@sha256:62d2a09bbef840a46099ac4c69421c90f84f28d018d479749049011329aa7f28" + image: "quay.io/cilium/cilium:v1.18.1@sha256:65ab17c052d8758b2ad157ce766285e04173722df59bdee1ea6d5fda7149f0e9" imagePullPolicy: IfNotPresent command: - /init-container.sh @@ -1346,7 +1346,7 @@ spec: mountPath: /var/run/cilium # wait-for-kube-proxy # Install the CNI binaries in an InitContainer so we don't have a writable host mount in the agent - name: install-cni-binaries - image: "quay.io/cilium/cilium:v1.16.3@sha256:62d2a09bbef840a46099ac4c69421c90f84f28d018d479749049011329aa7f28" + image: "quay.io/cilium/cilium:v1.18.1@sha256:65ab17c052d8758b2ad157ce766285e04173722df59bdee1ea6d5fda7149f0e9" imagePullPolicy: IfNotPresent command: - "/install-plugin.sh" @@ -1685,7 +1685,7 @@ spec: spec: containers: - name: cilium-operator - image: "quay.io/cilium/operator-generic:v1.16.3@sha256:6e2925ef47a1c76e183c48f95d4ce0d34a1e5e848252f910476c3e11ce1ec94b" + image: "quay.io/cilium/operator-generic:v1.18.1@sha256:97f4553afa443465bdfbc1cc4927c93f16ac5d78e4dd2706736e7395382201bc" imagePullPolicy: IfNotPresent command: - cilium-operator-generic From 5cb2b2e2eed47b48f576116be266de99f2b7e7dd Mon Sep 17 00:00:00 2001 From: Brian Vo Date: Sun, 14 Sep 2025 22:58:56 +0700 Subject: [PATCH 07/10] update cilium config --- .../2SpeedLab/group_vars/k8s_cluster/k8s-net-cilium.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-cilium.yml b/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-cilium.yml index 52644d3c75a..fd53f14a6c7 100644 --- a/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-cilium.yml +++ b/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-cilium.yml @@ -34,8 +34,8 @@ cilium_identity_allocation_mode: crd # kube_etcd_key_file: cert-key.pem # Limits for apps -cilium_memory_limit: 500M -cilium_cpu_limit: 500m +cilium_memory_limit: 1000M +cilium_cpu_limit: 1000m cilium_memory_requests: 64M cilium_cpu_requests: 100m @@ -196,7 +196,7 @@ cilium_agent_custom_args: # cilium_operator_replicas: 2 # The address at which the cillium operator bind health check api -# cilium_operator_api_serve_addr: "127.0.0.1:9234" +cilium_operator_api_serve_addr: "0.0.0.0:9234" ## A dictionary of extra config variables to add to cilium-config, formatted like: ## cilium_config_extra_vars: From 8a1eacb46916822f42096a2f283a7ee9ad66a25e Mon Sep 17 00:00:00 2001 From: Brian Vo Date: Wed, 17 Sep 2025 00:23:57 +0700 Subject: [PATCH 08/10] restore container engine --- .../group_vars/k8s_cluster/k8s-cluster.yml | 2 +- .../group_vars/k8s_cluster/k8s-net-cilium.yml | 4 +- .../containerd-common/defaults/main.yml | 12 + .../containerd/defaults/main.yml | 3 +- .../containerd/molecule/default/converge.yml | 2 +- .../containerd/molecule/default/molecule.yml | 11 +- .../containerd/molecule/default/prepare.yml | 29 + .../molecule/default/tests/test_default.py | 55 ++ .../containerd/molecule/default/verify.yml | 39 - .../containerd/tasks/main.yml | 44 +- .../containerd/tasks/reset.yml | 18 + 
 .../containerd/templates/hosts.toml.j2        |   6 -
 .../containerd/vars/debian.yml                |   7 +
 .../containerd/vars/ubuntu.yml                |   7 +
 .../cri-dockerd/molecule/default/converge.yml |   2 +-
 .../cri-dockerd/molecule/default/molecule.yml |   7 +-
 .../cri-dockerd/molecule/default/prepare.yml  |  48 ++
 .../molecule/default/tests/test_default.py    |  19 +
 .../cri-dockerd/molecule/default/verify.yml   |  15 -
 .../container-engine/cri-o/defaults/main.yml  |   2 +-
 roles/container-engine/cri-o/meta/main.yml    |   2 +
 .../cri-o/molecule/default/converge.yml       |   2 +-
 .../molecule/default}/files/10-mynet.conf     |   0
 .../molecule/default/files/container.json}    |   4 +-
 .../molecule/default/files/sandbox.json}      |   2 +-
 .../cri-o/molecule/default/molecule.yml       |   7 +-
 .../molecule/default}/prepare.yml             |  28 +-
 .../molecule/default/tests/test_default.py    |  35 +
 .../cri-o/molecule/default/verify.yml         |  11 -
 .../container-engine/docker/defaults/main.yml |   2 +-
 roles/container-engine/docker/tasks/main.yml  |  15 +-
 roles/container-engine/docker/vars/debian.yml |  28 +-
 roles/container-engine/docker/vars/fedora.yml |  25 +-
 roles/container-engine/docker/vars/redhat.yml |  48 +-
 roles/container-engine/docker/vars/ubuntu.yml |  22 +-
 .../gvisor/molecule/default/converge.yml      |   2 +-
 .../gvisor/molecule/default/molecule.yml      |  37 +-
 .../gvisor/molecule/default/prepare.yml       |  49 ++
 .../molecule/default/tests/test_default.py    |  29 +
 .../gvisor/molecule/default/verify.yml        |  19 -
 .../kata-containers/defaults/main.yml         |  10 +
 .../molecule/default/converge.yml             |  11 +
 .../molecule/default/files/10-mynet.conf      |  17 +
 .../molecule/default/files/container.json     |  10 +
 .../molecule/default/files/sandbox.json       |  10 +
 .../molecule/default/molecule.yml             |  39 +
 .../molecule/default/prepare.yml              |  49 ++
 .../molecule/default/tests/test_default.py    |  37 +
 .../kata-containers/tasks/main.yml            |  54 ++
 .../templates/configuration-qemu.toml.j2      | 706 ++++++++++++++++++
 .../templates/containerd-shim-kata-v2.j2      |   2 +
 roles/container-engine/meta/main.yml          |  36 +
 roles/container-engine/molecule/test_cri.yml  |  24 -
 .../molecule/test_runtime.yml                 |  42 --
 .../nerdctl/templates/nerdctl.toml.j2         |   2 +-
 roles/container-engine/skopeo/tasks/main.yml  |  32 +
 .../validate-container-engine/tasks/main.yml  |   6 +-
 .../container-engine/youki/defaults/main.yml  |   3 +
 .../youki/molecule/default/converge.yml       |  11 +
 .../molecule/default/files/10-mynet.conf      |  17 +
 .../molecule/default/files/container.json     |  10 +
 .../youki/molecule/default/files/sandbox.json |  10 +
 .../youki/molecule/default/molecule.yml       |  39 +
 .../youki/molecule/default/prepare.yml        |  49 ++
 .../molecule/default/tests/test_default.py    |  29 +
 roles/container-engine/youki/tasks/main.yml   |  12 +
 66 files changed, 1669 insertions(+), 297 deletions(-)
 create mode 100644 roles/container-engine/containerd/molecule/default/prepare.yml
 create mode 100644 roles/container-engine/containerd/molecule/default/tests/test_default.py
 delete mode 100644 roles/container-engine/containerd/molecule/default/verify.yml
 create mode 100644 roles/container-engine/containerd/vars/debian.yml
 create mode 100644 roles/container-engine/containerd/vars/ubuntu.yml
 create mode 100644 roles/container-engine/cri-dockerd/molecule/default/prepare.yml
 create mode 100644 roles/container-engine/cri-dockerd/molecule/default/tests/test_default.py
 delete mode 100644 roles/container-engine/cri-dockerd/molecule/default/verify.yml
 rename roles/container-engine/{molecule => cri-o/molecule/default}/files/10-mynet.conf (100%)
 rename roles/container-engine/{molecule/templates/container.json.j2 => cri-o/molecule/default/files/container.json} (55%)
 rename roles/container-engine/{molecule/templates/sandbox.json.j2 => cri-o/molecule/default/files/sandbox.json} (79%)
 rename roles/container-engine/{molecule => cri-o/molecule/default}/prepare.yml (58%)
 create mode 100644 roles/container-engine/cri-o/molecule/default/tests/test_default.py
 delete mode 100644 roles/container-engine/cri-o/molecule/default/verify.yml
 create mode 100644 roles/container-engine/gvisor/molecule/default/prepare.yml
 create mode 100644 roles/container-engine/gvisor/molecule/default/tests/test_default.py
 delete mode 100644 roles/container-engine/gvisor/molecule/default/verify.yml
 create mode 100644 roles/container-engine/kata-containers/defaults/main.yml
 create mode 100644 roles/container-engine/kata-containers/molecule/default/converge.yml
 create mode 100644 roles/container-engine/kata-containers/molecule/default/files/10-mynet.conf
 create mode 100644 roles/container-engine/kata-containers/molecule/default/files/container.json
 create mode 100644 roles/container-engine/kata-containers/molecule/default/files/sandbox.json
 create mode 100644 roles/container-engine/kata-containers/molecule/default/molecule.yml
 create mode 100644 roles/container-engine/kata-containers/molecule/default/prepare.yml
 create mode 100644 roles/container-engine/kata-containers/molecule/default/tests/test_default.py
 create mode 100644 roles/container-engine/kata-containers/tasks/main.yml
 create mode 100644 roles/container-engine/kata-containers/templates/configuration-qemu.toml.j2
 create mode 100644 roles/container-engine/kata-containers/templates/containerd-shim-kata-v2.j2
 delete mode 100644 roles/container-engine/molecule/test_cri.yml
 delete mode 100644 roles/container-engine/molecule/test_runtime.yml
 create mode 100644 roles/container-engine/skopeo/tasks/main.yml
 create mode 100644 roles/container-engine/youki/defaults/main.yml
 create mode 100644 roles/container-engine/youki/molecule/default/converge.yml
 create mode 100644 roles/container-engine/youki/molecule/default/files/10-mynet.conf
 create mode 100644 roles/container-engine/youki/molecule/default/files/container.json
 create mode 100644 roles/container-engine/youki/molecule/default/files/sandbox.json
 create mode 100644 roles/container-engine/youki/molecule/default/molecule.yml
 create mode 100644 roles/container-engine/youki/molecule/default/prepare.yml
 create mode 100644 roles/container-engine/youki/molecule/default/tests/test_default.py
 create mode 100644 roles/container-engine/youki/tasks/main.yml

diff --git a/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-cluster.yml b/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-cluster.yml
index 2ff454bb9ae..4792391de6c 100644
--- a/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-cluster.yml
+++ b/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-cluster.yml
@@ -219,7 +219,7 @@ dns_domain: "{{ cluster_name }}"
 ## Container runtime
 ## docker for docker, crio for cri-o and containerd for containerd.
 ## Default: containerd
-container_manager: containerd
+container_manager: crio
 
 # Additional container runtimes
 kata_containers_enabled: false
diff --git a/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-cilium.yml b/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-cilium.yml
index fd53f14a6c7..df864eba826 100644
--- a/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-cilium.yml
+++ b/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-cilium.yml
@@ -149,12 +149,12 @@ cilium_encryption_enabled: false
 
 # Hubble
 ### Enable Hubble without install
-cilium_enable_hubble: false
+cilium_enable_hubble: true
 ### Enable Hubble-ui
 ### Installed by default when hubble is enabled. To disable set to false
 # cilium_enable_hubble_ui: "{{ cilium_enable_hubble }}"
 ### Enable Hubble Metrics
-# cilium_enable_hubble_metrics: false
+cilium_enable_hubble_metrics: true
 ### if cilium_enable_hubble_metrics: true
 cilium_hubble_metrics:
   - dns
diff --git a/roles/container-engine/containerd-common/defaults/main.yml b/roles/container-engine/containerd-common/defaults/main.yml
index bceb5c5778b..ae1c6e05a0c 100644
--- a/roles/container-engine/containerd-common/defaults/main.yml
+++ b/roles/container-engine/containerd-common/defaults/main.yml
@@ -3,3 +3,15 @@
 # manager controlled installs to direct download ones.
 containerd_package: 'containerd.io'
 yum_repo_dir: /etc/yum.repos.d
+
+# Keep minimal repo information around for cleanup
+containerd_repo_info:
+  repos:
+
+# Ubuntu docker-ce repo
+containerd_ubuntu_repo_base_url: "https://download.docker.com/linux/ubuntu"
+containerd_ubuntu_repo_component: "stable"
+
+# Debian docker-ce repo
+containerd_debian_repo_base_url: "https://download.docker.com/linux/debian"
+containerd_debian_repo_component: "stable"
diff --git a/roles/container-engine/containerd/defaults/main.yml b/roles/container-engine/containerd/defaults/main.yml
index a0865bd782b..7f76ef33108 100644
--- a/roles/container-engine/containerd/defaults/main.yml
+++ b/roles/container-engine/containerd/defaults/main.yml
@@ -64,8 +64,7 @@ containerd_registries_mirrors:
       skip_verify: false
 #      ca: ["/etc/certs/mirror.pem"]
 #      client: [["/etc/certs/client.pem", ""],["/etc/certs/client.cert", "/etc/certs/client.key"]]
-#      header:
-#        Authorization: "Basic XXX"
+
 
 containerd_max_container_log_line_size: 16384
 # If enabled it will allow non root users to use port numbers <1024
diff --git a/roles/container-engine/containerd/molecule/default/converge.yml b/roles/container-engine/containerd/molecule/default/converge.yml
index 2a061fcb361..7847871e28b 100644
--- a/roles/container-engine/containerd/molecule/default/converge.yml
+++ b/roles/container-engine/containerd/molecule/default/converge.yml
@@ -5,5 +5,5 @@
   vars:
     container_manager: containerd
   roles:
-    - role: kubespray_defaults
+    - role: kubespray-defaults
     - role: container-engine/containerd
diff --git a/roles/container-engine/containerd/molecule/default/molecule.yml b/roles/container-engine/containerd/molecule/default/molecule.yml
index b62b9493222..0ad3b794656 100644
--- a/roles/container-engine/containerd/molecule/default/molecule.yml
+++ b/roles/container-engine/containerd/molecule/default/molecule.yml
@@ -1,16 +1,16 @@
 ---
 role_name_check: 1
 platforms:
-  - cloud_image: ubuntu-2404
-    name: ubuntu24
+  - cloud_image: ubuntu-2004
+    name: ubuntu20
     vm_cpu_cores: 1
     vm_memory: 1024
     node_groups:
       - kube_control_plane
       - kube_node
       - k8s_cluster
-  - cloud_image: debian-12
-    name: debian12
+  - cloud_image: debian-11
+    name: debian11
     vm_cpu_cores: 1
     vm_memory: 1024
     node_groups:
@@ -35,6 +35,5 @@ provisioner:
       timeout: 120
   playbooks:
     create: ../../../../../tests/cloud_playbooks/create-kubevirt.yml
-    prepare: ../../../molecule/prepare.yml
 verifier:
-  name: ansible
+  name: testinfra
diff --git a/roles/container-engine/containerd/molecule/default/prepare.yml b/roles/container-engine/containerd/molecule/default/prepare.yml
new file mode 100644
index 00000000000..a3d09ad8005
--- /dev/null
+++ b/roles/container-engine/containerd/molecule/default/prepare.yml
@@ -0,0 +1,29 @@
+---
+- name: Prepare
+  hosts: all
+  gather_facts: false
+  become: true
+  vars:
+    ignore_assert_errors: true
+  roles:
+    - role: kubespray-defaults
+    - role: bootstrap-os
+    - role: kubernetes/preinstall
+    - role: adduser
+      user: "{{ addusers.kube }}"
+  tasks:
+    - name: Download CNI
+      include_tasks: "../../../../download/tasks/download_file.yml"
+      vars:
+        download: "{{ download_defaults | combine(downloads.cni) }}"
+
+- name: Prepare CNI
+  hosts: all
+  gather_facts: false
+  become: true
+  vars:
+    ignore_assert_errors: true
+    kube_network_plugin: cni
+  roles:
+    - role: kubespray-defaults
+    - role: network_plugin/cni
diff --git a/roles/container-engine/containerd/molecule/default/tests/test_default.py b/roles/container-engine/containerd/molecule/default/tests/test_default.py
new file mode 100644
index 00000000000..e1d915179bf
--- /dev/null
+++ b/roles/container-engine/containerd/molecule/default/tests/test_default.py
@@ -0,0 +1,55 @@
+import os
+import pytest
+
+import testinfra.utils.ansible_runner
+
+testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
+    os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
+
+
+def test_service(host):
+    svc = host.service("containerd")
+    assert svc.is_running
+    assert svc.is_enabled
+
+
+def test_version(host):
+    crictl = "/usr/local/bin/crictl"
+    path = "unix:///var/run/containerd/containerd.sock"
+    with host.sudo():
+        cmd = host.command(crictl + " --runtime-endpoint " + path + " version")
+    assert cmd.rc == 0
+    assert "RuntimeName: containerd" in cmd.stdout
+
+
+@pytest.mark.parametrize('image, dest', [
+    ('quay.io/kubespray/hello-world:latest', '/tmp/hello-world.tar')
+])
+def test_image_pull_save_load(host, image, dest):
+    nerdctl = "/usr/local/bin/nerdctl"
+    dest_file = host.file(dest)
+
+    with host.sudo():
+        pull_cmd = host.command(nerdctl + " pull " + image)
+    assert pull_cmd.rc == 0
+
+    with host.sudo():
+        save_cmd = host.command(nerdctl + " save -o " + dest + " " + image)
+    assert save_cmd.rc == 0
+    assert dest_file.exists
+
+    with host.sudo():
+        load_cmd = host.command(nerdctl + " load < " + dest)
+    assert load_cmd.rc == 0
+
+
+@pytest.mark.parametrize('image', [
+    ('quay.io/kubespray/hello-world:latest')
+])
+def test_run(host, image):
+    nerdctl = "/usr/local/bin/nerdctl"
+
+    with host.sudo():
+        cmd = host.command(nerdctl + " -n k8s.io run " + image)
+    assert cmd.rc == 0
+    assert "Hello from Docker" in cmd.stdout
diff --git a/roles/container-engine/containerd/molecule/default/verify.yml b/roles/container-engine/containerd/molecule/default/verify.yml
deleted file mode 100644
index 96ad82d2ac5..00000000000
--- a/roles/container-engine/containerd/molecule/default/verify.yml
+++ /dev/null
@@ -1,39 +0,0 @@
----
-- name: Test containerd CRI
-  import_playbook: ../../../molecule/test_cri.yml
-  vars:
-    container_manager: containerd
-    cri_socket: unix:///var/run/containerd/containerd.sock
-    cri_name: containerd
-
-- name: Test nerdctl
-  hosts: all
-  gather_facts: false
-  become: true
-  tasks:
-    - name: Get kubespray defaults
-      import_role:
-        name: ../../../../../kubespray_defaults
-    - name: Test nerdctl commands
-      command: "{{ bin_dir }}/nerdctl {{ item | join(' ') }}"
-      vars:
-        image: quay.io/kubespray/hello-world:latest
-      loop:
-        - - pull
-          - "{{ image }}"
-        - - save
-          - -o
-          - /tmp/hello-world.tar
-          - "{{ image }}"
-        - - load
-          - -i
-          - /tmp/hello-world.tar
-        - - -n
-          - k8s.io
-          - run
-          - "{{ image }}"
-      register: nerdctl
-    - name: Check log from running a container
-      assert:
-        that:
-          - ('Hello from Docker' in nerdctl.results[3].stdout)
diff --git a/roles/container-engine/containerd/tasks/main.yml b/roles/container-engine/containerd/tasks/main.yml
index ae726b78db5..39005b97ccf 100644
--- a/roles/container-engine/containerd/tasks/main.yml
+++ b/roles/container-engine/containerd/tasks/main.yml
@@ -1,4 +1,31 @@
 ---
+- name: Fail containerd setup if distribution is not supported
+  fail:
+    msg: "{{ ansible_distribution }} is not supported by containerd."
+  when:
+    - not (allow_unsupported_distribution_setup | default(false)) and (ansible_distribution not in containerd_supported_distributions)
+
+- name: Containerd | Remove any package manager controlled containerd package
+  package:
+    name: "{{ containerd_package }}"
+    state: absent
+  when:
+    - not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar"))
+
+- name: Containerd | Remove containerd repository
+  file:
+    path: "{{ yum_repo_dir }}/containerd.repo"
+    state: absent
+  when:
+    - ansible_os_family in ['RedHat']
+
+- name: Containerd | Remove containerd repository
+  apt_repository:
+    repo: "{{ item }}"
+    state: absent
+  with_items: "{{ containerd_repo_info.repos }}"
+  when: ansible_pkg_mgr == 'apt'
+
 - name: Containerd | Download containerd
   include_tasks: "../../../download/tasks/download_file.yml"
   vars:
@@ -14,6 +41,21 @@
       - --strip-components=1
   notify: Restart containerd
 
+- name: Containerd | Remove orphaned binary
+  file:
+    path: "/usr/bin/{{ item }}"
+    state: absent
+  when:
+    - containerd_bin_dir != "/usr/bin"
+    - not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar"))
+  ignore_errors: true  # noqa ignore-errors
+  with_items:
+    - containerd
+    - containerd-shim
+    - containerd-shim-runc-v1
+    - containerd-shim-runc-v2
+    - ctr
+
 - name: Containerd | Generate systemd service for containerd
   template:
     src: containerd.service.j2
@@ -73,8 +115,6 @@
   notify: Restart containerd
 
 - name: Containerd | Configure containerd registries
-  # mirror configuration can contain sensitive information on headers configuration
-  no_log: "{{ not (unsafe_show_logs | bool) }}"
   block:
     - name: Containerd | Create registry directories
       file:
diff --git a/roles/container-engine/containerd/tasks/reset.yml b/roles/container-engine/containerd/tasks/reset.yml
index 0e70cded4d7..517e56da670 100644
--- a/roles/container-engine/containerd/tasks/reset.yml
+++ b/roles/container-engine/containerd/tasks/reset.yml
@@ -1,4 +1,22 @@
 ---
+- name: Containerd | Remove containerd repository for RedHat os family
+  file:
+    path: "{{ yum_repo_dir }}/containerd.repo"
+    state: absent
+  when:
+    - ansible_os_family in ['RedHat']
+  tags:
+    - reset_containerd
+
+- name: Containerd | Remove containerd repository for Debian os family
+  apt_repository:
+    repo: "{{ item }}"
+    state: absent
+  with_items: "{{ containerd_repo_info.repos }}"
+  when: ansible_pkg_mgr == 'apt'
+  tags:
+    - reset_containerd
+
 - name: Containerd | Stop containerd service
   service:
     name: containerd
diff --git a/roles/container-engine/containerd/templates/hosts.toml.j2 b/roles/container-engine/containerd/templates/hosts.toml.j2
index 0f5b3d013b5..b2b16a65ffb 100644
--- a/roles/container-engine/containerd/templates/hosts.toml.j2
+++ b/roles/container-engine/containerd/templates/hosts.toml.j2
@@ -10,10 +10,4 @@ server = "{{ item.server | default("https://" + item.prefix) }}"
 {% if mirror.client is defined %}
 client = [{% for pair in mirror.client %}["{{ pair[0] }}", "{{ pair[1] }}"]{% if not loop.last %},{% endif %}{% endfor %}]
 {% endif %}
-{% if mirror.header is defined %}
-    [host."{{ mirror.host }}".header]
-{% for key, value in mirror.header.items() %}
-      {{ key }} = ["{{ ([ value ] | flatten ) | join('","') }}"]
-{% endfor %}
-{% endif %}
 {% endfor %}
diff --git a/roles/container-engine/containerd/vars/debian.yml b/roles/container-engine/containerd/vars/debian.yml
new file mode 100644
index 00000000000..8b18d9a9f4e
--- /dev/null
+++ b/roles/container-engine/containerd/vars/debian.yml
@@ -0,0 +1,7 @@
+---
+containerd_repo_info:
+  repos:
+    - >
+      deb {{ containerd_debian_repo_base_url }}
+      {{ ansible_distribution_release | lower }}
+      {{ containerd_debian_repo_component }}
diff --git a/roles/container-engine/containerd/vars/ubuntu.yml b/roles/container-engine/containerd/vars/ubuntu.yml
new file mode 100644
index 00000000000..dd775323dde
--- /dev/null
+++ b/roles/container-engine/containerd/vars/ubuntu.yml
@@ -0,0 +1,7 @@
+---
+containerd_repo_info:
+  repos:
+    - >
+      deb {{ containerd_ubuntu_repo_base_url }}
+      {{ ansible_distribution_release | lower }}
+      {{ containerd_ubuntu_repo_component }}
diff --git a/roles/container-engine/cri-dockerd/molecule/default/converge.yml b/roles/container-engine/cri-dockerd/molecule/default/converge.yml
index 05053734380..be6fa381225 100644
--- a/roles/container-engine/cri-dockerd/molecule/default/converge.yml
+++ b/roles/container-engine/cri-dockerd/molecule/default/converge.yml
@@ -5,5 +5,5 @@
   vars:
     container_manager: docker
   roles:
-    - role: kubespray_defaults
+    - role: kubespray-defaults
    - role: container-engine/cri-dockerd
diff --git a/roles/container-engine/cri-dockerd/molecule/default/molecule.yml b/roles/container-engine/cri-dockerd/molecule/default/molecule.yml
index 78702376404..cff276e423e 100644
--- a/roles/container-engine/cri-dockerd/molecule/default/molecule.yml
+++ b/roles/container-engine/cri-dockerd/molecule/default/molecule.yml
@@ -7,8 +7,8 @@ platforms:
     vm_memory: 1024
     node_groups:
       - kube_control_plane
-  - name: ubuntu22
-    cloud_image: ubuntu-2204
+  - name: ubuntu20
+    cloud_image: ubuntu-2004
     vm_cpu_cores: 1
     vm_memory: 1024
     node_groups:
@@ -27,6 +27,5 @@ provisioner:
     become: true
   playbooks:
     create: ../../../../../tests/cloud_playbooks/create-kubevirt.yml
-    prepare: ../../../molecule/prepare.yml
 verifier:
-  name: ansible
+  name: testinfra
diff --git a/roles/container-engine/cri-dockerd/molecule/default/prepare.yml b/roles/container-engine/cri-dockerd/molecule/default/prepare.yml
new file mode 100644
index 00000000000..b5328422a8d
--- /dev/null
+++ b/roles/container-engine/cri-dockerd/molecule/default/prepare.yml
@@ -0,0 +1,48 @@
+---
+- name: Prepare
+  hosts: all
+  become: true
+  roles:
+    - role: kubespray-defaults
+    - role: bootstrap-os
+    - role: adduser
+      user: "{{ addusers.kube }}"
+  tasks:
+    - name: Download CNI
+      include_tasks: "../../../../download/tasks/download_file.yml"
+      vars:
+        download: "{{ download_defaults | combine(downloads.cni) }}"
+
+- name: Prepare container runtime
+  hosts: all
+  become: true
+  vars:
+    container_manager: containerd
+    kube_network_plugin: cni
+  roles:
+    - role: kubespray-defaults
+    - role: network_plugin/cni
+  tasks:
+    - name: Copy test container files
+      copy:
+        src: "{{ item }}"
+        dest: "/tmp/{{ item }}"
+        owner: root
+        mode: "0644"
+      with_items:
+        - container.json
+        - sandbox.json
+    - name: Create /etc/cni/net.d directory
+      file:
+        path: /etc/cni/net.d
+        state: directory
+        owner: "{{ kube_owner }}"
+        mode: "0755"
+    - name: Setup CNI
+      copy:
+        src: "{{ item }}"
+        dest: "/etc/cni/net.d/{{ item }}"
+        owner: root
+        mode: "0644"
+      with_items:
+        - 10-mynet.conf
diff --git a/roles/container-engine/cri-dockerd/molecule/default/tests/test_default.py b/roles/container-engine/cri-dockerd/molecule/default/tests/test_default.py
new file mode 100644
index 00000000000..dc99b34981b
--- /dev/null
+++ b/roles/container-engine/cri-dockerd/molecule/default/tests/test_default.py
@@ -0,0 +1,19 @@
+import os
+
+import testinfra.utils.ansible_runner
+
+testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
+    os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
+
+
+def test_run_pod(host):
+    run_command = "/usr/local/bin/crictl run --with-pull /tmp/container.json /tmp/sandbox.json"
+    with host.sudo():
+        cmd = host.command(run_command)
+    assert cmd.rc == 0
+
+    with host.sudo():
+        log_f = host.file("/tmp/cri-dockerd1.0.log")
+
+    assert log_f.exists
+    assert b"Hello from Docker" in log_f.content
diff --git a/roles/container-engine/cri-dockerd/molecule/default/verify.yml b/roles/container-engine/cri-dockerd/molecule/default/verify.yml
deleted file mode 100644
index a11eb86f5fe..00000000000
--- a/roles/container-engine/cri-dockerd/molecule/default/verify.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-- name: Test cri-dockerd
-  import_playbook: ../../../molecule/test_cri.yml
-  vars:
-    container_manager: cri-dockerd
-    cri_socket: unix:///var/run/cri-dockerd.sock
-    cri_name: docker
-
-- name: Test running a container with docker
-  import_playbook: ../../../molecule/test_runtime.yml
-  vars:
-    container_runtime: docker
-    # cri-dockerd does not support multiple runtime handler before 0.4.0
-    # https://github.com/Mirantis/cri-dockerd/pull/350
-    # TODO: check this when we upgrade cri-dockerd
diff --git a/roles/container-engine/cri-o/defaults/main.yml b/roles/container-engine/cri-o/defaults/main.yml
index b7e34654bad..4de3b178119 100644
--- a/roles/container-engine/cri-o/defaults/main.yml
+++ b/roles/container-engine/cri-o/defaults/main.yml
@@ -44,7 +44,7 @@ crio_root: "/var/lib/containers/storage"
 
 # The crio_runtimes variable defines a list of OCI compatible runtimes.
 crio_runtimes:
   - name: crun
-    path: "{{ crio_runtime_bin_dir }}/crun"  # Use crun in cri-o distributions, don't use 'crun' role
+    path: "{{ crio_runtime_bin_dir }}/crun"
     type: oci
     root: /run/crun
diff --git a/roles/container-engine/cri-o/meta/main.yml b/roles/container-engine/cri-o/meta/main.yml
index ec9d9a55e9b..99e803a5170 100644
--- a/roles/container-engine/cri-o/meta/main.yml
+++ b/roles/container-engine/cri-o/meta/main.yml
@@ -1,3 +1,5 @@
 ---
 dependencies:
+  - role: container-engine/crun
   - role: container-engine/crictl
+  - role: container-engine/skopeo
diff --git a/roles/container-engine/cri-o/molecule/default/converge.yml b/roles/container-engine/cri-o/molecule/default/converge.yml
index 85361b4397e..376f07c4582 100644
--- a/roles/container-engine/cri-o/molecule/default/converge.yml
+++ b/roles/container-engine/cri-o/molecule/default/converge.yml
@@ -5,5 +5,5 @@
   vars:
     container_manager: crio
   roles:
-    - role: kubespray_defaults
+    - role: kubespray-defaults
     - role: container-engine/cri-o
diff --git a/roles/container-engine/molecule/files/10-mynet.conf b/roles/container-engine/cri-o/molecule/default/files/10-mynet.conf
similarity index 100%
rename from roles/container-engine/molecule/files/10-mynet.conf
rename to roles/container-engine/cri-o/molecule/default/files/10-mynet.conf
diff --git a/roles/container-engine/molecule/templates/container.json.j2 b/roles/container-engine/cri-o/molecule/default/files/container.json
similarity index 55%
rename from roles/container-engine/molecule/templates/container.json.j2
rename to roles/container-engine/cri-o/molecule/default/files/container.json
index fc52def81c9..bcd71e7e586 100644
--- a/roles/container-engine/molecule/templates/container.json.j2
+++ b/roles/container-engine/cri-o/molecule/default/files/container.json
@@ -1,10 +1,10 @@
 {
   "metadata": {
-    "name": "{{ container_runtime }}1"
+    "name": "runc1"
   },
   "image": {
     "image": "quay.io/kubespray/hello-world:latest"
   },
-  "log_path": "{{ container_runtime }}1.0.log",
+  "log_path": "runc1.0.log",
   "linux": {}
 }
diff --git a/roles/container-engine/molecule/templates/sandbox.json.j2 b/roles/container-engine/cri-o/molecule/default/files/sandbox.json
similarity index 79%
rename from roles/container-engine/molecule/templates/sandbox.json.j2
rename to roles/container-engine/cri-o/molecule/default/files/sandbox.json
index dc2894736bd..eb9dcb9d282 100644
--- a/roles/container-engine/molecule/templates/sandbox.json.j2
+++ b/roles/container-engine/cri-o/molecule/default/files/sandbox.json
@@ -1,6 +1,6 @@
 {
   "metadata": {
-    "name": "{{ container_runtime }}1",
+    "name": "runc1",
     "namespace": "default",
     "attempt": 1,
     "uid": "hdishd83djaidwnduwk28bcsb"
diff --git a/roles/container-engine/cri-o/molecule/default/molecule.yml b/roles/container-engine/cri-o/molecule/default/molecule.yml
index e5bf20e5df4..6bbaabf7af3 100644
--- a/roles/container-engine/cri-o/molecule/default/molecule.yml
+++ b/roles/container-engine/cri-o/molecule/default/molecule.yml
@@ -1,8 +1,8 @@
 ---
 role_name_check: 1
 platforms:
-  - name: ubuntu22
-    cloud_image: ubuntu-2204
+  - name: ubuntu20
+    cloud_image: ubuntu-2004
     vm_cpu_cores: 2
     vm_memory: 1024
     node_groups:
@@ -43,6 +43,5 @@ provisioner:
       timeout: 120
   playbooks:
     create: ../../../../../tests/cloud_playbooks/create-kubevirt.yml
-    prepare: ../../../molecule/prepare.yml
 verifier:
-  name: ansible
+  name: testinfra
diff --git a/roles/container-engine/molecule/prepare.yml b/roles/container-engine/cri-o/molecule/default/prepare.yml
similarity index 58%
rename from roles/container-engine/molecule/prepare.yml
rename to roles/container-engine/cri-o/molecule/default/prepare.yml
index 9faf3a8656c..55ad5174d70 100644
--- a/roles/container-engine/molecule/prepare.yml
+++ b/roles/container-engine/cri-o/molecule/default/prepare.yml
@@ -6,15 +6,14 @@
   vars:
     ignore_assert_errors: true
   roles:
-    - role: dynamic_groups
-    - role: bootstrap_os
-    - role: network_facts
+    - role: kubespray-defaults
+    - role: bootstrap-os
     - role: kubernetes/preinstall
     - role: adduser
       user: "{{ addusers.kube }}"
   tasks:
     - name: Download CNI
-      include_tasks: "../../download/tasks/download_file.yml"
+      include_tasks: "../../../../download/tasks/download_file.yml"
       vars:
         download: "{{ download_defaults | combine(downloads.cni) }}"
 
@@ -26,18 +25,29 @@
     ignore_assert_errors: true
     kube_network_plugin: cni
   roles:
-    - role: kubespray_defaults
+    - role: kubespray-defaults
     - role: network_plugin/cni
   tasks:
+    - name: Copy test container files
+      copy:
+        src: "{{ item }}"
+        dest: "/tmp/{{ item }}"
+        owner: root
+        mode: "0644"
+      with_items:
+        - container.json
+        - sandbox.json
     - name: Create /etc/cni/net.d directory
       file:
         path: /etc/cni/net.d
         state: directory
-        owner: root
+        owner: "{{ kube_owner }}"
         mode: "0755"
-    - name: Config bridge host-local CNI
+    - name: Setup CNI
       copy:
-        src: "10-mynet.conf"
-        dest: "/etc/cni/net.d/"
+        src: "{{ item }}"
+        dest: "/etc/cni/net.d/{{ item }}"
         owner: root
         mode: "0644"
+      with_items:
+        - 10-mynet.conf
diff --git a/roles/container-engine/cri-o/molecule/default/tests/test_default.py b/roles/container-engine/cri-o/molecule/default/tests/test_default.py
new file mode 100644
index 00000000000..3e38fa5b2ae
--- /dev/null
+++ b/roles/container-engine/cri-o/molecule/default/tests/test_default.py
@@ -0,0 +1,35 @@
+import os
+
+import testinfra.utils.ansible_runner
+
+testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
+    os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
+
+
+def test_service(host):
+    svc = host.service("crio")
+    assert svc.is_running
+    assert svc.is_enabled
+
+
+def test_run(host):
+    crictl = "/usr/local/bin/crictl"
+    path = "unix:///var/run/crio/crio.sock"
+    with host.sudo():
+        cmd = host.command(crictl + " --runtime-endpoint " + path + " version")
+    assert cmd.rc == 0
+    assert "RuntimeName: cri-o" in cmd.stdout
+
+def test_run_pod(host):
+    runtime = "crun"
+
+    run_command = "/usr/local/bin/crictl run --with-pull --runtime {} /tmp/container.json /tmp/sandbox.json".format(runtime)
+    with host.sudo():
+        cmd = host.command(run_command)
+    assert cmd.rc == 0
+
+    with host.sudo():
+        log_f = host.file("/tmp/runc1.0.log")
+
+    assert log_f.exists
+    assert b"Hello from Docker" in log_f.content
diff --git a/roles/container-engine/cri-o/molecule/default/verify.yml b/roles/container-engine/cri-o/molecule/default/verify.yml
deleted file mode 100644
index a40eb34d56a..00000000000
--- a/roles/container-engine/cri-o/molecule/default/verify.yml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-- name: Test CRI-O cri
-  import_playbook: ../../../molecule/test_cri.yml
-  vars:
-    container_manager: crio
-    cri_socket: unix:///var/run/crio/crio.sock
-    cri_name: cri-o
-- name: Test running a container with crun
-  import_playbook: ../../../molecule/test_runtime.yml
-  vars:
-    container_runtime: crun
diff --git a/roles/container-engine/docker/defaults/main.yml b/roles/container-engine/docker/defaults/main.yml
index 29e8904145f..543f8f294ec 100644
--- a/roles/container-engine/docker/defaults/main.yml
+++ b/roles/container-engine/docker/defaults/main.yml
@@ -1,5 +1,5 @@
 ---
-docker_version: '28.3'
+docker_version: '28.0'
 docker_cli_version: "{{ docker_version }}"
 
 docker_package_info:
diff --git a/roles/container-engine/docker/tasks/main.yml b/roles/container-engine/docker/tasks/main.yml
index 62293264d8e..46f3bce88e5 100644
--- a/roles/container-engine/docker/tasks/main.yml
+++ b/roles/container-engine/docker/tasks/main.yml
@@ -60,17 +60,16 @@
   environment: "{{ proxy_env }}"
   when: ansible_pkg_mgr == 'apt'
 
-# ref to https://github.com/kubernetes-sigs/kubespray/issues/11086 & 12424
-- name: Convert -backports sources to archive.debian.org for bullseye and older
-  replace:
-    path: "{{ item }}"
-    regexp: '^(deb(?:-src)?\s+)(?:https?://)?(?:[^ ]+debian\.org)?([^ ]*/debian)(\s+{{ ansible_distribution_release }}-backports\b.*)'
-    replace: '\1http://archive.debian.org/debian\3'
+# ref to https://github.com/kubernetes-sigs/kubespray/issues/11086
+- name: Remove the archived debian apt repository
+  lineinfile:
+    path: /etc/apt/sources.list
+    regexp: 'buster-backports'
+    state: absent
     backup: true
-  loop: "{{ query('fileglob', '/etc/apt/sources.list') }}"
   when:
     - ansible_os_family == 'Debian'
-    - ansible_distribution_release in ['bullseye', 'buster']
+    - ansible_distribution_release == "buster"
 
 - name: Ensure docker-ce repository is enabled
   apt_repository:
diff --git a/roles/container-engine/docker/vars/debian.yml b/roles/container-engine/docker/vars/debian.yml
index ee2e932811f..f5a0fc99b9d 100644
--- a/roles/container-engine/docker/vars/debian.yml
+++ b/roles/container-engine/docker/vars/debian.yml
@@ -34,10 +34,8 @@ containerd_versioned_pkg:
   '1.7.23': "{{ containerd_package }}=1.7.23-1"
   '1.7.24': "{{ containerd_package }}=1.7.24-1"
   '1.7.25': "{{ containerd_package }}=1.7.25-1"
-  '1.7.26': "{{ containerd_package }}=1.7.26-1"
-  '1.7.27': "{{ containerd_package }}=1.7.27-1"
-  'stable': "{{ containerd_package }}=1.7.27-1"
-  'edge': "{{ containerd_package }}=1.7.27-1"
+  'stable': "{{ containerd_package }}=1.7.25-1"
+  'edge': "{{ containerd_package }}=1.7.25-1"
 
 # https://download.docker.com/linux/debian/
 docker_versioned_pkg:
@@ -55,13 +53,10 @@ docker_versioned_pkg:
   '27.2': docker-ce=5:27.2.1-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
   '27.3': docker-ce=5:27.3.1-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
   '27.4': docker-ce=5:27.4.1-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
-  '27.5': docker-ce=5:27.5.1-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
-  '28.0': docker-ce=5:28.0.4-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
-  '28.1': docker-ce=5:28.1.1-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
-  '28.2': docker-ce=5:28.2.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
-  '28.3': docker-ce=5:28.3.3-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
-  'stable': docker-ce=5:28.3.3-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
-  'edge': docker-ce=5:28.3.3-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
+  '27.5': docker-ce=5:27.5.4-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
+  '28.0': docker-ce=5:28.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
+  'stable': docker-ce=5:28.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
+  'edge': docker-ce=5:28.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
 
 docker_cli_versioned_pkg:
   'latest': docker-ce-cli
@@ -78,13 +73,10 @@ docker_cli_versioned_pkg:
   '27.2': docker-ce-cli=5:27.2.1-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
   '27.3': docker-ce-cli=5:27.3.1-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
   '27.4': docker-ce-cli=5:27.4.1-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
-  '27.5': docker-ce-cli=5:27.5.1-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
-  '28.0': docker-ce-cli=5:28.0.4-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
-  '28.1': docker-ce-cli=5:28.1.1-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
-  '28.2': docker-ce-cli=5:28.2.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
-  '28.3': docker-ce-cli=5:28.3.3-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
-  'stable': docker-ce-cli=5:28.3.3-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
-  'edge': docker-ce-cli=5:28.3.3-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
+  '27.5': docker-ce-cli=5:27.5.4-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
+  '28.0': docker-ce-cli=5:28.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
+  'stable': docker-ce-cli=5:28.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
+  'edge': docker-ce-cli=5:28.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }}
 
 docker_package_info:
   pkgs:
diff --git a/roles/container-engine/docker/vars/fedora.yml b/roles/container-engine/docker/vars/fedora.yml
index 5140fee12ba..f713acc0669 100644
--- a/roles/container-engine/docker/vars/fedora.yml
+++ b/roles/container-engine/docker/vars/fedora.yml
@@ -34,10 +34,8 @@ containerd_versioned_pkg:
   '1.7.23': "{{ containerd_package }}-1.7.23-3.1.fc{{ ansible_distribution_major_version }}"
   '1.7.24': "{{ containerd_package }}-1.7.24-3.1.fc{{ ansible_distribution_major_version }}"
   '1.7.25': "{{ containerd_package }}-1.7.25-3.1.fc{{ ansible_distribution_major_version }}"
-  '1.7.26': "{{ containerd_package }}-1.7.26-3.1.fc{{ ansible_distribution_major_version }}"
-  '1.7.27': "{{ containerd_package }}-1.7.27-3.1.fc{{ ansible_distribution_major_version }}"
-  'stable': "{{ containerd_package }}-1.7.27-3.1.fc{{ ansible_distribution_major_version }}"
-  'edge': "{{ containerd_package }}-1.7.27-3.1.fc{{ ansible_distribution_major_version }}"
+  'stable': "{{ containerd_package }}-1.7.25-3.1.fc{{ ansible_distribution_major_version }}"
+  'edge': "{{ containerd_package }}-1.7.25-3.1.fc{{ ansible_distribution_major_version }}"
 
 # https://docs.docker.com/install/linux/docker-ce/fedora/
 # https://download.docker.com/linux/fedora//x86_64/stable/Packages/
@@ -55,13 +53,9 @@ docker_versioned_pkg:
   '27.3': docker-ce-3:27.3.1-1.fc{{ ansible_distribution_major_version }}
   '27.4': docker-ce-3:27.4.1-1.fc{{ ansible_distribution_major_version }}
   '27.5': docker-ce-3:27.5.1-1.fc{{ ansible_distribution_major_version }}
-  '28.0': docker-ce-3:28.0.4-1.fc{{ ansible_distribution_major_version }}
-  '28.1': docker-ce-3:28.1.1-1.fc{{ ansible_distribution_major_version }}
-  '28.2': docker-ce-3:28.2.2-1.fc{{ ansible_distribution_major_version }}
-  '28.3': docker-ce-3:28.3.3-1.fc{{ ansible_distribution_major_version }}
-  'stable': docker-ce-3:28.3.3-1.fc{{ ansible_distribution_major_version }}
-  'edge': docker-ce-3:28.3.3-1.fc{{ ansible_distribution_major_version }}
-
+  '28.0': docker-ce-3:28.0.2-1.fc{{ ansible_distribution_major_version }}
+  'stable': docker-ce-3:28.0.2-1.fc{{ ansible_distribution_major_version }}
+  'edge': docker-ce-3:28.0.2-1.fc{{ ansible_distribution_major_version }}
 
 docker_cli_versioned_pkg:
   'latest': docker-ce-cli
@@ -77,12 +71,9 @@ docker_cli_versioned_pkg:
   '27.3': docker-ce-cli-1:27.3.1-1.fc{{ ansible_distribution_major_version }}
   '27.4': docker-ce-cli-1:27.4.1-1.fc{{ ansible_distribution_major_version }}
   '27.5': docker-ce-cli-1:27.5.1-1.fc{{ ansible_distribution_major_version }}
-  '28.0': docker-ce-cli-1:28.0.4-1.fc{{ ansible_distribution_major_version }}
-  '28.1': docker-ce-cli-1:28.1.1-1.fc{{ ansible_distribution_major_version }}
-  '28.2': docker-ce-cli-1:28.2.2-1.fc{{ ansible_distribution_major_version }}
-  '28.3': docker-ce-cli-1:28.3.3-1.fc{{ ansible_distribution_major_version }}
-  'stable': docker-ce-cli-1:28.3.3-1.fc{{ ansible_distribution_major_version }}
-  'edge': docker-ce-cli-1:28.3.3-1.fc{{ ansible_distribution_major_version }}
+  '28.0': docker-ce-cli-1:28.0.2-1.fc{{ ansible_distribution_major_version }}
+  'stable': docker-ce-cli-1:28.0.2-1.fc{{ ansible_distribution_major_version }}
+  'edge': docker-ce-cli-1:28.0.2-1.fc{{ ansible_distribution_major_version }}
 
 docker_package_info:
   enablerepo: "docker-ce"
diff --git a/roles/container-engine/docker/vars/redhat.yml b/roles/container-engine/docker/vars/redhat.yml
index 3c832369f72..289453ab5c4 100644
--- a/roles/container-engine/docker/vars/redhat.yml
+++ b/roles/container-engine/docker/vars/redhat.yml
@@ -34,10 +34,8 @@ containerd_versioned_pkg:
   '1.7.23': "{{ containerd_package }}-1.7.23-3.1.el{{ ansible_distribution_major_version }}"
   '1.7.24': "{{ containerd_package }}-1.7.24-3.1.el{{ ansible_distribution_major_version }}"
   '1.7.25': "{{ containerd_package }}-1.7.25-3.1.el{{ ansible_distribution_major_version }}"
-  '1.7.26': "{{ containerd_package }}-1.7.26-3.1.el{{ ansible_distribution_major_version }}"
-  '1.7.27': "{{ containerd_package }}-1.7.27-3.1.el{{ ansible_distribution_major_version }}"
-  'stable': "{{ containerd_package }}-1.7.27-3.1.el{{ ansible_distribution_major_version }}"
-  'edge': "{{ containerd_package }}-1.7.27-3.1.el{{ ansible_distribution_major_version }}"
+  'stable': "{{ containerd_package }}-1.7.25-3.1.el{{ ansible_distribution_major_version }}"
+  'edge': "{{ containerd_package }}-1.7.25-3.1.el{{ ansible_distribution_major_version }}"
 
 # https://docs.docker.com/engine/installation/linux/rhel/#install-from-a-package
 # https://download.docker.com/linux/rhel/>/x86_64/stable/Packages/
@@ -46,45 +44,39 @@ docker_versioned_pkg:
   'latest': docker-ce
   '18.09': docker-ce-3:18.09.9-3.el7
   '19.03': docker-ce-3:19.03.15-3.el{{ ansible_distribution_major_version }}
-  '20.10': docker-ce-3:20.10.24-3.el{{ ansible_distribution_major_version }}
+  '20.10': docker-ce-3:20.10.20-3.el{{ ansible_distribution_major_version }}
   '23.0': docker-ce-3:23.0.6-1.el{{ ansible_distribution_major_version }}
   '24.0': docker-ce-3:24.0.9-1.el{{ ansible_distribution_major_version }}
   '26.0': docker-ce-3:26.0.2-1.el{{ ansible_distribution_major_version }}
   '26.1': docker-ce-3:26.1.4-1.el{{ ansible_distribution_major_version }}
   '27.0': docker-ce-3:27.0.3-1.el{{ ansible_distribution_major_version }}
-  '27.1': docker-ce-3:27.1.2-1.el{{ ansible_distribution_major_version }}
-  '27.2': docker-ce-3:27.2.1-1.el{{ ansible_distribution_major_version }}
-  '27.3': docker-ce-3:27.3.1-1.el{{ ansible_distribution_major_version }}
-  '27.4': docker-ce-3:27.4.1-1.el{{ ansible_distribution_major_version }}
-  '27.5': docker-ce-3:27.5.1-1.el{{ ansible_distribution_major_version }}
-  '28.0': docker-ce-3:28.0.4-1.el{{ ansible_distribution_major_version }}
-  '28.1': docker-ce-3:28.1.1-1.el{{ ansible_distribution_major_version }}
-  '28.2': docker-ce-3:28.2.2-1.el{{ ansible_distribution_major_version }}
-  '28.3': docker-ce-3:28.3.3-1.el{{ ansible_distribution_major_version }}
-  'stable': docker-ce-3:28.3.3-1.el{{ ansible_distribution_major_version }}
-  'edge': docker-ce-3:28.3.3-1.el{{ ansible_distribution_major_version }}
+  '27.1': docker-ce-3:27.1.3-1.el{{ ansible_distribution_major_version }}
+  '27.2': docker-ce-3:27.2.3-1.el{{ ansible_distribution_major_version }}
+  '27.3': docker-ce-3:27.3.3-1.el{{ ansible_distribution_major_version }}
+  '27.4': docker-ce-3:27.4.3-1.el{{ ansible_distribution_major_version }}
+  '27.5': docker-ce-3:27.5.3-1.el{{ ansible_distribution_major_version }}
+  '28.0': docker-ce-3:28.0.2-1.el{{ ansible_distribution_major_version }}
+  'stable': docker-ce-3:28.0.2-1.el{{ ansible_distribution_major_version }}
+  'edge': docker-ce-3:28.0.2-1.el{{ ansible_distribution_major_version }}
 
 docker_cli_versioned_pkg:
   'latest': docker-ce-cli
   '18.09': docker-ce-cli-1:18.09.9-3.el7
   '19.03': docker-ce-cli-1:19.03.15-3.el{{ ansible_distribution_major_version }}
-  '20.10': docker-ce-cli-1:20.10.24-3.el{{ ansible_distribution_major_version }}
+  '20.10': docker-ce-cli-1:20.10.20-3.el{{ ansible_distribution_major_version }}
   '23.0': docker-ce-cli-1:23.0.6-1.el{{ ansible_distribution_major_version }}
   '24.0': docker-ce-cli-1:24.0.9-1.el{{ ansible_distribution_major_version }}
   '26.0': docker-ce-cli-1:26.0.2-1.el{{ ansible_distribution_major_version }}
   '26.1': docker-ce-cli-1:26.1.4-1.el{{ ansible_distribution_major_version }}
   '27.0': docker-ce-cli-1:27.0.3-1.el{{ ansible_distribution_major_version }}
-  '27.1': docker-ce-cli-1:27.1.2-1.el{{ ansible_distribution_major_version }}
-  '27.2': docker-ce-cli-1:27.2.1-1.el{{ ansible_distribution_major_version }}
-  '27.3': docker-ce-cli-1:27.3.1-1.el{{ ansible_distribution_major_version }}
-  '27.4': docker-ce-cli-1:27.4.1-1.el{{ ansible_distribution_major_version }}
-  '27.5': docker-ce-cli-1:27.5.1-1.el{{ ansible_distribution_major_version }}
-  '28.0': docker-ce-cli-1:28.0.4-1.el{{ ansible_distribution_major_version }}
-  '28.1': docker-ce-cli-1:28.1.1-1.el{{ ansible_distribution_major_version }}
-  '28.2': docker-ce-cli-1:28.2.2-1.el{{ ansible_distribution_major_version }}
-  '28.3': docker-ce-cli-1:28.3.3-1.el{{ ansible_distribution_major_version }}
-  'stable': docker-ce-cli-1:28.3.3-1.el{{ ansible_distribution_major_version }}
-  'edge': docker-ce-cli-1:28.3.3-1.el{{ ansible_distribution_major_version }}
+  '27.1': docker-ce-cli-1:27.1.3-1.el{{ ansible_distribution_major_version }}
+  '27.2': docker-ce-cli-1:27.2.3-1.el{{ ansible_distribution_major_version }}
+  '27.3': docker-ce-cli-1:27.3.3-1.el{{ ansible_distribution_major_version }}
+  '27.4': docker-ce-cli-1:27.4.3-1.el{{ ansible_distribution_major_version }}
+  '27.5': docker-ce-cli-1:27.5.3-1.el{{ ansible_distribution_major_version }}
+  '28.0': docker-ce-cli-1:28.0.2-1.el{{ ansible_distribution_major_version }}
+  'stable': docker-ce-cli-1:28.0.2-1.el{{ ansible_distribution_major_version }}
+  'edge': docker-ce-cli-1:28.0.2-1.el{{ ansible_distribution_major_version }}
 
 docker_package_info:
   enablerepo: "docker-ce"
diff --git a/roles/container-engine/docker/vars/ubuntu.yml b/roles/container-engine/docker/vars/ubuntu.yml
index 5d9158ec229..1d4e7bb3e7c 100644
--- a/roles/container-engine/docker/vars/ubuntu.yml
+++ b/roles/container-engine/docker/vars/ubuntu.yml
@@ -27,10 +27,8 @@ containerd_versioned_pkg:
   '1.7.23': "{{ containerd_package }}=1.7.23-1"
   '1.7.24': "{{ containerd_package }}=1.7.24-1"
   '1.7.25': "{{ containerd_package }}=1.7.25-1"
-  '1.7.26': "{{ containerd_package }}=1.7.26-1"
-  '1.7.27': "{{ containerd_package }}=1.7.27-1"
-  'stable': "{{ containerd_package }}=1.7.27-1"
-  'edge': "{{ containerd_package }}=1.7.27-1"
+  'stable': "{{ containerd_package }}=1.7.25-1"
+  'edge': "{{ containerd_package }}=1.7.25-1"
 
 # https://download.docker.com/linux/ubuntu/
 docker_versioned_pkg:
@@ -48,10 +46,9 @@ docker_versioned_pkg:
   '27.3': docker-ce=5:27.3.1-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}
   '27.4': docker-ce=5:27.4.1-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}
   '27.5': docker-ce=5:27.5.4-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}
-  '28.0': docker-ce=5:28.0.4-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}
-  '28.1': docker-ce=5:28.1.1-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}
-  '28.2': docker-ce=5:28.2.2-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}
-  '28.3': docker-ce=5:28.3.3-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}
+  '28.0': docker-ce=5:28.0.2-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}
+  'stable': docker-ce=5:28.0.2-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}
+  'edge': docker-ce=5:28.0.2-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}
 
 docker_cli_versioned_pkg:
   'latest': docker-ce-cli
@@ -68,12 +65,9 @@ docker_cli_versioned_pkg:
   '27.3': docker-ce-cli=5:27.3.1-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}
   '27.4': docker-ce-cli=5:27.4.1-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}
   '27.5': docker-ce-cli=5:27.5.4-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}
-  '28.0': docker-ce-cli=5:28.0.4-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}
-  '28.1': docker-ce-cli=5:28.1.1-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}
-  '28.2': docker-ce-cli=5:28.2.2-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}
-  '28.3': docker-ce-cli=5:28.3.3-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}
-  'stable': docker-ce-cli=5:28.3.3-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}
-  'edge': docker-ce-cli=5:28.3.3-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}
+  '28.0': docker-ce-cli=5:28.0.2-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}
+  'stable': docker-ce-cli=5:28.0.2-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}
+  'edge': docker-ce-cli=5:28.0.2-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }}
 
 docker_package_info:
   pkgs:
diff --git a/roles/container-engine/gvisor/molecule/default/converge.yml b/roles/container-engine/gvisor/molecule/default/converge.yml
index 552a8888962..b14d078a182 100644
--- a/roles/container-engine/gvisor/molecule/default/converge.yml
+++ b/roles/container-engine/gvisor/molecule/default/converge.yml
@@ -6,6 +6,6 @@
     gvisor_enabled: true
     container_manager: containerd
   roles:
-    - role: kubespray_defaults
+    - role: kubespray-defaults
     - role: container-engine/containerd
     - role: container-engine/gvisor
diff --git a/roles/container-engine/gvisor/molecule/default/molecule.yml b/roles/container-engine/gvisor/molecule/default/molecule.yml
index f73a9775cc4..9bf49633149 100644
--- a/roles/container-engine/gvisor/molecule/default/molecule.yml
+++ b/roles/container-engine/gvisor/molecule/default/molecule.yml
@@ -1,18 +1,28 @@
 ---
 role_name_check: 1
+driver:
+  name: vagrant
+  provider:
+    name: libvirt
 platforms:
-  - cloud_image: ubuntu-2404
-    name: ubuntu24
-    vm_cpu_cores: 1
-    vm_memory: 1024
-    node_groups:
+  - name: ubuntu20
+    box: generic/ubuntu2004
+    cpus: 1
+    memory: 1024
+    nested: true
+    groups:
       - kube_control_plane
+    provider_options:
+      driver: kvm
   - name: almalinux9
-    cloud_image: almalinux-9
-    vm_cpu_cores: 1
-    vm_memory: 1024
-    node_groups:
+    box: almalinux/9
+    cpus: 1
+    memory: 1024
+    nested: true
+    groups:
       - kube_control_plane
+    provider_options:
+      driver: kvm
 provisioner:
   name: ansible
   env:
@@ -21,8 +31,9 @@ provisioner:
     defaults:
       callbacks_enabled: profile_tasks
       timeout: 120
-  playbooks:
-    create: ../../../../../tests/cloud_playbooks/create-kubevirt.yml
-    prepare: ../../../molecule/prepare.yml
+  inventory:
+    group_vars:
+      all:
+        become: true
 verifier:
-  name: ansible
+  name: testinfra
diff --git a/roles/container-engine/gvisor/molecule/default/prepare.yml b/roles/container-engine/gvisor/molecule/default/prepare.yml
new file mode 100644
index 00000000000..57c21f2dda2
--- /dev/null
+++ b/roles/container-engine/gvisor/molecule/default/prepare.yml
@@ -0,0 +1,49 @@
+---
+- name: Prepare generic
+  hosts: all
+  become: true
+  roles:
+    - role: kubespray-defaults
+    - role: bootstrap-os
+    - role: adduser
+      user: "{{ addusers.kube }}"
+  tasks:
+    - name: Download CNI
+      include_tasks: "../../../../download/tasks/download_file.yml"
+      vars:
+        download: "{{ download_defaults | combine(downloads.cni) }}"
+
+- name: Prepare container runtime
+  hosts: all
+  become: true
+  vars:
+    container_manager: containerd
+    kube_network_plugin: cni
+  roles:
+    - role: kubespray-defaults
+    - role: network_plugin/cni
+    - role: container-engine/crictl
+  tasks:
+    - name: Copy test container files
+      copy:
+        src: "{{ item }}"
+        dest: "/tmp/{{ item }}"
+        owner: root
+        mode: "0644"
+      with_items:
+        - container.json
+        - sandbox.json
+    - name: Create /etc/cni/net.d directory
+      file:
+        path: /etc/cni/net.d
+        state: directory
+        owner: root
+        mode: "0755"
+    - name: Setup CNI
+      copy:
+        src: "{{ item }}"
+        dest: "/etc/cni/net.d/{{ item }}"
+        owner: root
+        mode: "0644"
+      with_items:
+        - 10-mynet.conf
diff --git a/roles/container-engine/gvisor/molecule/default/tests/test_default.py b/roles/container-engine/gvisor/molecule/default/tests/test_default.py
new file mode 100644
index 00000000000..1cb7fb0ffb1
--- /dev/null
+++ b/roles/container-engine/gvisor/molecule/default/tests/test_default.py
@@ -0,0 +1,29 @@
+import os
+
+import testinfra.utils.ansible_runner
+
+testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
+    os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
+
+
+def test_run(host):
+    gvisorruntime = "/usr/local/bin/runsc"
+    with host.sudo():
+        cmd = host.command(gvisorruntime + " --version")
+    assert cmd.rc == 0
+    assert "runsc version" in cmd.stdout
+
+
+def test_run_pod(host):
+    runtime = "runsc"
+
+    run_command = "/usr/local/bin/crictl run --with-pull --runtime {} /tmp/container.json /tmp/sandbox.json".format(runtime)
+    with host.sudo():
+        cmd = host.command(run_command)
+    assert cmd.rc == 0
+
+    with host.sudo():
+        log_f = host.file("/tmp/gvisor1.0.log")
+
+    assert log_f.exists
+    assert b"Hello from Docker" in log_f.content
diff --git a/roles/container-engine/gvisor/molecule/default/verify.yml b/roles/container-engine/gvisor/molecule/default/verify.yml
deleted file mode 100644
index 35e847e5323..00000000000
--- a/roles/container-engine/gvisor/molecule/default/verify.yml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-- name: Test gvisor
-  hosts: all
-  gather_facts: false
-  tasks:
-    - name: Get kubespray defaults
-      import_role:
-        name: ../../../../../kubespray_defaults
-    - name: Test version
-      command: "{{ bin_dir }}/runsc --version"
-      register: runsc_version
-      failed_when: >
-        runsc_version is failed or
-        'runsc version' not in runsc_version.stdout
-
-- name: Test run container
-  import_playbook: ../../../molecule/test_runtime.yml
-  vars:
-    container_runtime: runsc
diff --git a/roles/container-engine/kata-containers/defaults/main.yml b/roles/container-engine/kata-containers/defaults/main.yml
new file mode 100644
index 00000000000..40bbc33d579
--- /dev/null
+++ b/roles/container-engine/kata-containers/defaults/main.yml
@@ -0,0 +1,10 @@
+---
+kata_containers_dir: /opt/kata
+kata_containers_config_dir: /etc/kata-containers
+kata_containers_containerd_bin_dir: /usr/local/bin
+
+kata_containers_qemu_default_memory: "{{ ansible_memtotal_mb }}"
+kata_containers_qemu_debug: 'false'
+kata_containers_qemu_sandbox_cgroup_only: 'true'
+kata_containers_qemu_enable_mem_prealloc: 'false'
+kata_containers_virtio_fs_cache: 'always'
diff --git a/roles/container-engine/kata-containers/molecule/default/converge.yml b/roles/container-engine/kata-containers/molecule/default/converge.yml
new file mode 100644
index 00000000000..a6fdf812a78
--- /dev/null
+++ b/roles/container-engine/kata-containers/molecule/default/converge.yml
@@ -0,0 +1,11 @@
+---
+- name: Converge
+  hosts: all
+  become: true
+  vars:
+    kata_containers_enabled: true
+    container_manager: containerd
+  roles:
+    - role: kubespray-defaults
+    - role: container-engine/containerd
+    - role: container-engine/kata-containers
diff --git a/roles/container-engine/kata-containers/molecule/default/files/10-mynet.conf b/roles/container-engine/kata-containers/molecule/default/files/10-mynet.conf
new file mode 100644
index 00000000000..f10935b753b
--- /dev/null
+++ b/roles/container-engine/kata-containers/molecule/default/files/10-mynet.conf
@@ -0,0 +1,17 @@
+{
+  "cniVersion": "0.2.0",
+  "name": "mynet",
+  "type": "bridge",
+  "bridge": "cni0",
+  "isGateway": true,
+  "ipMasq": true,
+  "ipam": {
+    "type": "host-local",
+    "subnet": "172.19.0.0/24",
+    "routes": [
+      {
+        "dst": "0.0.0.0/0"
+      }
+    ]
+  }
+}
diff --git a/roles/container-engine/kata-containers/molecule/default/files/container.json b/roles/container-engine/kata-containers/molecule/default/files/container.json
new file mode 100644
index 00000000000..e2e9a56a730
--- /dev/null
+++ b/roles/container-engine/kata-containers/molecule/default/files/container.json
@@ -0,0 +1,10 @@
+{
+  "metadata": {
+    "name": "kata1"
+  },
+  "image": {
+    "image": "quay.io/kubespray/hello-world:latest"
+  },
+  "log_path": "kata1.0.log",
+  "linux": {}
+}
diff --git a/roles/container-engine/kata-containers/molecule/default/files/sandbox.json b/roles/container-engine/kata-containers/molecule/default/files/sandbox.json
new file mode 100644
index 00000000000..326a578bed6
--- /dev/null
+++ b/roles/container-engine/kata-containers/molecule/default/files/sandbox.json
@@ -0,0 +1,10 @@
+{
+  "metadata": {
+    "name": "kata1",
+    "namespace": "default",
+    "attempt": 1,
+    "uid": "hdishd83djaidwnduwk28bcsb"
+  },
+  "linux": {},
+  "log_directory": "/tmp"
+}
diff --git a/roles/container-engine/kata-containers/molecule/default/molecule.yml b/roles/container-engine/kata-containers/molecule/default/molecule.yml
new file mode 100644
index 00000000000..8eaa5d7b87b
--- /dev/null
+++ b/roles/container-engine/kata-containers/molecule/default/molecule.yml
@@ -0,0 +1,39 @@
+---
+role_name_check: 1
+driver:
+  name: vagrant
+  provider:
+    name: libvirt
+platforms:
+  - name: ubuntu20
+    box: generic/ubuntu2004
+    cpus: 1
+    memory: 1024
+    nested: true
+    groups:
+      - kube_control_plane
+    provider_options:
+      driver: kvm
+  - name: ubuntu22
+    box: generic/ubuntu2204
+    cpus: 1
+    memory: 1024
+    nested: true
+    groups:
+      - kube_control_plane
+    provider_options:
+      driver: kvm
+provisioner:
+  name: ansible
+  env:
+    ANSIBLE_ROLES_PATH: ../../../../
+  config_options:
+    defaults:
+      callbacks_enabled: profile_tasks
+      timeout: 120
+  inventory:
+    group_vars:
+      all:
+        become: true
+verifier:
+  name: testinfra
diff --git a/roles/container-engine/kata-containers/molecule/default/prepare.yml b/roles/container-engine/kata-containers/molecule/default/prepare.yml
new file mode 100644
index 00000000000..a5abd27bb45
--- /dev/null
+++ b/roles/container-engine/kata-containers/molecule/default/prepare.yml
@@ -0,0 +1,49 @@
+---
+- name: Prepare
+  hosts: all
+  become: true
+  roles:
+    - role: kubespray-defaults
+    - role: bootstrap-os
+    - role: adduser
+      user: "{{ addusers.kube }}"
+  tasks:
+    - name: Download CNI
+      include_tasks: "../../../../download/tasks/download_file.yml"
+      vars:
+        download: "{{ download_defaults | combine(downloads.cni) }}"
+
+- name: Prepare container runtime
+  hosts: all
+  become: true
+  vars:
+    container_manager: containerd
+    kube_network_plugin: cni
+  roles:
+    - role: kubespray-defaults
+    - role: network_plugin/cni
+    - role: container-engine/crictl
+  tasks:
+    - name: Copy test container files
+      copy:
+        src: "{{ item }}"
+        dest: "/tmp/{{ item }}"
+        owner: root
+        mode: "0644"
+      with_items:
+        - container.json
+        - sandbox.json
+    - name: Create /etc/cni/net.d directory
+      file:
+        path: /etc/cni/net.d
+        state: directory
+        owner: "{{ kube_owner }}"
+        mode: "0755"
+    - name: Setup CNI
+      copy:
+        src: "{{ item }}"
+        dest: "/etc/cni/net.d/{{ item }}"
+        owner: root
+        mode: "0644"
+      with_items:
+        - 10-mynet.conf
diff --git a/roles/container-engine/kata-containers/molecule/default/tests/test_default.py b/roles/container-engine/kata-containers/molecule/default/tests/test_default.py
new file mode 100644
index 00000000000..e10fff4b788
--- /dev/null
+++ b/roles/container-engine/kata-containers/molecule/default/tests/test_default.py
@@ -0,0 +1,37 @@
+import os
+
+import testinfra.utils.ansible_runner
+
+testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') + + +def test_run(host): + kataruntime = "/opt/kata/bin/kata-runtime" + with host.sudo(): + cmd = host.command(kataruntime + " version") + assert cmd.rc == 0 + assert "kata-runtime" in cmd.stdout + + +def test_run_check(host): + kataruntime = "/opt/kata/bin/kata-runtime" + with host.sudo(): + cmd = host.command(kataruntime + " check") + assert cmd.rc == 0 + assert "System is capable of running" in cmd.stdout + + +def test_run_pod(host): + runtime = "kata-qemu" + + run_command = "/usr/local/bin/crictl run --with-pull --runtime {} /tmp/container.json /tmp/sandbox.json".format(runtime) + with host.sudo(): + cmd = host.command(run_command) + assert cmd.rc == 0 + + with host.sudo(): + log_f = host.file("/tmp/kata1.0.log") + + assert log_f.exists + assert b"Hello from Docker" in log_f.content diff --git a/roles/container-engine/kata-containers/tasks/main.yml b/roles/container-engine/kata-containers/tasks/main.yml new file mode 100644 index 00000000000..5014c214a49 --- /dev/null +++ b/roles/container-engine/kata-containers/tasks/main.yml @@ -0,0 +1,54 @@ +--- +- name: Kata-containers | Download kata binary + include_tasks: "../../../download/tasks/download_file.yml" + vars: + download: "{{ download_defaults | combine(downloads.kata_containers) }}" + +- name: Kata-containers | Copy kata-containers binary + unarchive: + src: "{{ downloads.kata_containers.dest }}" + dest: "/" + mode: "0755" + owner: root + group: root + remote_src: true + +- name: Kata-containers | Create config directory + file: + path: "{{ kata_containers_config_dir }}" + state: directory + mode: "0755" + +- name: Kata-containers | Set configuration + template: + src: "{{ item }}.j2" + dest: "{{ kata_containers_config_dir }}/{{ item }}" + mode: "0644" + with_items: + - configuration-qemu.toml + +- name: Kata-containers | Set containerd bin + vars: + shim: "{{ item }}" + template: + dest: "{{ kata_containers_containerd_bin_dir }}/containerd-shim-kata-{{ item }}-v2" + src: containerd-shim-kata-v2.j2 + mode: "0755" + with_items: + - qemu + +- name: Kata-containers | Load vhost kernel modules + community.general.modprobe: + state: present + name: "{{ item }}" + with_items: + - vhost_vsock + - vhost_net + +- name: Kata-containers | Persist vhost kernel modules + copy: + dest: /etc/modules-load.d/kubespray-kata-containers.conf + mode: "0644" + content: | + vhost_vsock + vhost_net diff --git a/roles/container-engine/kata-containers/templates/configuration-qemu.toml.j2 b/roles/container-engine/kata-containers/templates/configuration-qemu.toml.j2 new file mode 100644 index 00000000000..15511442c6d --- /dev/null +++ b/roles/container-engine/kata-containers/templates/configuration-qemu.toml.j2 @@ -0,0 +1,706 @@ +# Copyright (c) 2017-2019 Intel Corporation +# Copyright (c) 2021 Adobe Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +# XXX: WARNING: this file is auto-generated. 
+# XXX:
+# XXX: Source file: "config/configuration-qemu.toml.in"
+# XXX: Project:
+# XXX:   Name: Kata Containers
+# XXX:   Type: kata

+[hypervisor.qemu]
+path = "/opt/kata/bin/qemu-system-x86_64"
+{% if kata_containers_version is version('2.2.0', '>=') %}
+kernel = "/opt/kata/share/kata-containers/vmlinux.container"
+{% else %}
+kernel = "/opt/kata/share/kata-containers/vmlinuz.container"
+{% endif %}
+image = "/opt/kata/share/kata-containers/kata-containers.img"
+# initrd = "/opt/kata/share/kata-containers/kata-containers-initrd.img"
+machine_type = "q35"

+# rootfs filesystem type:
+#   - ext4 (default)
+#   - xfs
+#   - erofs
+rootfs_type="ext4"

+# Enable confidential guest support.
+# Toggling that setting may trigger different hardware features, ranging
+# from memory encryption to both memory and CPU-state encryption and integrity.
+# The Kata Containers runtime dynamically detects the available feature set and
+# aims at enabling the largest possible one, returning an error if none is
+# available, or none is supported by the hypervisor.
+#
+# Known limitations:
+# * Does not work by design:
+#   - CPU Hotplug
+#   - Memory Hotplug
+#   - NVDIMM devices
+#
+# Default false
+# confidential_guest = true

+# Choose AMD SEV-SNP confidential guests
+# In case of using confidential guests on AMD hardware that supports both SEV
+# and SEV-SNP, the following enables SEV-SNP guests. SEV guests are the default.
+# Default false
+# sev_snp_guest = true

+# Enable running the QEMU VMM as a non-root user.
+# By default the QEMU VMM runs as root. When this is set to true, the QEMU VMM process runs as
+# a non-root random user. See the documentation for the limitations of this mode.
+# rootless = true

+# List of valid annotation names for the hypervisor
+# Each member of the list is a regular expression, which is the base name
+# of the annotation, e.g. "path" for io.katacontainers.config.hypervisor.path
+enable_annotations = ["enable_iommu"]

+# List of valid annotations values for the hypervisor
+# Each member of the list is a path pattern as described by glob(3).
+# The default if not set is empty (all annotations rejected.)
+# Your distribution recommends: ["/opt/kata/bin/qemu-system-x86_64"]
+valid_hypervisor_paths = ["/opt/kata/bin/qemu-system-x86_64"]

+# Optional space-separated list of options to pass to the guest kernel.
+# For example, use `kernel_params = "vsyscall=emulate"` if you are having
+# trouble running pre-2.15 glibc.
+#
+# WARNING: - any parameter specified here will take priority over the default
+# parameter value of the same name used to start the virtual machine.
+# Do not set values here unless you understand the impact of doing so as you
+# may stop the virtual machine from booting.
+# To see the list of default parameters, enable hypervisor debug, create a
+# container and look for 'default-kernel-parameters' log entries.
+kernel_params = ""

+# Path to the firmware.
+# If you want qemu to use the default firmware, leave this option empty.
+firmware = ""

+# Path to the firmware volume.
+# firmware TDVF or OVMF can be split into FIRMWARE_VARS.fd (UEFI variables
+# as configuration) and FIRMWARE_CODE.fd (UEFI program image). UEFI variables
+# can be customized per user while the UEFI code is kept the same.
+firmware_volume = ""

+# Machine accelerators
+# comma-separated list of machine accelerators to pass to the hypervisor.
+# For example, `machine_accelerators = "nosmm,nosmbus,nosata,nopit,static-prt,nofw"`
+machine_accelerators=""

+# Qemu seccomp sandbox feature
+# comma-separated list of seccomp sandbox features to control the syscall access.
+# For example, `seccompsandbox= "on,obsolete=deny,spawn=deny,resourcecontrol=deny"`
+# Note: "elevateprivileges=deny" doesn't work with the daemonize option, so it's removed from the seccomp sandbox.
+# Another note: enabling this feature may reduce performance; you may enable
+# /proc/sys/net/core/bpf_jit_enable to reduce the impact. See https://man7.org/linux/man-pages/man8/bpfc.8.html
+#seccompsandbox="on,obsolete=deny,spawn=deny,resourcecontrol=deny"

+# CPU features
+# comma-separated list of cpu features to pass to the cpu
+# For example, `cpu_features = "pmu=off,vmx=off"`
+cpu_features="pmu=off"

+# Default number of vCPUs per SB/VM:
+# unspecified or 0 --> will be set to 1
+# < 0 --> will be set to the actual number of physical cores
+# > 0 <= number of physical cores --> will be set to the specified number
+# > number of physical cores --> will be set to the actual number of physical cores
+default_vcpus = 1

+# Default maximum number of vCPUs per SB/VM:
+# unspecified or == 0 --> will be set to the actual number of physical cores or to the maximum number
+# of vCPUs supported by KVM if that number is exceeded
+# > 0 <= number of physical cores --> will be set to the specified number
+# > number of physical cores --> will be set to the actual number of physical cores or to the maximum number
+# of vCPUs supported by KVM if that number is exceeded
+# WARNING: Depending on the architecture, the maximum number of vCPUs supported by KVM is used when
+# the actual number of physical cores is greater than it.
+# WARNING: Be aware that this value impacts the virtual machine's memory footprint and CPU
+# hotplug functionality. For example, `default_maxvcpus = 240` specifies that up to 240 vCPUs
+# can be added to a SB/VM, but the memory footprint will be big. Another example: with
+# `default_maxvcpus = 8` the memory footprint will be small, but 8 will be the maximum number of
+# vCPUs supported by the SB/VM. In general, we recommend that you do not edit this variable,
+# unless you know what you are doing.
+# NOTICE: on arm platforms with a gicv2 interrupt controller, set it to 8.
+default_maxvcpus = 0

+# Bridges can be used to hot plug devices.
+# Limitations:
+# * Currently only pci bridges are supported
+# * Up to 30 devices per bridge can be hot plugged.
+# * Up to 5 PCI bridges can be cold plugged per VM.
+# This limitation could be a bug in qemu or in the kernel
+# Default number of bridges per SB/VM:
+# unspecified or 0 --> will be set to 1
+# > 1 <= 5 --> will be set to the specified number
+# > 5 --> will be set to 5
+default_bridges = 1

+# Default memory size in MiB for SB/VM.
+# If unspecified then it will be set to 2048 MiB.
+default_memory = {{ kata_containers_qemu_default_memory }}
+#
+# Default memory slots per SB/VM.
+# If unspecified then it will be set to 10.
+# This determines how many times memory can be hot-added to the sandbox/VM.
+#memory_slots = 10

+# Default maximum memory in MiB per SB / VM
+# unspecified or == 0 --> will be set to the actual amount of physical RAM
+# > 0 <= amount of physical RAM --> will be set to the specified number
+# > amount of physical RAM --> will be set to the actual amount of physical RAM
+default_maxmemory = 0

+# This size in MiB will be added to the hypervisor's max memory.
+# It is the memory address space for the NVDIMM device.
+# If the block storage driver (block_device_driver) is set to "nvdimm",
+# memory_offset should be set to the size of the block device.
+# Default 0
+#memory_offset = 0

+# Specifies whether virtio-mem will be enabled.
+# Please note that this option should be used with the command
+# "echo 1 > /proc/sys/vm/overcommit_memory".
+# Default false
+#enable_virtio_mem = true

+# Disable block devices from being used for a container's rootfs.
+# In case of a storage driver like devicemapper where a container's
+# root file system is backed by a block device, the block device is passed
+# directly to the hypervisor for performance reasons.
+# This flag prevents the block device from being passed to the hypervisor;
+# virtio-fs is used instead to pass the rootfs.
+disable_block_device_use = false

+# Shared file system type:
+#   - virtio-fs (default)
+#   - virtio-9p
+#   - virtio-fs-nydus
+{% if kata_containers_version is version('2.2.0', '>=') %}
+shared_fs = "virtio-fs"
+{% else %}
+shared_fs = "virtio-9p"
+{% endif %}

+# Path to vhost-user-fs daemon.
+{% if kata_containers_version is version('2.5.0', '>=') %}
+virtio_fs_daemon = "/opt/kata/libexec/virtiofsd"
+{% else %}
+virtio_fs_daemon = "/opt/kata/libexec/kata-qemu/virtiofsd"
+{% endif %}

+# List of valid annotations values for the virtiofs daemon
+# The default if not set is empty (all annotations rejected.)
+# Your distribution recommends: ["/opt/kata/libexec/virtiofsd"]
+valid_virtio_fs_daemon_paths = [
+  "/opt/kata/libexec/virtiofsd",
+  "/opt/kata/libexec/kata-qemu/virtiofsd",
+]

+# Default size of DAX cache in MiB
+virtio_fs_cache_size = 0

+# Default size of virtqueues
+virtio_fs_queue_size = 1024

+# Extra args for virtiofsd daemon
+#
+# Format example:
+#   ["--arg1=xxx", "--arg2=yyy"]
+# Examples:
+#   Set virtiofsd log level to debug : ["--log-level=debug"]
+#
+# see `virtiofsd -h` for possible options.
+virtio_fs_extra_args = ["--thread-pool-size=1", "--announce-submounts"]

+# Cache mode:
+#
+#   - never
+#     Metadata, data, and pathname lookup are not cached in guest. They are
+#     always fetched from host and any changes are immediately pushed to host.
+#
+#   - auto
+#     Metadata and pathname lookup cache expires after a configured amount of
+#     time (default is 1 second). Data is cached while the file is open (close
+#     to open consistency).
+#
+#   - always
+#     Metadata, data, and pathname lookup are cached in guest and never expire.
+virtio_fs_cache = "{{ kata_containers_virtio_fs_cache }}"

+# Block storage driver to be used for the hypervisor in case the container
+# rootfs is backed by a block device. This is virtio-scsi, virtio-blk
+# or nvdimm.
+block_device_driver = "virtio-scsi"

+# aio is the I/O mechanism used by qemu
+# Options:
+#
+#   - threads
+#     Pthread based disk I/O.
+#
+#   - native
+#     Native Linux I/O.
+#
+#   - io_uring
+#     Linux io_uring API. This provides the fastest I/O operations on Linux, requires kernel>5.1 and
+#     qemu >=5.0.
+block_device_aio = "io_uring"

+# Specifies whether cache-related options will be set for block devices.
+# Default false
+#block_device_cache_set = true

+# Specifies cache-related options for block devices.
+# Denotes whether use of O_DIRECT (bypass the host page cache) is enabled.
+# Default false
+#block_device_cache_direct = true

+# Specifies cache-related options for block devices.
+# Denotes whether flush requests for the device are ignored.
+# Default false
+#block_device_cache_noflush = true

+# Enable iothreads (data-plane) to be used. This causes IO to be
+# handled in a separate IO thread. This is currently only implemented
+# for SCSI.
+#
+enable_iothreads = false

+# Enable pre allocation of VM RAM, default false
+# Enabling this will result in lower container density
+# as all of the memory will be allocated and locked.
+# This is useful when you want to reserve all the memory
+# upfront or in the cases where you want memory latencies
+# to be very predictable
+# Default false
+enable_mem_prealloc = {{ kata_containers_qemu_enable_mem_prealloc }}

+# Enable huge pages for VM RAM, default false
+# Enabling this will result in the VM memory
+# being allocated using huge pages.
+# This is useful when you want to use vhost-user network
+# stacks within the container. This will automatically
+# result in memory pre allocation
+#enable_hugepages = true

+# Enable vhost-user storage device, default false
+# Enabling this will result in some Linux reserved block type
+# major range 240-254 being chosen to represent vhost-user devices.
+enable_vhost_user_store = false

+# The base directory specifically used for vhost-user devices.
+# Its sub-path "block" is used for block devices; "block/sockets" is
+# where we expect vhost-user sockets to live; "block/devices" is where
+# simulated block device nodes for vhost-user devices live.
+vhost_user_store_path = "/var/run/kata-containers/vhost-user"

+# Enable vIOMMU, default false
+# Enabling this will result in the VM having a vIOMMU device
+# This will also add the following options to the kernel's
+# command line: intel_iommu=on,iommu=pt
+#enable_iommu = true

+# Enable IOMMU_PLATFORM, default false
+# Enabling this will result in the VM device having iommu_platform=on set
+#enable_iommu_platform = true

+# List of valid annotations values for the vhost user store path
+# The default if not set is empty (all annotations rejected.)
+# Your distribution recommends: ["/var/run/kata-containers/vhost-user"]
+valid_vhost_user_store_paths = ["/var/run/kata-containers/vhost-user"]

+# The timeout for reconnecting on non-server spdk sockets when the remote end goes away.
+# qemu will delay this many seconds and then attempt to reconnect.
+# Zero disables reconnecting, and the default is zero.
+vhost_user_reconnect_timeout_sec = 0

+# Enable file based guest memory support. The default is an empty string which
+# will disable this feature. In the case of virtio-fs, this is enabled
+# automatically and '/dev/shm' is used as the backing folder.
+# This option will be ignored if VM templating is enabled.
+#file_mem_backend = ""

+# List of valid annotations values for the file_mem_backend annotation
+# The default if not set is empty (all annotations rejected.)
+# Your distribution recommends: [""]
+valid_file_mem_backends = [""]

+# -pflash can add an image file to the VM. Its arguments should be in the format
+# of ["/path/to/flash0.img", "/path/to/flash1.img"]
+pflashes = []

+# This option changes the default hypervisor and kernel parameters
+# to enable debug output where available. It also enables the HMP socket.
+#
+# Default false
+enable_debug = {{ kata_containers_qemu_debug }}

+# Disable the customizations done in the runtime when it detects
+# that it is running on top of a VMM. This will result in the runtime
+# behaving as it would when running on bare metal.
+#
+#disable_nesting_checks = true

+# This is the msize used for 9p shares. It is the number of bytes
+# used for 9p packet payload.
+#msize_9p = 8192

+# If false and nvdimm is supported, use nvdimm device to plug guest image.
+# Otherwise a virtio-block device is used.
+#
+# nvdimm is not supported when `confidential_guest = true`.
+#
+# Default is false
+#disable_image_nvdimm = true

+# VFIO devices are hotplugged on a bridge by default.
+# Enable hotplugging on the root bus. This may be required for devices with
+# a large PCI bar, as this is a current limitation with hotplugging on
+# a bridge.
+# Default false
+#hotplug_vfio_on_root_bus = true

+# Before hot plugging a PCIe device, you need to add a pcie_root_port device.
+# Use this parameter when using some large PCI bar devices, such as an Nvidia GPU.
+# The value is the number of pcie_root_port devices.
+# This value is valid when hotplug_vfio_on_root_bus is true and machine_type is "q35"
+# Default 0
+#pcie_root_port = 2

+# If vhost-net backend for virtio-net is not desired, set to true. Default is false, which trades off
+# security (vhost-net runs ring0) for network I/O performance.
+#disable_vhost_net = true

+#
+# Default entropy source.
+# The path to a host source of entropy (including a real hardware RNG)
+# /dev/urandom and /dev/random are two main options.
+# Be aware that /dev/random is a blocking source of entropy. If the host
+# runs out of entropy, the VM's boot time will increase, possibly leading to startup
+# timeouts.
+# The source of entropy /dev/urandom is non-blocking and provides a
+# generally acceptable source of entropy. It should work well for pretty much
+# all practical purposes.
+#entropy_source= "/dev/urandom"

+# List of valid annotations values for entropy_source
+# The default if not set is empty (all annotations rejected.)
+# Your distribution recommends: ["/dev/urandom","/dev/random",""]
+valid_entropy_sources = ["/dev/urandom","/dev/random",""]

+# Path to OCI hook binaries in the *guest rootfs*.
+# This does not affect host-side hooks which must instead be added to
+# the OCI spec passed to the runtime.
+#
+# You can create a rootfs with hooks by customizing the osbuilder scripts:
+# https://github.com/kata-containers/kata-containers/tree/main/tools/osbuilder
+#
+# Hooks must be stored in a subdirectory of guest_hook_path according to their
+# hook type, i.e. "guest_hook_path/{prestart,poststart,poststop}".
+# The agent will scan these directories for executable files and add them, in
+# lexicographical order, to the lifecycle of the guest container.
+# Hooks are executed in the runtime namespace of the guest. See the official documentation:
+# https://github.com/opencontainers/runtime-spec/blob/v1.0.1/config.md#posix-platform-hooks
+# Warnings will be logged if any error is encountered while scanning for hooks,
+# but it will not abort container execution.
+#guest_hook_path = "/usr/share/oci/hooks"
+#
+# Use the rx Rate Limiter to control network I/O inbound bandwidth (size in bits/sec for SB/VM).
+# In Qemu, we use classful qdiscs HTB (Hierarchy Token Bucket) to discipline traffic.
+# Default 0-sized value means unlimited rate.
+#rx_rate_limiter_max_rate = 0
+# Use the tx Rate Limiter to control network I/O outbound bandwidth (size in bits/sec for SB/VM).
+# In Qemu, we use classful qdiscs HTB (Hierarchy Token Bucket) and ifb (Intermediate Functional Block)
+# to discipline traffic.
+# Default 0-sized value means unlimited rate.
+#tx_rate_limiter_max_rate = 0

+# Set where to save the guest memory dump file.
+# If set, when a GUEST_PANICKED event occurs,
+# guest memory will be dumped to the host filesystem under guest_memory_dump_path.
+# This directory will be created automatically if it does not exist.
+#
+# The dumped file (also called a vmcore) can be processed with crash or gdb.
+#
+# WARNING:
+# Dumping the guest's memory can take very long depending on the amount of guest memory
+# and can use a lot of disk space.
+#guest_memory_dump_path="/var/crash/kata"

+# Whether to enable paging.
+# Basically, if you want to use "gdb" rather than "crash",
+# or need the guest-virtual addresses in the ELF vmcore,
+# then you should enable paging.
+#
+# See: https://www.qemu.org/docs/master/qemu-qmp-ref.html#Dump-guest-memory for details
+#guest_memory_dump_paging=false

+# Enable swap in the guest. Default false.
+# When enable_guest_swap is enabled, a raw file is inserted into the guest as the swap device
+# if the swappiness of a container (set by annotation "io.katacontainers.container.resource.swappiness")
+# is bigger than 0.
+# The size of the swap device should be
+# swap_in_bytes (set by annotation "io.katacontainers.container.resource.swap_in_bytes") - memory_limit_in_bytes.
+# If swap_in_bytes is not set, the size should be memory_limit_in_bytes.
+# If swap_in_bytes and memory_limit_in_bytes are not set, the size should
+# be default_memory.
+#enable_guest_swap = true

+# use legacy serial for guest console if available and implemented for the architecture. Default false
+#use_legacy_serial = true

+# disable applying SELinux on the VMM process (default false)
+disable_selinux=false

+# disable applying SELinux on the container process
+# If set to false, the type `container_t` is applied to the container process by default.
+# Note: To enable guest SELinux, the guest rootfs must be CentOS, created and built
+# with `SELINUX=yes`.
+# (default: true)
+disable_guest_selinux=true

+[factory]
+# VM templating support. Once enabled, new VMs are created from a template
+# using vm cloning. They will share the same initial kernel, initramfs and
+# agent memory by mapping it readonly. It helps speed up new container
+# creation and saves a lot of memory if there are many kata containers running
+# on the same host.
+#
+# When disabled, new VMs are created from scratch.
+#
+# Note: Requires "initrd=" to be set ("image=" is not supported).
+#
+# Default false
+#enable_template = true

+# Specifies the path of template.
+#
+# Default "/run/vc/vm/template"
+#template_path = "/run/vc/vm/template"

+# The number of caches of VMCache:
+# unspecified or == 0 --> VMCache is disabled
+# > 0 --> will be set to the specified number
+#
+# VMCache is a function that creates VMs as caches before using them.
+# It helps speed up new container creation.
+# The function consists of a server and some clients communicating
+# through a Unix socket. The protocol is gRPC in protocols/cache/cache.proto.
+# The VMCache server will create some VMs and cache them by factory cache.
+# It will convert a VM to gRPC format and transport it when it gets
+# a request from a client.
+# Factory grpccache is the VMCache client. It will request a gRPC-format
+# VM and convert it back to a VM. If the VMCache function is enabled,
+# kata-runtime will request a VM from factory grpccache when it creates
+# a new sandbox.
+#
+# Default 0
+#vm_cache_number = 0

+# Specify the address of the Unix socket that is used by VMCache.
+#
+# Default /var/run/kata-containers/cache.sock
+#vm_cache_endpoint = "/var/run/kata-containers/cache.sock"

+[agent.kata]
+# If enabled, make the agent display debug-level messages.
+# (default: disabled)
+enable_debug = {{ kata_containers_qemu_debug }}

+# Enable agent tracing.
+#
+# If enabled, the agent will generate OpenTelemetry trace spans.
+#
+# Notes:
+#
+# - If the runtime also has tracing enabled, the agent spans will be
+#   associated with the appropriate runtime parent span.
+# - If enabled, the runtime will wait for the container to shut down,
+#   increasing the container shutdown time slightly.
+#
+# (default: disabled)
+#enable_tracing = true

+# Comma-separated list of kernel modules and their parameters.
+# These modules will be loaded in the guest kernel using modprobe(8).
+# The following example can be used to load two kernel modules with parameters
+#  - kernel_modules=["e1000e InterruptThrottleRate=3000,3000,3000 EEE=1", "i915 enable_ppgtt=0"]
+# The first word is considered the module name and the rest its parameters.
+# The container will not be started when:
+#  * A kernel module is specified and the modprobe command is not installed in the guest
+#    or it fails loading the module.
+#  * The module is not available in the guest or it doesn't meet the guest kernel
+#    requirements, like architecture and version.
+#
+kernel_modules=[]

+# Enable debug console.

+# If enabled, the user can connect to the guest OS running inside the hypervisor
+# through the "kata-runtime exec <sandbox id>" command

+#debug_console_enabled = true

+# Agent connection dialing timeout value in seconds
+# (default: 30)
+#dial_timeout = 30

+[runtime]
+# If enabled, the runtime will log additional debug messages to the
+# system log
+# (default: disabled)
+enable_debug = {{ kata_containers_qemu_debug }}
+#
+# Internetworking model
+# Determines how the VM should be connected to
+# the container network interface
+# Options:
+#
+#   - macvtap
+#     Used when the Container network interface can be bridged using
+#     macvtap.
+#
+#   - none
+#     Used for a customized network. Only creates a tap device. No veth pair.
+#
+#   - tcfilter
+#     Uses tc filter rules to redirect traffic from the network interface
+#     provided by the plugin to a tap interface connected to the VM.
+#
+internetworking_model="tcfilter"

+# disable guest seccomp
+# Determines whether container seccomp profiles are passed to the virtual
+# machine and applied by the kata agent. If set to true, seccomp is not applied
+# within the guest
+# (default: true)
+disable_guest_seccomp=true

+# vCPUs pinning settings
+# if enabled, each vCPU thread will be scheduled to a fixed CPU
+# qualified condition: num(vCPU threads) == num(CPUs in sandbox's CPUSet)
+# enable_vcpus_pinning = false

+# Apply a custom SELinux security policy to the container process inside the VM.
+# This is used when you want to apply a type other than the default `container_t`,
+# so general users should not uncomment and apply it.
+# (format: "user:role:type")
+# Note: You cannot specify an MCS policy with the label because the sensitivity levels and
+# categories are determined automatically by high-level container runtimes such as containerd.
+#guest_selinux_label="system_u:system_r:container_t"

+# If enabled, the runtime will create opentracing.io traces and spans.
+# (See https://www.jaegertracing.io/docs/getting-started).
+# (default: disabled)
+#enable_tracing = true

+# Set the full URL to the Jaeger HTTP Thrift collector.
+# The default if not set will be "http://localhost:14268/api/traces"
+#jaeger_endpoint = ""

+# Sets the username to be used if basic auth is required for Jaeger.
+#jaeger_user = ""

+# Sets the password to be used if basic auth is required for Jaeger.
+#jaeger_password = ""

+# If enabled, the runtime will not create a network namespace for shim and hypervisor processes.
+# This option may have some potential impacts on your host. It should only be used when you know what you're doing.
+# `disable_new_netns` conflicts with `internetworking_model=tcfilter` and `internetworking_model=macvtap`. It works only
+# with `internetworking_model=none`. The tap device will be in the host network namespace and can connect to a bridge
+# (like OVS) directly.
+# (default: false)
+#disable_new_netns = true

+# if enabled, the runtime will add all the kata processes inside one dedicated cgroup.
+# The container cgroups in the host are not created, just one single cgroup per sandbox.
+# The runtime caller is free to restrict or collect cgroup stats of the overall Kata sandbox.
+# The sandbox cgroup path is the parent cgroup of a container with the PodSandbox annotation.
+# The sandbox cgroup is constrained if there is no container type annotation.
+# See: https://pkg.go.dev/github.com/kata-containers/kata-containers/src/runtime/virtcontainers#ContainerType
+sandbox_cgroup_only={{ kata_containers_qemu_sandbox_cgroup_only }}

+# If enabled, the runtime will attempt to determine the appropriate sandbox size (memory, CPU) before booting the virtual machine. In
+# this case, the runtime will not dynamically update the amount of memory and CPU in the virtual machine. This is generally helpful
+# when a hardware architecture or hypervisor solution is utilized which does not support CPU and/or memory hotplug.
+# Compatibility for determining appropriate sandbox (VM) size:
+# - When running with pods, sandbox sizing information will only be available if using Kubernetes >= 1.23 and containerd >= 1.6. CRI-O
+#   does not yet support sandbox sizing annotations.
+# - When running single containers using a tool like ctr, container sizing information will be available.
+static_sandbox_resource_mgmt=false

+# If specified, sandbox_bind_mounts identifies host paths to be mounted (ro) into the sandbox's shared path.
+# This is only valid if filesystem sharing is utilized. The provided path(s) will be bindmounted into the shared fs directory.
+# If defaults are utilized, these mounts should be available in the guest at `/run/kata-containers/shared/containers/sandbox-mounts`
+# These will not be exposed to the container workloads, and are only provided for potential guest services.
+sandbox_bind_mounts=[]

+# VFIO Mode
+# Determines how VFIO devices should be presented to the container.
+# Options:
+#
+#  - vfio
+#    Matches behaviour of OCI runtimes (e.g. runc) as much as
+#    possible. VFIO devices will appear in the container as VFIO
+#    character devices under /dev/vfio. The exact names may differ
+#    from the host (they need to match the VM's IOMMU group numbers
+#    rather than the host's)
+#
+#  - guest-kernel
+#    This is a Kata-specific behaviour that's useful in certain cases.
+#    The VFIO device is managed by whatever driver in the VM kernel
+#    claims it. This means it will appear as one or more device nodes
+#    or network interfaces depending on the nature of the device.
+#    Using this mode requires specially built workloads that know how
+#    to locate the relevant device interfaces within the VM.
+# +vfio_mode="guest-kernel" + +# If enabled, the runtime will not create Kubernetes emptyDir mounts on the guest filesystem. Instead, emptyDir mounts will +# be created on the host and shared via virtio-fs. This is potentially slower, but allows sharing of files from host to guest. +disable_guest_empty_dir=false + +# Enabled experimental feature list, format: ["a", "b"]. +# Experimental features are features not stable enough for production, +# they may break compatibility, and are prepared for a big version bump. +# Supported experimental features: +# (default: []) +experimental=[] + +# If enabled, user can run pprof tools with shim v2 process through kata-monitor. +# (default: false) +# enable_pprof = true + +# WARNING: All the options in the following section have not been implemented yet. +# This section was added as a placeholder. DO NOT USE IT! +[image] +# Container image service. +# +# Offload the CRI image management service to the Kata agent. +# (default: false) +#service_offload = true + +# Container image decryption keys provisioning. +# Applies only if service_offload is true. +# Keys can be provisioned locally (e.g. through a special command or +# a local file) or remotely (usually after the guest is remotely attested). +# The provision setting is a complete URL that lets the Kata agent decide +# which method to use in order to fetch the keys. +# +# Keys can be stored in a local file, in a measured and attested initrd: +#provision=data:///local/key/file +# +# Keys could be fetched through a special command or binary from the +# initrd (guest) image, e.g. a firmware call: +#provision=file:///path/to/bin/fetcher/in/guest +# +# Keys can be remotely provisioned. The Kata agent fetches them from e.g. +# a HTTPS URL: +#provision=https://my-key-broker.foo/tenant/ diff --git a/roles/container-engine/kata-containers/templates/containerd-shim-kata-v2.j2 b/roles/container-engine/kata-containers/templates/containerd-shim-kata-v2.j2 new file mode 100644 index 00000000000..a3cb830e528 --- /dev/null +++ b/roles/container-engine/kata-containers/templates/containerd-shim-kata-v2.j2 @@ -0,0 +1,2 @@ +#!/bin/bash +KATA_CONF_FILE={{ kata_containers_config_dir }}/configuration-{{ shim }}.toml {{ kata_containers_dir }}/bin/containerd-shim-kata-v2 $@ diff --git a/roles/container-engine/meta/main.yml b/roles/container-engine/meta/main.yml index 49fe314c8e1..3e068d60a0a 100644 --- a/roles/container-engine/meta/main.yml +++ b/roles/container-engine/meta/main.yml @@ -6,6 +6,13 @@ dependencies: - container-engine - validate-container-engine + - role: container-engine/kata-containers + when: + - kata_containers_enabled + tags: + - container-engine + - kata-containers + - role: container-engine/gvisor when: - gvisor_enabled @@ -14,9 +21,38 @@ dependencies: - container-engine - gvisor + - role: container-engine/crun + when: + - crun_enabled + tags: + - container-engine + - crun + + - role: container-engine/youki + when: + - youki_enabled + - container_manager == 'crio' + tags: + - container-engine + - youki + + - role: container-engine/cri-o + when: + - container_manager == 'crio' + tags: + - container-engine + - crio + - role: container-engine/containerd when: - container_manager == 'containerd' tags: - container-engine - containerd + + - role: container-engine/cri-dockerd + when: + - container_manager == 'docker' + tags: + - container-engine + - docker diff --git a/roles/container-engine/molecule/test_cri.yml b/roles/container-engine/molecule/test_cri.yml deleted file mode 100644 index 
e40fe111f2e..00000000000 --- a/roles/container-engine/molecule/test_cri.yml +++ /dev/null @@ -1,24 +0,0 @@ ---- -- name: Test container manager - hosts: all - gather_facts: false - become: true - tasks: - - name: Get kubespray defaults - import_role: - name: ../../kubespray_defaults - - name: Collect services facts - ansible.builtin.service_facts: - - - name: Check container manager service is running - assert: - that: - - ansible_facts.services[container_manager + '.service'].state == 'running' - - ansible_facts.services[container_manager + '.service'].status == 'enabled' - - - name: Check runtime version - command: "{{ bin_dir }}/crictl --runtime-endpoint {{ cri_socket }} version" - register: cri_version - failed_when: > - cri_version is failed or - ("RuntimeName: " + cri_name) not in cri_version.stdout diff --git a/roles/container-engine/molecule/test_runtime.yml b/roles/container-engine/molecule/test_runtime.yml deleted file mode 100644 index e9706362944..00000000000 --- a/roles/container-engine/molecule/test_runtime.yml +++ /dev/null @@ -1,42 +0,0 @@ ---- -- name: Test container runtime - hosts: all - gather_facts: false - become: true - roles: - - role: ../../kubespray_defaults - tasks: - - name: Copy test container files - template: - src: "{{ item }}.j2" - dest: "/tmp/{{ item }}" - owner: root - mode: "0644" - loop: - - container.json - - sandbox.json - - name: Check running a container with runtime {{ container_runtime }} - block: - - name: Run container - command: - argv: - - "{{ bin_dir }}/crictl" - - run - - --with-pull - - --runtime - - "{{ container_runtime }}" - - /tmp/container.json - - /tmp/sandbox.json - - name: Check log file - slurp: - src: "/tmp/{{ container_runtime }}1.0.log" - register: log_file - failed_when: > - log_file is failed or - 'Hello from Docker' not in (log_file.content | b64decode) - rescue: - - name: Display container manager config on error - command: "{{ bin_dir }}/crictl info" - - name: Check container manager logs - command: journalctl -u {{ container_manager }} - failed_when: true diff --git a/roles/container-engine/nerdctl/templates/nerdctl.toml.j2 b/roles/container-engine/nerdctl/templates/nerdctl.toml.j2 index e9bf6006184..8b590f6f53c 100644 --- a/roles/container-engine/nerdctl/templates/nerdctl.toml.j2 +++ b/roles/container-engine/nerdctl/templates/nerdctl.toml.j2 @@ -6,4 +6,4 @@ snapshotter = "{{ nerdctl_snapshotter | default('overlayfs') }}" cni_path = "/opt/cni/bin" cni_netconfpath = "/etc/cni/net.d" cgroup_manager = "{{ kubelet_cgroup_driver | default('systemd') }}" -hosts_dir = ["{{ containerd_cfg_dir }}/certs.d"] \ No newline at end of file +hosts_dir = ["{{ containerd_cfg_dir }}/certs.d"] diff --git a/roles/container-engine/skopeo/tasks/main.yml b/roles/container-engine/skopeo/tasks/main.yml new file mode 100644 index 00000000000..8f21e3f1c3b --- /dev/null +++ b/roles/container-engine/skopeo/tasks/main.yml @@ -0,0 +1,32 @@ +--- +- name: Skopeo | check if fedora coreos + stat: + path: /run/ostree-booted + get_attributes: false + get_checksum: false + get_mime: false + register: ostree + +- name: Skopeo | set is_ostree + set_fact: + is_ostree: "{{ ostree.stat.exists }}" + +- name: Skopeo | Uninstall skopeo package managed by package manager + package: + name: skopeo + state: absent + when: + - not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar")) + ignore_errors: true # noqa ignore-errors + +- name: Skopeo | Download skopeo binary + include_tasks: 
"../../../download/tasks/download_file.yml" + vars: + download: "{{ download_defaults | combine(downloads.skopeo) }}" + +- name: Copy skopeo binary from download dir + copy: + src: "{{ downloads.skopeo.dest }}" + dest: "{{ bin_dir }}/skopeo" + mode: "0755" + remote_src: true diff --git a/roles/container-engine/validate-container-engine/tasks/main.yml b/roles/container-engine/validate-container-engine/tasks/main.yml index c1e9d45f838..ffb541c2480 100644 --- a/roles/container-engine/validate-container-engine/tasks/main.yml +++ b/roles/container-engine/validate-container-engine/tasks/main.yml @@ -84,7 +84,7 @@ block: - name: Drain node include_role: - name: remove_node/pre_remove + name: remove-node/pre-remove apply: tags: - pre-remove @@ -111,7 +111,7 @@ block: - name: Drain node include_role: - name: remove_node/pre_remove + name: remove-node/pre-remove apply: tags: - pre-remove @@ -137,7 +137,7 @@ block: - name: Drain node include_role: - name: remove_node/pre_remove + name: remove-node/pre-remove apply: tags: - pre-remove diff --git a/roles/container-engine/youki/defaults/main.yml b/roles/container-engine/youki/defaults/main.yml new file mode 100644 index 00000000000..2250f22ae3d --- /dev/null +++ b/roles/container-engine/youki/defaults/main.yml @@ -0,0 +1,3 @@ +--- + +youki_bin_dir: "{{ bin_dir }}" diff --git a/roles/container-engine/youki/molecule/default/converge.yml b/roles/container-engine/youki/molecule/default/converge.yml new file mode 100644 index 00000000000..11ef8f6bf6c --- /dev/null +++ b/roles/container-engine/youki/molecule/default/converge.yml @@ -0,0 +1,11 @@ +--- +- name: Converge + hosts: all + become: true + vars: + youki_enabled: true + container_manager: crio + roles: + - role: kubespray-defaults + - role: container-engine/cri-o + - role: container-engine/youki diff --git a/roles/container-engine/youki/molecule/default/files/10-mynet.conf b/roles/container-engine/youki/molecule/default/files/10-mynet.conf new file mode 100644 index 00000000000..b9fa3ba73b6 --- /dev/null +++ b/roles/container-engine/youki/molecule/default/files/10-mynet.conf @@ -0,0 +1,17 @@ +{ + "cniVersion": "0.4.0", + "name": "mynet", + "type": "bridge", + "bridge": "cni0", + "isGateway": true, + "ipMasq": true, + "ipam": { + "type": "host-local", + "subnet": "172.19.0.0/24", + "routes": [ + { + "dst": "0.0.0.0/0" + } + ] + } +} diff --git a/roles/container-engine/youki/molecule/default/files/container.json b/roles/container-engine/youki/molecule/default/files/container.json new file mode 100644 index 00000000000..a5d50943128 --- /dev/null +++ b/roles/container-engine/youki/molecule/default/files/container.json @@ -0,0 +1,10 @@ +{ + "metadata": { + "name": "youki1" + }, + "image": { + "image": "quay.io/kubespray/hello-world:latest" + }, + "log_path": "youki1.0.log", + "linux": {} +} diff --git a/roles/container-engine/youki/molecule/default/files/sandbox.json b/roles/container-engine/youki/molecule/default/files/sandbox.json new file mode 100644 index 00000000000..b2a4ffe50fe --- /dev/null +++ b/roles/container-engine/youki/molecule/default/files/sandbox.json @@ -0,0 +1,10 @@ +{ + "metadata": { + "name": "youki1", + "namespace": "default", + "attempt": 1, + "uid": "hdishd83djaidwnduwk28bcsb" + }, + "linux": {}, + "log_directory": "/tmp" +} diff --git a/roles/container-engine/youki/molecule/default/molecule.yml b/roles/container-engine/youki/molecule/default/molecule.yml new file mode 100644 index 00000000000..9bf49633149 --- /dev/null +++ b/roles/container-engine/youki/molecule/default/molecule.yml 
@@ -0,0 +1,39 @@ +--- +role_name_check: 1 +driver: + name: vagrant + provider: + name: libvirt +platforms: + - name: ubuntu20 + box: generic/ubuntu2004 + cpus: 1 + memory: 1024 + nested: true + groups: + - kube_control_plane + provider_options: + driver: kvm + - name: almalinux9 + box: almalinux/9 + cpus: 1 + memory: 1024 + nested: true + groups: + - kube_control_plane + provider_options: + driver: kvm +provisioner: + name: ansible + env: + ANSIBLE_ROLES_PATH: ../../../../ + config_options: + defaults: + callbacks_enabled: profile_tasks + timeout: 120 + inventory: + group_vars: + all: + become: true +verifier: + name: testinfra diff --git a/roles/container-engine/youki/molecule/default/prepare.yml b/roles/container-engine/youki/molecule/default/prepare.yml new file mode 100644 index 00000000000..a72bdad7f5d --- /dev/null +++ b/roles/container-engine/youki/molecule/default/prepare.yml @@ -0,0 +1,49 @@ +--- +- name: Prepare generic + hosts: all + become: true + roles: + - role: kubespray-defaults + - role: bootstrap-os + - role: adduser + user: "{{ addusers.kube }}" + tasks: + - name: Download CNI + include_tasks: "../../../../download/tasks/download_file.yml" + vars: + download: "{{ download_defaults | combine(downloads.cni) }}" + +- name: Prepare container runtime + hosts: all + become: true + vars: + container_manager: crio + kube_network_plugin: cni + roles: + - role: kubespray-defaults + - role: network_plugin/cni + - role: container-engine/crictl + tasks: + - name: Copy test container files + copy: + src: "{{ item }}" + dest: "/tmp/{{ item }}" + owner: root + mode: "0644" + with_items: + - container.json + - sandbox.json + - name: Create /etc/cni/net.d directory + file: + path: /etc/cni/net.d + state: directory + owner: root + mode: "0755" + - name: Setup CNI + copy: + src: "{{ item }}" + dest: "/etc/cni/net.d/{{ item }}" + owner: root + mode: "0644" + with_items: + - 10-mynet.conf diff --git a/roles/container-engine/youki/molecule/default/tests/test_default.py b/roles/container-engine/youki/molecule/default/tests/test_default.py new file mode 100644 index 00000000000..54ed5c54cbd --- /dev/null +++ b/roles/container-engine/youki/molecule/default/tests/test_default.py @@ -0,0 +1,29 @@ +import os + +import testinfra.utils.ansible_runner + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') + + +def test_run(host): + youkiruntime = "/usr/local/bin/youki" + with host.sudo(): + cmd = host.command(youkiruntime + " --version") + assert cmd.rc == 0 + assert "youki" in cmd.stdout + + +def test_run_pod(host): + runtime = "youki" + + run_command = "/usr/local/bin/crictl run --with-pull --runtime {} /tmp/container.json /tmp/sandbox.json".format(runtime) + with host.sudo(): + cmd = host.command(run_command) + assert cmd.rc == 0 + + with host.sudo(): + log_f = host.file("/tmp/youki1.0.log") + + assert log_f.exists + assert b"Hello from Docker" in log_f.content diff --git a/roles/container-engine/youki/tasks/main.yml b/roles/container-engine/youki/tasks/main.yml new file mode 100644 index 00000000000..7750c65b8d2 --- /dev/null +++ b/roles/container-engine/youki/tasks/main.yml @@ -0,0 +1,12 @@ +--- +- name: Youki | Download youki + include_tasks: "../../../download/tasks/download_file.yml" + vars: + download: "{{ download_defaults | combine(downloads.youki) }}" + +- name: Youki | Copy youki binary from download dir + copy: + src: "{{ local_release_dir }}/youki" + dest: "{{ youki_bin_dir }}/youki" + mode: "0755" + remote_src: true 
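For reference, the molecule scenarios added in this patch (gvisor, kata-containers, youki) all share the same prepare/converge/verify layout and the same testinfra checks, so each can be exercised locally in the same way. A minimal sketch, assuming molecule with the vagrant driver, vagrant-libvirt, and testinfra are installed on the workstation; the scenario name "default" matches the molecule/default/ directories above:

    # run from the role directory, e.g. roles/container-engine/youki
    molecule test -s default       # full create -> converge -> verify -> destroy cycle
    molecule converge -s default   # apply the role only, keeping the VMs for inspection
    molecule verify -s default     # re-run the testinfra checks in molecule/default/tests/

`molecule test` tears the VMs down when it finishes; `converge` followed by `verify` is the faster loop while iterating on a role.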
From 4563a0cb73fb2a4a016ba9f7bc29256bb47ce03b Mon Sep 17 00:00:00 2001 From: Brian Vo Date: Sun, 21 Sep 2025 01:11:54 +0700 Subject: [PATCH 09/10] add cilium config --- .gitignore | 1 + .../group_vars/all/database_nodes.yml | 8 - .../group_vars/k8s_cluster/addons.yml | 2 +- .../group_vars/k8s_cluster/k8s-cluster.yml | 2 +- .../k8s_cluster/k8s-net-flannel.yml | 18 + .../network_plugin/calico/files/openssl.conf | 27 + roles/network_plugin/calico/handlers/main.yml | 31 + roles/network_plugin/calico/meta/main.yml | 3 + .../calico/rr/defaults/main.yml | 5 + roles/network_plugin/calico/rr/tasks/main.yml | 16 + roles/network_plugin/calico/rr/tasks/pre.yml | 15 + .../calico/rr/tasks/update-node.yml | 50 + .../calico/tasks/calico_apiserver_certs.yml | 60 + roles/network_plugin/calico/tasks/check.yml | 235 ++ roles/network_plugin/calico/tasks/install.yml | 510 ++++ roles/network_plugin/calico/tasks/main.yml | 9 + .../calico/tasks/peer_with_calico_rr.yml | 86 + .../calico/tasks/peer_with_router.yml | 116 + roles/network_plugin/calico/tasks/pre.yml | 36 + roles/network_plugin/calico/tasks/repos.yml | 21 + roles/network_plugin/calico/tasks/reset.yml | 30 + .../calico/tasks/typha_certs.yml | 52 + .../templates/calico-apiserver-ns.yml.j2 | 10 + .../calico/templates/calico-apiserver.yml.j2 | 301 ++ .../calico/templates/calico-config.yml.j2 | 106 + .../calico/templates/calico-cr.yml.j2 | 213 ++ .../calico/templates/calico-crb.yml.j2 | 28 + .../calico/templates/calico-ipamconfig.yml.j2 | 8 + .../calico/templates/calico-node-sa.yml.j2 | 13 + .../calico/templates/calico-node.yml.j2 | 509 ++++ .../calico/templates/calico-typha.yml.j2 | 186 ++ .../calico/templates/calicoctl.etcd.sh.j2 | 6 + .../calico/templates/calicoctl.kdd.sh.j2 | 8 + .../kubernetes-services-endpoint.yml.j2 | 11 + .../calico/templates/make-ssl-calico.sh.j2 | 102 + roles/network_plugin/calico/vars/amazon.yml | 5 + roles/network_plugin/calico/vars/centos-9.yml | 3 + roles/network_plugin/calico/vars/debian.yml | 3 + roles/network_plugin/calico/vars/fedora.yml | 3 + roles/network_plugin/calico/vars/opensuse.yml | 3 + roles/network_plugin/calico/vars/redhat-9.yml | 3 + roles/network_plugin/calico/vars/redhat.yml | 4 + roles/network_plugin/calico/vars/rocky-9.yml | 3 + .../calico_defaults/defaults/main.yml | 176 ++ roles/network_plugin/cilium/defaults/main.yml | 139 +- roles/network_plugin/cilium/tasks/apply.yml | 62 +- roles/network_plugin/cilium/tasks/check.yml | 6 +- roles/network_plugin/cilium/tasks/install.yml | 68 +- roles/network_plugin/cilium/tasks/main.yml | 5 - .../cilium/tasks/remove_old_resources.yml | 45 - .../templates/cilium-operator/cr.yml.j2 | 193 ++ .../templates/cilium-operator/crb.yml.j2 | 13 + .../templates/cilium-operator/deploy.yml.j2 | 170 ++ .../templates/cilium-operator/sa.yml.j2 | 6 + .../cilium/cilium-loadbalancer-ip-pool.yml.j2 | 6 +- .../cilium/templates/cilium/config.yml.j2 | 299 ++ .../cilium/templates/cilium/cr.yml.j2 | 166 ++ .../cilium/templates/cilium/crb.yml.j2 | 13 + .../cilium/templates/cilium/ds.yml.j2 | 446 +++ .../cilium/templates/cilium/sa.yml.j2 | 6 + .../cilium/templates/cilium/secret.yml.j2 | 9 + .../cilium/templates/hubble/config.yml.j2 | 71 + .../cilium/templates/hubble/cr.yml.j2 | 108 + .../cilium/templates/hubble/crb.yml.j2 | 46 + .../cilium/templates/hubble/cronjob.yml.j2 | 38 + .../cilium/templates/hubble/deploy.yml.j2 | 199 ++ .../cilium/templates/hubble/job.yml.j2 | 34 + .../cilium/templates/hubble/sa.yml.j2 | 25 + .../cilium/templates/hubble/service.yml.j2 | 106 + 
.../cilium/templates/values.yaml.j2 | 172 -- roles/network_plugin/cni/defaults/main.yml | 2 + roles/network_plugin/cni/tasks/main.yml | 16 + .../custom_cni/defaults/main.yml | 11 + roles/network_plugin/custom_cni/meta/main.yml | 20 + .../network_plugin/custom_cni/tasks/main.yml | 29 + .../network_plugin/flannel/defaults/main.yml | 28 + roles/network_plugin/flannel/meta/main.yml | 3 + roles/network_plugin/flannel/tasks/main.yml | 21 + roles/network_plugin/flannel/tasks/reset.yml | 24 + .../flannel/templates/cni-flannel-rbac.yml.j2 | 52 + .../flannel/templates/cni-flannel.yml.j2 | 172 ++ .../network_plugin/kube-ovn/defaults/main.yml | 135 + roles/network_plugin/kube-ovn/tasks/main.yml | 17 + .../templates/cni-kube-ovn-crd.yml.j2 | 2587 +++++++++++++++++ .../kube-ovn/templates/cni-kube-ovn.yml.j2 | 912 ++++++ .../kube-ovn/templates/cni-ovn.yml.j2 | 674 +++++ .../kube-router/defaults/main.yml | 69 + .../kube-router/handlers/main.yml | 20 + .../network_plugin/kube-router/meta/main.yml | 3 + .../kube-router/tasks/annotate.yml | 21 + .../network_plugin/kube-router/tasks/main.yml | 62 + .../kube-router/tasks/reset.yml | 28 + .../kube-router/templates/cni-conf.json.j2 | 27 + .../kube-router/templates/kube-router.yml.j2 | 228 ++ .../kube-router/templates/kubeconfig.yml.j2 | 18 + .../network_plugin/macvlan/defaults/main.yml | 6 + .../network_plugin/macvlan/files/ifdown-local | 6 + .../macvlan/files/ifdown-macvlan | 40 + roles/network_plugin/macvlan/files/ifup-local | 6 + .../network_plugin/macvlan/files/ifup-macvlan | 43 + .../network_plugin/macvlan/handlers/main.yml | 15 + roles/network_plugin/macvlan/meta/main.yml | 3 + roles/network_plugin/macvlan/tasks/main.yml | 110 + .../macvlan/templates/10-macvlan.conf.j2 | 15 + .../macvlan/templates/99-loopback.conf.j2 | 5 + .../templates/centos-network-macvlan.cfg.j2 | 13 + .../templates/centos-postdown-macvlan.cfg.j2 | 3 + .../templates/centos-postup-macvlan.cfg.j2 | 3 + .../templates/centos-routes-macvlan.cfg.j2 | 7 + .../templates/coreos-device-macvlan.cfg.j2 | 6 + .../templates/coreos-interface-macvlan.cfg.j2 | 6 + .../templates/coreos-network-macvlan.cfg.j2 | 17 + .../templates/coreos-service-nat_ouside.j2 | 6 + .../templates/debian-network-macvlan.cfg.j2 | 26 + roles/network_plugin/meta/main.yml | 45 +- roles/network_plugin/multus/defaults/main.yml | 10 + .../multus/files/multus-clusterrole.yml | 28 + .../files/multus-clusterrolebinding.yml | 13 + .../multus/files/multus-crd.yml | 45 + .../multus/files/multus-serviceaccount.yml | 6 + roles/network_plugin/multus/meta/main.yml | 3 + roles/network_plugin/multus/tasks/main.yml | 36 + .../multus/templates/multus-daemonset.yml.j2 | 100 + roles/network_plugin/ovn4nfv/tasks/main.yml | 16 + roles/network_plugin/weave/defaults/main.yml | 64 + roles/network_plugin/weave/meta/main.yml | 3 + roles/network_plugin/weave/tasks/main.yml | 12 + .../weave/templates/10-weave.conflist.j2 | 16 + .../weave/templates/weave-net.yml.j2 | 297 ++ 129 files changed, 11333 insertions(+), 364 deletions(-) delete mode 100644 inventory/2SpeedLab/group_vars/all/database_nodes.yml create mode 100644 inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-flannel.yml create mode 100644 roles/network_plugin/calico/files/openssl.conf create mode 100644 roles/network_plugin/calico/handlers/main.yml create mode 100644 roles/network_plugin/calico/meta/main.yml create mode 100644 roles/network_plugin/calico/rr/defaults/main.yml create mode 100644 roles/network_plugin/calico/rr/tasks/main.yml create mode 100644 
roles/network_plugin/calico/rr/tasks/pre.yml create mode 100644 roles/network_plugin/calico/rr/tasks/update-node.yml create mode 100644 roles/network_plugin/calico/tasks/calico_apiserver_certs.yml create mode 100644 roles/network_plugin/calico/tasks/check.yml create mode 100644 roles/network_plugin/calico/tasks/install.yml create mode 100644 roles/network_plugin/calico/tasks/main.yml create mode 100644 roles/network_plugin/calico/tasks/peer_with_calico_rr.yml create mode 100644 roles/network_plugin/calico/tasks/peer_with_router.yml create mode 100644 roles/network_plugin/calico/tasks/pre.yml create mode 100644 roles/network_plugin/calico/tasks/repos.yml create mode 100644 roles/network_plugin/calico/tasks/reset.yml create mode 100644 roles/network_plugin/calico/tasks/typha_certs.yml create mode 100644 roles/network_plugin/calico/templates/calico-apiserver-ns.yml.j2 create mode 100644 roles/network_plugin/calico/templates/calico-apiserver.yml.j2 create mode 100644 roles/network_plugin/calico/templates/calico-config.yml.j2 create mode 100644 roles/network_plugin/calico/templates/calico-cr.yml.j2 create mode 100644 roles/network_plugin/calico/templates/calico-crb.yml.j2 create mode 100644 roles/network_plugin/calico/templates/calico-ipamconfig.yml.j2 create mode 100644 roles/network_plugin/calico/templates/calico-node-sa.yml.j2 create mode 100644 roles/network_plugin/calico/templates/calico-node.yml.j2 create mode 100644 roles/network_plugin/calico/templates/calico-typha.yml.j2 create mode 100644 roles/network_plugin/calico/templates/calicoctl.etcd.sh.j2 create mode 100644 roles/network_plugin/calico/templates/calicoctl.kdd.sh.j2 create mode 100644 roles/network_plugin/calico/templates/kubernetes-services-endpoint.yml.j2 create mode 100644 roles/network_plugin/calico/templates/make-ssl-calico.sh.j2 create mode 100644 roles/network_plugin/calico/vars/amazon.yml create mode 100644 roles/network_plugin/calico/vars/centos-9.yml create mode 100644 roles/network_plugin/calico/vars/debian.yml create mode 100644 roles/network_plugin/calico/vars/fedora.yml create mode 100644 roles/network_plugin/calico/vars/opensuse.yml create mode 100644 roles/network_plugin/calico/vars/redhat-9.yml create mode 100644 roles/network_plugin/calico/vars/redhat.yml create mode 100644 roles/network_plugin/calico/vars/rocky-9.yml create mode 100644 roles/network_plugin/calico_defaults/defaults/main.yml delete mode 100644 roles/network_plugin/cilium/tasks/remove_old_resources.yml create mode 100644 roles/network_plugin/cilium/templates/cilium-operator/cr.yml.j2 create mode 100644 roles/network_plugin/cilium/templates/cilium-operator/crb.yml.j2 create mode 100644 roles/network_plugin/cilium/templates/cilium-operator/deploy.yml.j2 create mode 100644 roles/network_plugin/cilium/templates/cilium-operator/sa.yml.j2 create mode 100644 roles/network_plugin/cilium/templates/cilium/config.yml.j2 create mode 100644 roles/network_plugin/cilium/templates/cilium/cr.yml.j2 create mode 100644 roles/network_plugin/cilium/templates/cilium/crb.yml.j2 create mode 100644 roles/network_plugin/cilium/templates/cilium/ds.yml.j2 create mode 100644 roles/network_plugin/cilium/templates/cilium/sa.yml.j2 create mode 100644 roles/network_plugin/cilium/templates/cilium/secret.yml.j2 create mode 100644 roles/network_plugin/cilium/templates/hubble/config.yml.j2 create mode 100644 roles/network_plugin/cilium/templates/hubble/cr.yml.j2 create mode 100644 roles/network_plugin/cilium/templates/hubble/crb.yml.j2 create mode 100644 
roles/network_plugin/cilium/templates/hubble/cronjob.yml.j2 create mode 100644 roles/network_plugin/cilium/templates/hubble/deploy.yml.j2 create mode 100644 roles/network_plugin/cilium/templates/hubble/job.yml.j2 create mode 100644 roles/network_plugin/cilium/templates/hubble/sa.yml.j2 create mode 100644 roles/network_plugin/cilium/templates/hubble/service.yml.j2 delete mode 100644 roles/network_plugin/cilium/templates/values.yaml.j2 create mode 100644 roles/network_plugin/cni/defaults/main.yml create mode 100644 roles/network_plugin/cni/tasks/main.yml create mode 100644 roles/network_plugin/custom_cni/defaults/main.yml create mode 100644 roles/network_plugin/custom_cni/meta/main.yml create mode 100644 roles/network_plugin/custom_cni/tasks/main.yml create mode 100644 roles/network_plugin/flannel/defaults/main.yml create mode 100644 roles/network_plugin/flannel/meta/main.yml create mode 100644 roles/network_plugin/flannel/tasks/main.yml create mode 100644 roles/network_plugin/flannel/tasks/reset.yml create mode 100644 roles/network_plugin/flannel/templates/cni-flannel-rbac.yml.j2 create mode 100644 roles/network_plugin/flannel/templates/cni-flannel.yml.j2 create mode 100644 roles/network_plugin/kube-ovn/defaults/main.yml create mode 100644 roles/network_plugin/kube-ovn/tasks/main.yml create mode 100644 roles/network_plugin/kube-ovn/templates/cni-kube-ovn-crd.yml.j2 create mode 100644 roles/network_plugin/kube-ovn/templates/cni-kube-ovn.yml.j2 create mode 100644 roles/network_plugin/kube-ovn/templates/cni-ovn.yml.j2 create mode 100644 roles/network_plugin/kube-router/defaults/main.yml create mode 100644 roles/network_plugin/kube-router/handlers/main.yml create mode 100644 roles/network_plugin/kube-router/meta/main.yml create mode 100644 roles/network_plugin/kube-router/tasks/annotate.yml create mode 100644 roles/network_plugin/kube-router/tasks/main.yml create mode 100644 roles/network_plugin/kube-router/tasks/reset.yml create mode 100644 roles/network_plugin/kube-router/templates/cni-conf.json.j2 create mode 100644 roles/network_plugin/kube-router/templates/kube-router.yml.j2 create mode 100644 roles/network_plugin/kube-router/templates/kubeconfig.yml.j2 create mode 100644 roles/network_plugin/macvlan/defaults/main.yml create mode 100644 roles/network_plugin/macvlan/files/ifdown-local create mode 100755 roles/network_plugin/macvlan/files/ifdown-macvlan create mode 100755 roles/network_plugin/macvlan/files/ifup-local create mode 100755 roles/network_plugin/macvlan/files/ifup-macvlan create mode 100644 roles/network_plugin/macvlan/handlers/main.yml create mode 100644 roles/network_plugin/macvlan/meta/main.yml create mode 100644 roles/network_plugin/macvlan/tasks/main.yml create mode 100644 roles/network_plugin/macvlan/templates/10-macvlan.conf.j2 create mode 100644 roles/network_plugin/macvlan/templates/99-loopback.conf.j2 create mode 100644 roles/network_plugin/macvlan/templates/centos-network-macvlan.cfg.j2 create mode 100644 roles/network_plugin/macvlan/templates/centos-postdown-macvlan.cfg.j2 create mode 100644 roles/network_plugin/macvlan/templates/centos-postup-macvlan.cfg.j2 create mode 100644 roles/network_plugin/macvlan/templates/centos-routes-macvlan.cfg.j2 create mode 100644 roles/network_plugin/macvlan/templates/coreos-device-macvlan.cfg.j2 create mode 100644 roles/network_plugin/macvlan/templates/coreos-interface-macvlan.cfg.j2 create mode 100644 roles/network_plugin/macvlan/templates/coreos-network-macvlan.cfg.j2 create mode 100644 
roles/network_plugin/macvlan/templates/coreos-service-nat_ouside.j2 create mode 100644 roles/network_plugin/macvlan/templates/debian-network-macvlan.cfg.j2 create mode 100644 roles/network_plugin/multus/defaults/main.yml create mode 100644 roles/network_plugin/multus/files/multus-clusterrole.yml create mode 100644 roles/network_plugin/multus/files/multus-clusterrolebinding.yml create mode 100644 roles/network_plugin/multus/files/multus-crd.yml create mode 100644 roles/network_plugin/multus/files/multus-serviceaccount.yml create mode 100644 roles/network_plugin/multus/meta/main.yml create mode 100644 roles/network_plugin/multus/tasks/main.yml create mode 100644 roles/network_plugin/multus/templates/multus-daemonset.yml.j2 create mode 100644 roles/network_plugin/ovn4nfv/tasks/main.yml create mode 100644 roles/network_plugin/weave/defaults/main.yml create mode 100644 roles/network_plugin/weave/meta/main.yml create mode 100644 roles/network_plugin/weave/tasks/main.yml create mode 100644 roles/network_plugin/weave/templates/10-weave.conflist.j2 create mode 100644 roles/network_plugin/weave/templates/weave-net.yml.j2 diff --git a/.gitignore b/.gitignore index 969166853bd..43cc0d7c330 100644 --- a/.gitignore +++ b/.gitignore @@ -28,6 +28,7 @@ plugins/mitogen !inventory/local !inventory/sample inventory/*/artifacts/ +!inventory/2SpeedLab/inventory.ini # Byte-compiled / optimized / DLL files __pycache__/ diff --git a/inventory/2SpeedLab/group_vars/all/database_nodes.yml b/inventory/2SpeedLab/group_vars/all/database_nodes.yml deleted file mode 100644 index 2dd37f21267..00000000000 --- a/inventory/2SpeedLab/group_vars/all/database_nodes.yml +++ /dev/null @@ -1,8 +0,0 @@ -# Taints for database nodes -node_taints: - - "database=true:NoSchedule" - -# Optional: Add node labels -node_labels: - node-type: database - workload: database diff --git a/inventory/2SpeedLab/group_vars/k8s_cluster/addons.yml b/inventory/2SpeedLab/group_vars/k8s_cluster/addons.yml index 10ec18e2ac1..80dab1af991 100644 --- a/inventory/2SpeedLab/group_vars/k8s_cluster/addons.yml +++ b/inventory/2SpeedLab/group_vars/k8s_cluster/addons.yml @@ -226,7 +226,7 @@ argocd_enabled: false kube_vip_enabled: false #kube_vip_arp_enabled: true #kube_vip_controlplane_enabled: true -#kube_vip_address: 100.10.0.2 +#kube_vip_address: 10.10.88.21 #loadbalancer_apiserver: # address: "{{ kube_vip_address }}" # port: 6443 diff --git a/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-cluster.yml b/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-cluster.yml index 4792391de6c..89f047d8368 100644 --- a/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-cluster.yml +++ b/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-cluster.yml @@ -286,7 +286,7 @@ default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir" ## Supplementary addresses that can be added in kubernetes ssl keys. ## That can be useful for example to setup a keepalived virtual IP -# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3] +supplementary_addresses_in_ssl_keys: [10.10.24.105] ## Running on top of openstack vms with cinder enabled may lead to unschedulable pods due to NoVolumeZoneConflict restriction in kube-scheduler. 
## See https://github.com/kubernetes-sigs/kubespray/issues/2141 diff --git a/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-flannel.yml b/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-flannel.yml new file mode 100644 index 00000000000..64d20a825bb --- /dev/null +++ b/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-flannel.yml @@ -0,0 +1,18 @@ +# see roles/network_plugin/flannel/defaults/main.yml + +## interface that should be used for flannel operations +## This is actually an inventory cluster-level item +# flannel_interface: + +## Select interface that should be used for flannel operations by regexp on Name or IP +## This is actually an inventory cluster-level item +## example: select interface with ip from net 10.0.0.0/23 +## (single-quote the value and escape the backslashes) +# flannel_interface_regexp: '10\\.0\\.[0-2]\\.\\d{1,3}' + +# You can choose what type of flannel backend to use: 'vxlan', 'host-gw' or 'wireguard' +# please refer to flannel's docs: https://github.com/coreos/flannel/blob/master/README.md +# flannel_backend_type: "vxlan" +# flannel_vxlan_vni: 1 +# flannel_vxlan_port: 8472 +# flannel_vxlan_direct_routing: false diff --git a/roles/network_plugin/calico/files/openssl.conf b/roles/network_plugin/calico/files/openssl.conf new file mode 100644 index 00000000000..f4ba47da731 --- /dev/null +++ b/roles/network_plugin/calico/files/openssl.conf @@ -0,0 +1,27 @@ +req_extensions = v3_req +distinguished_name = req_distinguished_name + +[req_distinguished_name] + +[ v3_req ] +basicConstraints = CA:FALSE +keyUsage = digitalSignature, keyEncipherment + +[ ssl_client ] +extendedKeyUsage = clientAuth, serverAuth +basicConstraints = CA:FALSE +subjectKeyIdentifier=hash +authorityKeyIdentifier=keyid,issuer + +[ v3_ca ] +basicConstraints = CA:TRUE +keyUsage = cRLSign, digitalSignature, keyCertSign +subjectKeyIdentifier=hash +authorityKeyIdentifier=keyid:always,issuer + +[ ssl_client_apiserver ] +extendedKeyUsage = clientAuth, serverAuth +basicConstraints = CA:FALSE +subjectKeyIdentifier=hash +authorityKeyIdentifier=keyid,issuer +subjectAltName = DNS:calico-api.calico-apiserver.svc diff --git a/roles/network_plugin/calico/handlers/main.yml b/roles/network_plugin/calico/handlers/main.yml new file mode 100644 index 00000000000..f5f5dc29ebc --- /dev/null +++ b/roles/network_plugin/calico/handlers/main.yml @@ -0,0 +1,31 @@ +--- +- name: Delete 10-calico.conflist + file: + path: /etc/cni/net.d/10-calico.conflist + state: absent + listen: Reset_calico_cni + when: calico_cni_config is defined + +- name: Calico | delete calico-node docker containers + shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -af name=k8s_POD_calico-node* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f" + args: + executable: /bin/bash + register: docker_calico_node_remove + until: docker_calico_node_remove is succeeded + retries: 5 + when: + - container_manager in ["docker"] + - calico_cni_config is defined + listen: Reset_calico_cni + +- name: Calico | delete calico-node crio/containerd containers + shell: 'set -o pipefail && {{ bin_dir }}/crictl pods --name calico-node-* -q | xargs -I% --no-run-if-empty bash -c "{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %"' + args: + executable: /bin/bash + register: crictl_calico_node_remove + until: crictl_calico_node_remove is succeeded + retries: 5 + when: + - container_manager in ["crio", "containerd"] + - calico_cni_config is defined + listen: Reset_calico_cni diff --git a/roles/network_plugin/calico/meta/main.yml 
b/roles/network_plugin/calico/meta/main.yml new file mode 100644 index 00000000000..15e9b8c408d --- /dev/null +++ b/roles/network_plugin/calico/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - role: network_plugin/calico_defaults diff --git a/roles/network_plugin/calico/rr/defaults/main.yml b/roles/network_plugin/calico/rr/defaults/main.yml new file mode 100644 index 00000000000..dedda197cbc --- /dev/null +++ b/roles/network_plugin/calico/rr/defaults/main.yml @@ -0,0 +1,5 @@ +--- +# Global as_num (/calico/bgp/v1/global/as_num) +# should be the same as in calico role +global_as_num: "64512" +calico_baremetal_nodename: "{{ kube_override_hostname | default(inventory_hostname) }}" diff --git a/roles/network_plugin/calico/rr/tasks/main.yml b/roles/network_plugin/calico/rr/tasks/main.yml new file mode 100644 index 00000000000..471518d9f22 --- /dev/null +++ b/roles/network_plugin/calico/rr/tasks/main.yml @@ -0,0 +1,16 @@ +--- +- name: Calico-rr | Pre-upgrade tasks + include_tasks: pre.yml + +- name: Calico-rr | Configure node + include_tasks: update-node.yml + +- name: Calico-rr | Set label for route reflector + command: >- + {{ bin_dir }}/calicoctl.sh label node {{ inventory_hostname }} + 'i-am-a-route-reflector=true' --overwrite + changed_when: false + register: calico_rr_label + until: calico_rr_label is succeeded + delay: "{{ retry_stagger | random + 3 }}" + retries: 10 diff --git a/roles/network_plugin/calico/rr/tasks/pre.yml b/roles/network_plugin/calico/rr/tasks/pre.yml new file mode 100644 index 00000000000..f8a9de6118b --- /dev/null +++ b/roles/network_plugin/calico/rr/tasks/pre.yml @@ -0,0 +1,15 @@ +--- +- name: Calico-rr | Disable calico-rr service if it exists + service: + name: calico-rr + state: stopped + enabled: false + failed_when: false + +- name: Calico-rr | Delete obsolete files + file: + path: "{{ item }}" + state: absent + with_items: + - /etc/calico/calico-rr.env + - /etc/systemd/system/calico-rr.service diff --git a/roles/network_plugin/calico/rr/tasks/update-node.yml b/roles/network_plugin/calico/rr/tasks/update-node.yml new file mode 100644 index 00000000000..fc873ba13fd --- /dev/null +++ b/roles/network_plugin/calico/rr/tasks/update-node.yml @@ -0,0 +1,50 @@ +--- +# Workaround to retry a block of tasks; Ansible has no direct way to do this. +# See the block-loop feature request: https://github.com/ansible/ansible/issues/46203 +- name: Calico-rr | Configure route reflector + block: + - name: Set the retry count + set_fact: + retry_count: "{{ 0 if retry_count is undefined else retry_count | int + 1 }}" + + - name: Calico | Set label for route reflector # noqa command-instead-of-shell + shell: "{{ bin_dir }}/calicoctl.sh label node {{ inventory_hostname }} calico-rr-id={{ calico_rr_id }} --overwrite" + changed_when: false + register: calico_rr_id_label + until: calico_rr_id_label is succeeded + delay: "{{ retry_stagger | random + 3 }}" + retries: 10 + when: calico_rr_id is defined + + - name: Calico-rr | Fetch current node object + command: "{{ bin_dir }}/calicoctl.sh get node {{ inventory_hostname }} -ojson" + changed_when: false + register: calico_rr_node + until: calico_rr_node is succeeded + delay: "{{ retry_stagger | random + 3 }}" + retries: 10 + + - name: Calico-rr | Set route reflector cluster ID + # noqa: jinja[spacing] + set_fact: + calico_rr_node_patched: >- + {{ calico_rr_node.stdout | from_json | combine({ 'spec': { 'bgp': + { 'routeReflectorClusterID': cluster_id }}}, recursive=True) }} + + - name: Calico-rr | Configure route 
reflector # noqa command-instead-of-shell + shell: "{{ bin_dir }}/calicoctl.sh replace -f-" + args: + stdin: "{{ calico_rr_node_patched | to_json }}" + + rescue: + - name: Fail if retry limit is reached + fail: + msg: Ended after 10 retries + when: retry_count | int == 10 + + - name: Retrying node configuration + debug: + msg: "Failed to configure route reflector - Retrying..." + + - name: Retry node configuration + include_tasks: update-node.yml diff --git a/roles/network_plugin/calico/tasks/calico_apiserver_certs.yml b/roles/network_plugin/calico/tasks/calico_apiserver_certs.yml new file mode 100644 index 00000000000..d42917c4eca --- /dev/null +++ b/roles/network_plugin/calico/tasks/calico_apiserver_certs.yml @@ -0,0 +1,60 @@ +--- +- name: Calico | Check if calico apiserver certs secret exists + command: "{{ kubectl }} -n calico-apiserver get secret calico-apiserver-certs" + register: calico_apiserver_secret + changed_when: false + failed_when: false + +- name: Calico | Create ns manifests + template: + src: "calico-apiserver-ns.yml.j2" + dest: "{{ kube_config_dir }}/calico-apiserver-ns.yml" + mode: "0644" + +- name: Calico | Apply ns manifests + kube: + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/calico-apiserver-ns.yml" + state: "latest" + +- name: Calico | Ensure calico certs dir + file: + path: /etc/calico/certs + state: directory + mode: "0755" + when: calico_apiserver_secret.rc != 0 + +- name: Calico | Copy ssl script for apiserver certs + template: + src: make-ssl-calico.sh.j2 + dest: "{{ bin_dir }}/make-ssl-apiserver.sh" + mode: "0755" + when: calico_apiserver_secret.rc != 0 + +- name: Calico | Copy ssl config for apiserver certs + copy: + src: openssl.conf + dest: /etc/calico/certs/openssl.conf + mode: "0644" + when: calico_apiserver_secret.rc != 0 + +- name: Calico | Generate apiserver certs + command: >- + {{ bin_dir }}/make-ssl-apiserver.sh + -f /etc/calico/certs/openssl.conf + -c {{ kube_cert_dir }} + -d /etc/calico/certs + -s apiserver + when: calico_apiserver_secret.rc != 0 + +- name: Calico | Create calico apiserver generic secrets + command: >- + {{ kubectl }} -n calico-apiserver + create secret generic {{ item.name }} + --from-file={{ item.cert }} + --from-file={{ item.key }} + with_items: + - name: calico-apiserver-certs + cert: /etc/calico/certs/apiserver.crt + key: /etc/calico/certs/apiserver.key + when: calico_apiserver_secret.rc != 0 diff --git a/roles/network_plugin/calico/tasks/check.yml b/roles/network_plugin/calico/tasks/check.yml new file mode 100644 index 00000000000..d512a9648b6 --- /dev/null +++ b/roles/network_plugin/calico/tasks/check.yml @@ -0,0 +1,235 @@ +--- +- name: Stop if legacy encapsulation variables are detected (ipip) + assert: + that: + - ipip is not defined + msg: "'ipip' configuration variable is deprecated; please configure your inventory with 'calico_ipip_mode' set to 'Always' or 'CrossSubnet' according to your specific needs" + run_once: true + delegate_to: "{{ groups['kube_control_plane'][0] }}" + +- name: Stop if legacy encapsulation variables are detected (ipip_mode) + assert: + that: + - ipip_mode is not defined + msg: "'ipip_mode' configuration variable is deprecated; please configure your inventory with 'calico_ipip_mode' set to 'Always' or 'CrossSubnet' according to your specific needs" + run_once: true + delegate_to: "{{ groups['kube_control_plane'][0] }}" + +- name: Stop if legacy encapsulation variables are detected (calcio_ipam_autoallocateblocks) + assert: + that: + - calcio_ipam_autoallocateblocks is not defined + 
msg: "'calcio_ipam_autoallocateblocks' configuration variable is deprecated, it's a typo, please configure your inventory with 'calico_ipam_autoallocateblocks' set to 'true' or 'false' according to your specific needs" + run_once: true + delegate_to: "{{ groups['kube_control_plane'][0] }}" + + +- name: Stop if supported Calico versions + assert: + that: + - "calico_version in calico_crds_archive_checksums.no_arch.keys()" + msg: "Calico version not supported {{ calico_version }} not in {{ calico_crds_archive_checksums.no_arch.keys() }}" + run_once: true + delegate_to: "{{ groups['kube_control_plane'][0] }}" + +- name: Check if calicoctl.sh exists + stat: + path: "{{ bin_dir }}/calicoctl.sh" + register: calicoctl_sh_exists + run_once: true + delegate_to: "{{ groups['kube_control_plane'][0] }}" + +- name: Check if calico ready + command: "{{ bin_dir }}/calicoctl.sh get ClusterInformation default" + register: calico_ready + run_once: true + ignore_errors: true + retries: 5 + delay: 10 + until: calico_ready.rc == 0 + delegate_to: "{{ groups['kube_control_plane'][0] }}" + when: calicoctl_sh_exists.stat.exists + +- name: Check that current calico version is enough for upgrade + run_once: true + delegate_to: "{{ groups['kube_control_plane'][0] }}" + when: calicoctl_sh_exists.stat.exists and calico_ready.rc == 0 + block: + - name: Get current calico version + shell: "set -o pipefail && {{ bin_dir }}/calicoctl.sh version | grep 'Client Version:' | awk '{ print $3}'" + args: + executable: /bin/bash + register: calico_version_on_server + changed_when: false + + - name: Assert that current calico version is enough for upgrade + assert: + that: + - calico_version_on_server.stdout.removeprefix('v') is version(calico_min_version_required, '>=') + msg: > + Your version of calico is not fresh enough for upgrade. + Minimum version is {{ calico_min_version_required }} supported by the previous kubespray release. + But current version is {{ calico_version_on_server.stdout }}. 
+ +- name: "Check that cluster_id is set and a valid IPv4 address if calico_rr enabled" + assert: + that: + - cluster_id is defined + - cluster_id is ansible.utils.ipv4 + msg: "A unique cluster_id is required if using calico_rr, and it must be a valid IPv4 address" + when: + - peer_with_calico_rr + - inventory_hostname == groups['kube_control_plane'][0] + run_once: true + delegate_to: "{{ groups['kube_control_plane'][0] }}" + +- name: "Check that calico_rr nodes are in k8s_cluster group" + assert: + that: + - '"k8s_cluster" in group_names' + msg: "calico_rr must be a child group of k8s_cluster group" + when: + - '"calico_rr" in group_names' + run_once: true + delegate_to: "{{ groups['kube_control_plane'][0] }}" + +- name: "Check vars defined correctly" + assert: + that: + - "calico_pool_name is defined" + - "calico_pool_name is match('^[a-zA-Z0-9-_\\\\.]{2,63}$')" + msg: "calico_pool_name contains invalid characters" + run_once: true + delegate_to: "{{ groups['kube_control_plane'][0] }}" + +- name: "Check calico network backend defined correctly" + assert: + that: + - "calico_network_backend in ['bird', 'vxlan', 'none']" + msg: "calico network backend is not 'bird', 'vxlan' or 'none'" + run_once: true + delegate_to: "{{ groups['kube_control_plane'][0] }}" + +- name: "Check ipip and vxlan mode defined correctly" + run_once: true + delegate_to: "{{ groups['kube_control_plane'][0] }}" + assert: + that: + - "calico_ipip_mode in ['Always', 'CrossSubnet', 'Never']" + - "calico_vxlan_mode in ['Always', 'CrossSubnet', 'Never']" + msg: "calico inter host encapsulation mode is not 'Always', 'CrossSubnet' or 'Never'" + +- name: "Check ipip and vxlan mode if simultaneously enabled" + assert: + that: + - "calico_vxlan_mode in ['Never']" + msg: "IP in IP and VXLAN mode is mutualy exclusive modes" + when: + - "calico_ipip_mode in ['Always', 'CrossSubnet']" + run_once: true + delegate_to: "{{ groups['kube_control_plane'][0] }}" + +- name: "Check ipip and vxlan mode if simultaneously enabled" + assert: + that: + - "calico_ipip_mode in ['Never']" + msg: "IP in IP and VXLAN mode is mutualy exclusive modes" + when: + - "calico_vxlan_mode in ['Always', 'CrossSubnet']" + run_once: true + delegate_to: "{{ groups['kube_control_plane'][0] }}" + +- name: "Get Calico {{ calico_pool_name }} configuration" + command: "{{ bin_dir }}/calicoctl.sh get ipPool {{ calico_pool_name }} -o json" + failed_when: false + changed_when: false + check_mode: false + register: calico + run_once: true + when: ipv4_stack | bool + delegate_to: "{{ groups['kube_control_plane'][0] }}" + +- name: "Set calico_pool_conf" + set_fact: + calico_pool_conf: '{{ calico.stdout | from_json }}' + when: + - ipv4_stack | bool + - calico is defined + - calico.rc == 0 and calico.stdout + run_once: true + delegate_to: "{{ groups['kube_control_plane'][0] }}" + +- name: "Check if inventory match current cluster configuration" + assert: + that: + - calico_pool_conf.spec.blockSize | int == calico_pool_blocksize | int + - calico_pool_conf.spec.cidr == (calico_pool_cidr | default(kube_pods_subnet)) + - not calico_pool_conf.spec.ipipMode is defined or calico_pool_conf.spec.ipipMode == calico_ipip_mode + - not calico_pool_conf.spec.vxlanMode is defined or calico_pool_conf.spec.vxlanMode == calico_vxlan_mode + msg: "Your inventory doesn't match the current cluster configuration" + when: + - ipv4_stack | bool + - calico_pool_conf is defined + run_once: true + delegate_to: "{{ groups['kube_control_plane'][0] }}" + +- name: "Get Calico {{ calico_pool_name }}-ipv6 
configuration" + command: "{{ bin_dir }}/calicoctl.sh get ipPool {{ calico_pool_name }}-ipv6 -o json" + failed_when: false + changed_when: false + check_mode: false + register: calico_ipv6 + run_once: true + when: ipv6_stack | bool + delegate_to: "{{ groups['kube_control_plane'][0] }}" + +- name: "Set calico_pool_ipv6_conf" + set_fact: + calico_pool_conf: '{{ calico_ipv6.stdout | from_json }}' + when: + - ipv6_stack | bool + - alico_ipv6 is defined + - calico_ipv6.rc == 0 and calico_ipv6.stdout + run_once: true + delegate_to: "{{ groups['kube_control_plane'][0] }}" + +- name: "Check if ipv6 inventory match current cluster configuration" + assert: + that: + - calico_pool_conf.spec.blockSize | int == calico_pool_blocksize_ipv6 | int + - calico_pool_conf.spec.cidr == (calico_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6)) + - not calico_pool_conf.spec.ipipMode is defined or calico_pool_conf.spec.ipipMode == calico_ipip_mode_ipv6 + - not calico_pool_conf.spec.vxlanMode is defined or calico_pool_conf.spec.vxlanMode == calico_vxlan_mode_ipv6 + msg: "Your ipv6 inventory doesn't match the current cluster configuration" + when: + - ipv6_stack | bool + - calico_pool_ipv6_conf is defined + run_once: true + delegate_to: "{{ groups['kube_control_plane'][0] }}" + +- name: "Check kdd calico_datastore if calico_apiserver_enabled" + assert: + that: calico_datastore == "kdd" + msg: "When using calico apiserver you need to use the kubernetes datastore" + when: + - calico_apiserver_enabled + run_once: true + delegate_to: "{{ groups['kube_control_plane'][0] }}" + +- name: "Check kdd calico_datastore if typha_enabled" + assert: + that: calico_datastore == "kdd" + msg: "When using typha you need to use the kubernetes datastore" + when: + - typha_enabled + run_once: true + delegate_to: "{{ groups['kube_control_plane'][0] }}" + +- name: "Check ipip mode is Never for calico ipv6" + assert: + that: + - "calico_ipip_mode_ipv6 in ['Never']" + msg: "Calico doesn't support ipip tunneling for the IPv6" + when: ipv6_stack | bool + run_once: true + delegate_to: "{{ groups['kube_control_plane'][0] }}" diff --git a/roles/network_plugin/calico/tasks/install.yml b/roles/network_plugin/calico/tasks/install.yml new file mode 100644 index 00000000000..52bd5c452a1 --- /dev/null +++ b/roles/network_plugin/calico/tasks/install.yml @@ -0,0 +1,510 @@ +--- +- name: Calico | Install Wireguard packages + package: + name: "{{ item }}" + state: present + with_items: "{{ calico_wireguard_packages }}" + register: calico_package_install + until: calico_package_install is succeeded + retries: 4 + when: calico_wireguard_enabled + +- name: Calico | Copy calicoctl binary from download dir + copy: + src: "{{ downloads.calicoctl.dest }}" + dest: "{{ bin_dir }}/calicoctl" + mode: "0755" + remote_src: true + +- name: Calico | Create calico certs directory + file: + dest: "{{ calico_cert_dir }}" + state: directory + mode: "0750" + owner: root + group: root + when: calico_datastore == "etcd" + +- name: Calico | Link etcd certificates for calico-node + file: + src: "{{ etcd_cert_dir }}/{{ item.s }}" + dest: "{{ calico_cert_dir }}/{{ item.d }}" + state: hard + mode: "0640" + force: true + with_items: + - {s: "{{ kube_etcd_cacert_file }}", d: "ca_cert.crt"} + - {s: "{{ kube_etcd_cert_file }}", d: "cert.crt"} + - {s: "{{ kube_etcd_key_file }}", d: "key.pem"} + when: calico_datastore == "etcd" + +- name: Calico | Generate typha certs + include_tasks: typha_certs.yml + when: + - typha_secure + - inventory_hostname == groups['kube_control_plane'][0] + +- 
name: Calico | Generate apiserver certs + include_tasks: calico_apiserver_certs.yml + when: + - calico_apiserver_enabled + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Calico | Install calicoctl wrapper script + template: + src: "calicoctl.{{ calico_datastore }}.sh.j2" + dest: "{{ bin_dir }}/calicoctl.sh" + mode: "0755" + owner: root + group: root + +- name: Calico | wait for etcd + uri: + url: "{{ etcd_access_addresses.split(',') | first }}/health" + validate_certs: false + client_cert: "{{ calico_cert_dir }}/cert.crt" + client_key: "{{ calico_cert_dir }}/key.pem" + register: result + until: result.status == 200 or result.status == 401 + retries: 10 + delay: 5 + run_once: true + when: calico_datastore == "etcd" + +- name: Calico | Check if calico network pool has already been configured + # noqa risky-shell-pipe - grep will exit 1 if no match found + shell: > + {{ bin_dir }}/calicoctl.sh get ippool | grep -w "{{ calico_pool_cidr | default(kube_pods_subnet) }}" | wc -l + args: + executable: /bin/bash + register: calico_conf + retries: 4 + until: calico_conf.rc == 0 + delay: "{{ retry_stagger | random + 3 }}" + changed_when: false + when: + - inventory_hostname == groups['kube_control_plane'][0] + - ipv4_stack | bool + +- name: Calico | Ensure that calico_pool_cidr is within kube_pods_subnet when defined + assert: + that: "[calico_pool_cidr] | ansible.utils.ipaddr(kube_pods_subnet) | length == 1" + msg: "{{ calico_pool_cidr }} is not within or equal to {{ kube_pods_subnet }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] + - ipv4_stack | bool + - calico_pool_cidr is defined + - 'calico_conf.stdout == "0"' + +- name: Calico | Check if calico IPv6 network pool has already been configured + # noqa risky-shell-pipe - grep will exit 1 if no match found + shell: > + {{ bin_dir }}/calicoctl.sh get ippool | grep -w "{{ calico_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6) }}" | wc -l + args: + executable: /bin/bash + register: calico_conf_ipv6 + retries: 4 + until: calico_conf_ipv6.rc == 0 + delay: "{{ retry_stagger | random + 3 }}" + changed_when: false + when: + - inventory_hostname == groups['kube_control_plane'][0] + - ipv6_stack + +- name: Calico | Ensure that calico_pool_cidr_ipv6 is within kube_pods_subnet_ipv6 when defined + assert: + that: "[calico_pool_cidr_ipv6] | ansible.utils.ipaddr(kube_pods_subnet_ipv6) | length == 1" + msg: "{{ calico_pool_cidr_ipv6 }} is not within or equal to {{ kube_pods_subnet_ipv6 }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] + - ipv6_stack | bool + - calico_conf_ipv6.stdout is defined and calico_conf_ipv6.stdout == "0" + - calico_pool_cidr_ipv6 is defined + +- name: Calico | kdd specific configuration + when: + - ('kube_control_plane' in group_names) + - calico_datastore == "kdd" + block: + - name: Calico | Check if extra directory is needed + stat: + path: "{{ local_release_dir }}/calico-{{ calico_version }}-kdd-crds/{{ 'kdd' if (calico_version is version('3.22.3', '<')) else 'crd' }}" + register: kdd_path + - name: Calico | Set kdd path when calico < v3.22.3 + set_fact: + calico_kdd_path: "{{ local_release_dir }}/calico-{{ calico_version }}-kdd-crds{{ '/kdd' if kdd_path.stat.exists is defined and kdd_path.stat.exists }}" + when: + - calico_version is version('3.22.3', '<') + - name: Calico | Set kdd path when calico > 3.22.2 + set_fact: + calico_kdd_path: "{{ local_release_dir }}/calico-{{ calico_version }}-kdd-crds{{ '/crd' if kdd_path.stat.exists is defined and kdd_path.stat.exists }}" + 
when: + - calico_version is version('3.22.2', '>') + - name: Calico | Create calico manifests for kdd + assemble: + src: "{{ calico_kdd_path }}" + dest: "{{ kube_config_dir }}/kdd-crds.yml" + mode: "0644" + delimiter: "---\n" + regexp: ".*\\.yaml" + remote_src: true + + - name: Calico | Create Calico Kubernetes datastore resources + kube: + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/kdd-crds.yml" + state: "latest" + register: kubectl_result + until: kubectl_result is succeeded + retries: 5 + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Calico | Configure Felix + when: + - inventory_hostname == groups['kube_control_plane'][0] + block: + - name: Calico | Get existing FelixConfiguration + command: "{{ bin_dir }}/calicoctl.sh get felixconfig default -o json" + register: _felix_cmd + ignore_errors: true + changed_when: false + + - name: Calico | Set kubespray FelixConfiguration + set_fact: + _felix_config: > + { + "kind": "FelixConfiguration", + "apiVersion": "projectcalico.org/v3", + "metadata": { + "name": "default", + }, + "spec": { + "ipipEnabled": {{ calico_ipip_mode != 'Never' }}, + "reportingInterval": "{{ calico_felix_reporting_interval }}", + "bpfLogLevel": "{{ calico_bpf_log_level }}", + "bpfEnabled": {{ calico_bpf_enabled | bool }}, + "bpfExternalServiceMode": "{{ calico_bpf_service_mode }}", + "wireguardEnabled": {{ calico_wireguard_enabled | bool }}, + "logSeverityScreen": "{{ calico_felix_log_severity_screen }}", + "vxlanEnabled": {{ calico_vxlan_mode != 'Never' }}, + "featureDetectOverride": "{{ calico_feature_detect_override }}", + "floatingIPs": "{{ calico_felix_floating_ips }}" + } + } + + - name: Calico | Process FelixConfiguration + set_fact: + _felix_config: "{{ _felix_cmd.stdout | from_json | combine(_felix_config, recursive=True) }}" + when: + - _felix_cmd is success + + - name: Calico | Configure calico FelixConfiguration + command: + cmd: "{{ bin_dir }}/calicoctl.sh apply -f -" + stdin: "{{ _felix_config is string | ternary(_felix_config, _felix_config | to_json) }}" + changed_when: false + +- name: Calico | Configure Calico IP Pool + when: + - inventory_hostname == groups['kube_control_plane'][0] + - ipv4_stack | bool + block: + - name: Calico | Get existing calico network pool + command: "{{ bin_dir }}/calicoctl.sh get ippool {{ calico_pool_name }} -o json" + register: _calico_pool_cmd + ignore_errors: true + changed_when: false + + - name: Calico | Set kubespray calico network pool + set_fact: + _calico_pool: > + { + "kind": "IPPool", + "apiVersion": "projectcalico.org/v3", + "metadata": { + "name": "{{ calico_pool_name }}", + }, + "spec": { + "blockSize": {{ calico_pool_blocksize }}, + "cidr": "{{ calico_pool_cidr | default(kube_pods_subnet) }}", + "ipipMode": "{{ calico_ipip_mode }}", + "vxlanMode": "{{ calico_vxlan_mode }}", + "natOutgoing": {{ nat_outgoing | default(false) }} + } + } + + - name: Calico | Process calico network pool + when: + - _calico_pool_cmd is success + block: + - name: Calico | Get current calico network pool blocksize + set_fact: + _calico_blocksize: > + { + "spec": { + "blockSize": {{ (_calico_pool_cmd.stdout | from_json).spec.blockSize }} + } + } + - name: Calico | Merge calico network pool + set_fact: + _calico_pool: "{{ _calico_pool_cmd.stdout | from_json | combine(_calico_pool, _calico_blocksize, recursive=True) }}" + + - name: Calico | Configure calico network pool + command: + cmd: "{{ bin_dir }}/calicoctl.sh apply -f -" + stdin: "{{ _calico_pool is string | ternary(_calico_pool, 
_calico_pool | to_json) }}" + changed_when: false + +- name: Calico | Configure Calico IPv6 Pool + when: + - inventory_hostname == groups['kube_control_plane'][0] + - ipv6_stack | bool + block: + - name: Calico | Get existing calico ipv6 network pool + command: "{{ bin_dir }}/calicoctl.sh get ippool {{ calico_pool_name }}-ipv6 -o json" + register: _calico_pool_ipv6_cmd + ignore_errors: true + changed_when: false + + - name: Calico | Set kubespray calico network pool + set_fact: + _calico_pool_ipv6: > + { + "kind": "IPPool", + "apiVersion": "projectcalico.org/v3", + "metadata": { + "name": "{{ calico_pool_name }}-ipv6", + }, + "spec": { + "blockSize": {{ calico_pool_blocksize_ipv6 }}, + "cidr": "{{ calico_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6) }}", + "ipipMode": "{{ calico_ipip_mode_ipv6 }}", + "vxlanMode": "{{ calico_vxlan_mode_ipv6 }}", + "natOutgoing": {{ nat_outgoing_ipv6 | default(false) }} + } + } + + - name: Calico | Process calico ipv6 network pool + when: + - _calico_pool_ipv6_cmd is success + block: + - name: Calico | Get current calico ipv6 network pool blocksize + set_fact: + _calico_blocksize_ipv6: > + { + "spec": { + "blockSize": {{ (_calico_pool_ipv6_cmd.stdout | from_json).spec.blockSize }} + } + } + - name: Calico | Merge calico ipv6 network pool + set_fact: + _calico_pool_ipv6: "{{ _calico_pool_ipv6_cmd.stdout | from_json | combine(_calico_pool_ipv6, _calico_blocksize_ipv6, recursive=True) }}" + + - name: Calico | Configure calico ipv6 network pool + command: + cmd: "{{ bin_dir }}/calicoctl.sh apply -f -" + stdin: "{{ _calico_pool_ipv6 is string | ternary(_calico_pool_ipv6, _calico_pool_ipv6 | to_json) }}" + changed_when: false + +- name: Populate Service External IPs + set_fact: + _service_external_ips: "{{ _service_external_ips | default([]) + [{'cidr': item}] }}" + with_items: "{{ calico_advertise_service_external_ips }}" + run_once: true + +- name: Populate Service LoadBalancer IPs + set_fact: + _service_loadbalancer_ips: "{{ _service_loadbalancer_ips | default([]) + [{'cidr': item}] }}" + with_items: "{{ calico_advertise_service_loadbalancer_ips }}" + run_once: true + +- name: "Determine nodeToNodeMesh needed state" + set_fact: + nodeToNodeMeshEnabled: "false" + when: + - peer_with_router | default(false) or peer_with_calico_rr | default(false) + - ('k8s_cluster' in group_names) + run_once: true + +- name: Calico | Configure Calico BGP + when: + - inventory_hostname == groups['kube_control_plane'][0] + block: + - name: Calico | Get existing BGP Configuration + command: "{{ bin_dir }}/calicoctl.sh get bgpconfig default -o json" + register: _bgp_config_cmd + ignore_errors: true + changed_when: false + + - name: Calico | Set kubespray BGP Configuration + set_fact: + # noqa: jinja[spacing] + _bgp_config: > + { + "kind": "BGPConfiguration", + "apiVersion": "projectcalico.org/v3", + "metadata": { + "name": "default", + }, + "spec": { + "listenPort": {{ calico_bgp_listen_port }}, + "logSeverityScreen": "Info", + {% if not calico_no_global_as_num | default(false) %}"asNumber": {{ global_as_num }},{% endif %} + "nodeToNodeMeshEnabled": {{ nodeToNodeMeshEnabled | default('true') }} , + {% if calico_advertise_cluster_ips | default(false) %} + "serviceClusterIPs": >- + {%- if ipv4_stack and ipv6_stack-%} + [{"cidr": "{{ kube_service_addresses }}", "cidr": "{{ kube_service_addresses_ipv6 }}"}], + {%- elif ipv6_stack-%} + [{"cidr": "{{ kube_service_addresses_ipv6 }}"}], + {%- else -%} + [{"cidr": "{{ kube_service_addresses }}"}], + {%- endif -%} + {% endif %} + {% if 
calico_advertise_service_loadbalancer_ips | length > 0 %}"serviceLoadBalancerIPs": {{ _service_loadbalancer_ips }},{% endif %} + "serviceExternalIPs": {{ _service_external_ips | default([]) }} + } + } + + - name: Calico | Process BGP Configuration + set_fact: + _bgp_config: "{{ _bgp_config_cmd.stdout | from_json | combine(_bgp_config, recursive=True) }}" + when: + - _bgp_config_cmd is success + + - name: Calico | Set up BGP Configuration + command: + cmd: "{{ bin_dir }}/calicoctl.sh apply -f -" + stdin: "{{ _bgp_config is string | ternary(_bgp_config, _bgp_config | to_json) }}" + changed_when: false + +- name: Calico | Create calico manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: "0644" + with_items: + - {name: calico-config, file: calico-config.yml, type: cm} + - {name: calico-node, file: calico-node.yml, type: ds} + - {name: calico, file: calico-node-sa.yml, type: sa} + - {name: calico, file: calico-cr.yml, type: clusterrole} + - {name: calico, file: calico-crb.yml, type: clusterrolebinding} + - {name: kubernetes-services-endpoint, file: kubernetes-services-endpoint.yml, type: cm } + register: calico_node_manifests + when: + - ('kube_control_plane' in group_names) + - rbac_enabled or item.type not in rbac_resources + +- name: Calico | Create calico manifests for typha + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: "0644" + with_items: + - {name: calico, file: calico-typha.yml, type: typha} + register: calico_node_typha_manifest + when: + - ('kube_control_plane' in group_names) + - typha_enabled + +- name: Calico | get calico apiserver caBundle + command: "{{ bin_dir }}/kubectl get secret -n calico-apiserver calico-apiserver-certs -o jsonpath='{.data.apiserver\\.crt}'" + changed_when: false + register: calico_apiserver_cabundle + when: + - inventory_hostname == groups['kube_control_plane'][0] + - calico_apiserver_enabled + +- name: Calico | set calico apiserver caBundle fact + set_fact: + calico_apiserver_cabundle: "{{ calico_apiserver_cabundle.stdout }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] + - calico_apiserver_enabled + +- name: Calico | Create calico manifests for apiserver + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: "0644" + with_items: + - {name: calico, file: calico-apiserver.yml, type: calico-apiserver} + register: calico_apiserver_manifest + when: + - ('kube_control_plane' in group_names) + - calico_apiserver_enabled + +- name: Start Calico resources + kube: + name: "{{ item.item.name }}" + namespace: "kube-system" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" + state: "latest" + with_items: + - "{{ calico_node_manifests.results }}" + - "{{ calico_node_typha_manifest.results }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] + - not item is skipped + loop_control: + label: "{{ item.item.file }}" + +- name: Start Calico apiserver resources + kube: + name: "{{ item.item.name }}" + namespace: "calico-apiserver" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" + state: "latest" + with_items: + - "{{ calico_apiserver_manifest.results }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] + - not item is skipped + loop_control: + label: "{{ item.item.file }}" + +- name: Wait for calico kubeconfig to be created + 
wait_for: + path: /etc/cni/net.d/calico-kubeconfig + timeout: "{{ calico_kubeconfig_wait_timeout }}" + when: + - inventory_hostname not in groups['kube_control_plane'] + - calico_datastore == "kdd" + +- name: Calico | Create Calico ipam manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: "0644" + with_items: + - {name: calico, file: calico-ipamconfig.yml, type: ipam} + when: + - ('kube_control_plane' in group_names) + - calico_datastore == "kdd" + +- name: Calico | Create ipamconfig resources + kube: + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/calico-ipamconfig.yml" + state: "latest" + register: resource_result + until: resource_result is succeeded + retries: 4 + when: + - inventory_hostname == groups['kube_control_plane'][0] + - calico_datastore == "kdd" + +- name: Calico | Peer with Calico Route Reflector + include_tasks: peer_with_calico_rr.yml + when: + - peer_with_calico_rr | default(false) + +- name: Calico | Peer with the router + include_tasks: peer_with_router.yml + when: + - peer_with_router | default(false) diff --git a/roles/network_plugin/calico/tasks/main.yml b/roles/network_plugin/calico/tasks/main.yml new file mode 100644 index 00000000000..5921a91f338 --- /dev/null +++ b/roles/network_plugin/calico/tasks/main.yml @@ -0,0 +1,9 @@ +--- +- name: Calico Pre tasks + import_tasks: pre.yml + +- name: Calico repos + import_tasks: repos.yml + +- name: Calico install + include_tasks: install.yml diff --git a/roles/network_plugin/calico/tasks/peer_with_calico_rr.yml b/roles/network_plugin/calico/tasks/peer_with_calico_rr.yml new file mode 100644 index 00000000000..53b49c1c4a9 --- /dev/null +++ b/roles/network_plugin/calico/tasks/peer_with_calico_rr.yml @@ -0,0 +1,86 @@ +--- +- name: Calico | Set group label for nodes + command: "{{ bin_dir }}/calicoctl.sh label node {{ inventory_hostname }} calico-group-id={{ calico_group_id }} --overwrite" + changed_when: false + register: calico_group_id_label + until: calico_group_id_label is succeeded + delay: "{{ retry_stagger | random + 3 }}" + retries: 10 + when: + - calico_group_id is defined + +- name: Calico | Configure peering with route reflectors at global scope + command: + cmd: "{{ bin_dir }}/calicoctl.sh apply -f -" + # pass through unchanged when it's already a string + stdin: "{{ stdin is string | ternary(stdin, stdin | to_json) }}" + vars: + stdin: > + {"apiVersion": "projectcalico.org/v3", + "kind": "BGPPeer", + "metadata": { + "name": "{{ calico_rr_id }}-to-node" + }, + "spec": { + "peerSelector": "calico-rr-id == '{{ calico_rr_id }}'", + "nodeSelector": "calico-group-id == '{{ calico_group_id }}'" + }} + register: output + retries: 4 + until: output.rc == 0 + delay: "{{ retry_stagger | random + 3 }}" + when: + - calico_rr_id is defined + - calico_group_id is defined + - ('calico_rr' in group_names) + +- name: Calico | Configure all nodes to peer with route reflectors + command: + cmd: "{{ bin_dir }}/calicoctl.sh apply -f -" + # pass through unchanged when it's already a string + stdin: "{{ stdin is string | ternary(stdin, stdin | to_json) }}" + vars: + stdin: > + {"apiVersion": "projectcalico.org/v3", + "kind": "BGPPeer", + "metadata": { + "name": "peer-to-rrs" + }, + "spec": { + "nodeSelector": "!has(i-am-a-route-reflector)", + "peerSelector": "has(i-am-a-route-reflector)" + }} + register: output + retries: 4 + until: output.rc == 0 + delay: "{{ retry_stagger | random + 3 }}" + with_items: + - "{{ groups['calico_rr'] | default([]) }}" + when: + - inventory_hostname == 
groups['kube_control_plane'][0] + - calico_rr_id is not defined or calico_group_id is not defined + +- name: Calico | Configure route reflectors to peer with each other + command: + cmd: "{{ bin_dir }}/calicoctl.sh apply -f -" + # pass through unchanged when it's already a string + stdin: "{{ stdin is string | ternary(stdin, stdin | to_json) }}" + vars: + stdin: > + {"apiVersion": "projectcalico.org/v3", + "kind": "BGPPeer", + "metadata": { + "name": "rr-mesh" + }, + "spec": { + "nodeSelector": "has(i-am-a-route-reflector)", + "peerSelector": "has(i-am-a-route-reflector)" + }} + register: output + retries: 4 + until: output.rc == 0 + delay: "{{ retry_stagger | random + 3 }}" + with_items: + - "{{ groups['calico_rr'] | default([]) }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] diff --git a/roles/network_plugin/calico/tasks/peer_with_router.yml b/roles/network_plugin/calico/tasks/peer_with_router.yml new file mode 100644 index 00000000000..ec4104bbe6a --- /dev/null +++ b/roles/network_plugin/calico/tasks/peer_with_router.yml @@ -0,0 +1,116 @@ +--- +- name: Calico | Configure peering with router(s) at global scope + command: + cmd: "{{ bin_dir }}/calicoctl.sh apply -f -" + stdin: "{{ stdin is string | ternary(stdin, stdin | to_json) }}" + vars: + stdin: > + {"apiVersion": "projectcalico.org/v3", + "kind": "BGPPeer", + "metadata": { + "name": "global-{{ item.name | default(item.router_id | replace(':', '-')) }}" + }, + "spec": { + "asNumber": "{{ item.as }}", + "peerIP": "{{ item.router_id }}" + }} + register: output + retries: 4 + until: output.rc == 0 + delay: "{{ retry_stagger | random + 3 }}" + with_items: + - "{{ peers | default([]) | selectattr('scope', 'defined') | selectattr('scope', 'equalto', 'global') | list }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Calico | Get node for per-node peering + command: + cmd: "{{ bin_dir }}/calicoctl.sh get node {{ inventory_hostname }}" + register: output_get_node + when: + - ('k8s_cluster' in group_names) + - local_as is defined + - groups['calico_rr'] | default([]) | length == 0 + delegate_to: "{{ groups['kube_control_plane'][0] }}" + +- name: Calico | Patch node asNumber for per-node peering + command: + cmd: |- + {{ bin_dir }}/calicoctl.sh patch node "{{ inventory_hostname }}" --patch '{{ patch is string | ternary(patch, patch | to_json) }}' + vars: + patch: > + {"spec": { + "bgp": { + "asNumber": "{{ local_as }}" + }, + "orchRefs": [{"nodeName": "{{ inventory_hostname }}", "orchestrator": "k8s"}] + }} + register: output + retries: 0 + until: output.rc == 0 + delay: "{{ retry_stagger | random + 3 }}" + when: + - ('k8s_cluster' in group_names) + - local_as is defined + - groups['calico_rr'] | default([]) | length == 0 + - output_get_node.rc == 0 + +- name: Calico | Configure node asNumber for per-node peering + command: + cmd: "{{ bin_dir }}/calicoctl.sh apply -f -" + stdin: "{{ stdin is string | ternary(stdin, stdin | to_json) }}" + vars: + stdin: > + {"apiVersion": "projectcalico.org/v3", + "kind": "Node", + "metadata": { + "name": "{{ inventory_hostname }}" + }, + "spec": { + "bgp": { + "asNumber": "{{ local_as }}" + }, + "orchRefs":[{"nodeName":"{{ inventory_hostname }}","orchestrator":"k8s"}] + }} + register: output + retries: 4 + until: output.rc == 0 + delay: "{{ retry_stagger | random + 3 }}" + when: + - ('k8s_cluster' in group_names) + - local_as is defined + - groups['calico_rr'] | default([]) | length == 0 + - output_get_node.rc != 0 + +- name: Calico | Configure peering with router(s) at node 
scope + command: + cmd: "{{ bin_dir }}/calicoctl.sh apply -f -" + stdin: "{{ stdin is string | ternary(stdin, stdin | to_json) }}" + vars: + stdin: > + {"apiVersion": "projectcalico.org/v3", + "kind": "BGPPeer", + "metadata": { + "name": "{{ inventory_hostname }}-{{ item.name | default(item.router_id | replace(':', '-')) }}" + }, + "spec": { + "asNumber": "{{ item.as }}", + "node": "{{ inventory_hostname }}", + "peerIP": "{{ item.router_id }}", + {% if calico_version is version('3.26.0', '>=') and (item.filters | default([]) | length > 0) %} + "filters": {{ item.filters }}, + {% endif %} + {% if calico_version is version('3.23.0', '>=') and (item.numallowedlocalasnumbers | default(0) > 0) %} + "numAllowedLocalASNumbers": {{ item.numallowedlocalasnumbers }}, + {% endif %} + "sourceAddress": "{{ item.sourceaddress | default('UseNodeIP') }}" + }} + register: output + retries: 4 + until: output.rc == 0 + delay: "{{ retry_stagger | random + 3 }}" + with_items: + - "{{ peers | default([]) | selectattr('scope', 'undefined') | list | union(peers | default([]) | selectattr('scope', 'defined') | selectattr('scope', 'equalto', 'node') | list ) }}" + delegate_to: "{{ groups['kube_control_plane'][0] }}" + when: + - ('k8s_cluster' in group_names) diff --git a/roles/network_plugin/calico/tasks/pre.yml b/roles/network_plugin/calico/tasks/pre.yml new file mode 100644 index 00000000000..f3f7797cb34 --- /dev/null +++ b/roles/network_plugin/calico/tasks/pre.yml @@ -0,0 +1,36 @@ +--- +- name: Slurp CNI config + slurp: + src: /etc/cni/net.d/10-calico.conflist + register: calico_cni_config_slurp + failed_when: false + +- name: Gather calico facts + tags: + - facts + when: calico_cni_config_slurp.content is defined + block: + - name: Set fact calico_cni_config from slurped CNI config + set_fact: + calico_cni_config: "{{ calico_cni_config_slurp['content'] | b64decode | from_json }}" + - name: Set fact calico_datastore to etcd if needed + set_fact: + calico_datastore: etcd + when: + - "'plugins' in calico_cni_config" + - "'etcd_endpoints' in calico_cni_config.plugins.0" + +- name: Calico | Gather os specific variables + include_vars: "{{ item }}" + with_first_found: + - files: + - "{{ ansible_distribution | lower }}-{{ ansible_distribution_version | lower | replace('/', '_') }}.yml" + - "{{ ansible_distribution | lower }}-{{ ansible_distribution_release }}.yml" + - "{{ ansible_distribution | lower }}-{{ ansible_distribution_major_version | lower | replace('/', '_') }}.yml" + - "{{ ansible_distribution | lower }}.yml" + - "{{ ansible_os_family | lower }}-{{ ansible_architecture }}.yml" + - "{{ ansible_os_family | lower }}.yml" + - defaults.yml + paths: + - ../vars + skip: true diff --git a/roles/network_plugin/calico/tasks/repos.yml b/roles/network_plugin/calico/tasks/repos.yml new file mode 100644 index 00000000000..7eba916bbab --- /dev/null +++ b/roles/network_plugin/calico/tasks/repos.yml @@ -0,0 +1,21 @@ +--- +- name: Calico | Add wireguard yum repo + when: + - calico_wireguard_enabled + block: + + - name: Calico | Add wireguard yum repo + yum_repository: + name: copr:copr.fedorainfracloud.org:jdoss:wireguard + file: _copr:copr.fedorainfracloud.org:jdoss:wireguard + description: Copr repo for wireguard owned by jdoss + baseurl: "{{ calico_wireguard_repo }}" + gpgcheck: true + gpgkey: https://download.copr.fedorainfracloud.org/results/jdoss/wireguard/pubkey.gpg + skip_if_unavailable: true + enabled: true + repo_gpgcheck: false + when: + - ansible_os_family in ['RedHat'] + - ansible_distribution not in ['Fedora'] 
+ - ansible_facts['distribution_major_version'] | int < 9 diff --git a/roles/network_plugin/calico/tasks/reset.yml b/roles/network_plugin/calico/tasks/reset.yml new file mode 100644 index 00000000000..16c85097710 --- /dev/null +++ b/roles/network_plugin/calico/tasks/reset.yml @@ -0,0 +1,30 @@ +--- +- name: Reset | check vxlan.calico network device + stat: + path: /sys/class/net/vxlan.calico + get_attributes: false + get_checksum: false + get_mime: false + register: vxlan + +- name: Reset | remove the network vxlan.calico device created by calico + command: ip link del vxlan.calico + when: vxlan.stat.exists + +- name: Reset | check dummy0 network device + stat: + path: /sys/class/net/dummy0 + get_attributes: false + get_checksum: false + get_mime: false + register: dummy0 + +- name: Reset | remove the network device created by calico + command: ip link del dummy0 + when: dummy0.stat.exists + +- name: Reset | get and remove remaining routes set by bird + shell: set -o pipefail && ip route show proto bird | xargs -i bash -c "ip route del {} proto bird " + args: + executable: /bin/bash + changed_when: false diff --git a/roles/network_plugin/calico/tasks/typha_certs.yml b/roles/network_plugin/calico/tasks/typha_certs.yml new file mode 100644 index 00000000000..ad87f5a024c --- /dev/null +++ b/roles/network_plugin/calico/tasks/typha_certs.yml @@ -0,0 +1,52 @@ +--- +- name: Calico | Check if typha-server exists + command: "{{ kubectl }} -n kube-system get secret typha-server" + register: typha_server_secret + changed_when: false + failed_when: false + +- name: Calico | Ensure calico certs dir + file: + path: /etc/calico/certs + state: directory + mode: "0755" + when: typha_server_secret.rc != 0 + +- name: Calico | Copy ssl script for typha certs + template: + src: make-ssl-calico.sh.j2 + dest: "{{ bin_dir }}/make-ssl-typha.sh" + mode: "0755" + + when: typha_server_secret.rc != 0 + +- name: Calico | Copy ssl config for typha certs + copy: + src: openssl.conf + dest: /etc/calico/certs/openssl.conf + mode: "0644" + when: typha_server_secret.rc != 0 + +- name: Calico | Generate typha certs + command: >- + {{ bin_dir }}/make-ssl-typha.sh + -f /etc/calico/certs/openssl.conf + -c {{ kube_cert_dir }} + -d /etc/calico/certs + -s typha + when: typha_server_secret.rc != 0 + +- name: Calico | Create typha tls secrets + command: >- + {{ kubectl }} -n kube-system + create secret tls {{ item.name }} + --cert {{ item.cert }} + --key {{ item.key }} + with_items: + - name: typha-server + cert: /etc/calico/certs/typha-server.crt + key: /etc/calico/certs/typha-server.key + - name: typha-client + cert: /etc/calico/certs/typha-client.crt + key: /etc/calico/certs/typha-client.key + when: typha_server_secret.rc != 0 diff --git a/roles/network_plugin/calico/templates/calico-apiserver-ns.yml.j2 b/roles/network_plugin/calico/templates/calico-apiserver-ns.yml.j2 new file mode 100644 index 00000000000..a1bdfcb4a1a --- /dev/null +++ b/roles/network_plugin/calico/templates/calico-apiserver-ns.yml.j2 @@ -0,0 +1,10 @@ +# This is a tech-preview manifest which installs the Calico API server. Note that this manifest is liable to change +# or be removed in future releases without further warning. +# +# Namespace and namespace-scoped resources. 
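+# Note (added sketch): tasks/calico_apiserver_certs.yml applies this namespace
+# manifest first and then creates the calico-apiserver-certs secret inside it.
+# Done by hand, the equivalent would be roughly (assuming the certs were
+# already generated under /etc/calico/certs by make-ssl-apiserver.sh):
+#   kubectl apply -f calico-apiserver-ns.yml
+#   kubectl -n calico-apiserver create secret generic calico-apiserver-certs \
+#     --from-file=/etc/calico/certs/apiserver.crt \
+#     --from-file=/etc/calico/certs/apiserver.key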
+apiVersion: v1 +kind: Namespace +metadata: + labels: + name: calico-apiserver + name: calico-apiserver diff --git a/roles/network_plugin/calico/templates/calico-apiserver.yml.j2 b/roles/network_plugin/calico/templates/calico-apiserver.yml.j2 new file mode 100644 index 00000000000..e49c2b2d08b --- /dev/null +++ b/roles/network_plugin/calico/templates/calico-apiserver.yml.j2 @@ -0,0 +1,301 @@ +# Policy to ensure the API server isn't cut off. Can be modified, but ensure +# that the main API server is always able to reach the Calico API server. +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: allow-apiserver + namespace: calico-apiserver +spec: + podSelector: + matchLabels: + apiserver: "true" + ingress: + - ports: + - protocol: TCP + port: 5443 + +--- + +apiVersion: v1 +kind: Service +metadata: + name: calico-api + namespace: calico-apiserver +spec: + ports: + - name: apiserver + port: 443 + protocol: TCP + targetPort: 5443 + selector: + apiserver: "true" + type: ClusterIP + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + apiserver: "true" + k8s-app: calico-apiserver + name: calico-apiserver + namespace: calico-apiserver +spec: + replicas: 1 + selector: + matchLabels: + apiserver: "true" + strategy: + type: Recreate + template: + metadata: + labels: + apiserver: "true" + k8s-app: calico-apiserver + name: calico-apiserver + namespace: calico-apiserver + spec: + containers: + - args: + - --secure-port=5443 + env: + - name: DATASTORE_TYPE + value: kubernetes + image: {{ calico_apiserver_image_repo }}:{{ calico_apiserver_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + livenessProbe: + httpGet: + path: /version + port: 5443 + scheme: HTTPS + initialDelaySeconds: 90 + periodSeconds: 10 + name: calico-apiserver +{% if calico_version is version('3.28.0', '>=') %} + readinessProbe: + httpGet: + path: /readyz + port: 5443 + scheme: HTTPS + timeoutSeconds: 5 + periodSeconds: 60 +{% else %} + readinessProbe: + exec: + command: + - /code/filecheck + failureThreshold: 5 + initialDelaySeconds: 5 + periodSeconds: 10 +{% endif %} + securityContext: + privileged: false + runAsUser: 0 + volumeMounts: + - mountPath: /code/apiserver.local.config/certificates + name: calico-apiserver-certs + dnsPolicy: ClusterFirst + nodeSelector: + kubernetes.io/os: linux + restartPolicy: Always + serviceAccount: calico-apiserver + serviceAccountName: calico-apiserver + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + volumes: + - name: calico-apiserver-certs + secret: + secretName: calico-apiserver-certs + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-apiserver + namespace: calico-apiserver + +--- + +# Cluster-scoped resources below here. 
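+# Note (added sketch): tasks/install.yml fills in the caBundle below by reading
+# the apiserver.crt data from the calico-apiserver-certs secret, so the
+# aggregation layer trusts the certificate generated by make-ssl-apiserver.sh.
+# Once deployed, the registration can be checked with something like:
+#   kubectl get apiservice v3.projectcalico.org
+#   kubectl api-resources --api-group=projectcalico.org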
+apiVersion: apiregistration.k8s.io/v1 +kind: APIService +metadata: + name: v3.projectcalico.org +spec: + group: projectcalico.org + groupPriorityMinimum: 1500 + caBundle: {{ calico_apiserver_cabundle }} + service: + name: calico-api + namespace: calico-apiserver + port: 443 + version: v3 + versionPriority: 200 + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: calico-crds +rules: +- apiGroups: + - extensions + - networking.k8s.io + - "" + resources: + - networkpolicies + - nodes + - namespaces + - pods + - serviceaccounts + verbs: + - get + - list + - watch +- apiGroups: + - crd.projectcalico.org + resources: + - globalnetworkpolicies + - networkpolicies + - clusterinformations + - hostendpoints + - globalnetworksets + - networksets + - bgpconfigurations + - bgppeers + - bgpfilters + - felixconfigurations + - kubecontrollersconfigurations + - ippools + - ipamconfigs + - ipreservations + - ipamblocks + - blockaffinities + - caliconodestatuses + - tiers + verbs: + - get + - list + - watch + - create + - update + - delete +{% if calico_version is version('3.28.0', '>=') %} +- apiGroups: + - policy + resourceNames: + - calico-apiserver + resources: + - podsecuritypolicies + verbs: + - use +{% endif %} +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: calico-extension-apiserver-auth-access +rules: +- apiGroups: + - "" + resourceNames: + - extension-apiserver-authentication + resources: + - configmaps + verbs: + - list + - watch + - get +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterroles + - clusterrolebindings + - roles + - rolebindings + verbs: + - get + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: calico-webhook-reader +rules: +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - get + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: calico-apiserver-access-crds +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-crds +subjects: +- kind: ServiceAccount + name: calico-apiserver + namespace: calico-apiserver + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: calico-apiserver-delegate-auth +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: +- kind: ServiceAccount + name: calico-apiserver + namespace: calico-apiserver + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: calico-apiserver-webhook-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-webhook-reader +subjects: +- kind: ServiceAccount + name: calico-apiserver + namespace: calico-apiserver + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: calico-extension-apiserver-auth-access +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-extension-apiserver-auth-access +subjects: +- kind: ServiceAccount + name: calico-apiserver + namespace: calico-apiserver diff --git a/roles/network_plugin/calico/templates/calico-config.yml.j2 b/roles/network_plugin/calico/templates/calico-config.yml.j2 new file mode 100644 index 00000000000..1e87917ea71 --- /dev/null +++ b/roles/network_plugin/calico/templates/calico-config.yml.j2 @@ -0,0 +1,106 @@ +kind: ConfigMap 
+apiVersion: v1 +metadata: + name: calico-config + namespace: kube-system +data: +{% if calico_datastore == "etcd" %} + etcd_endpoints: "{{ etcd_access_addresses }}" + etcd_ca: "/calico-secrets/ca_cert.crt" + etcd_cert: "/calico-secrets/cert.crt" + etcd_key: "/calico-secrets/key.pem" +{% elif calico_datastore == "kdd" and typha_enabled %} + # To enable Typha, set this to "calico-typha" *and* set a non-zero value for Typha replicas + # below. We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is + # essential. + typha_service_name: "calico-typha" +{% endif %} +{% if calico_network_backend == 'bird' %} + cluster_type: "kubespray,bgp" + calico_backend: "bird" +{% else %} + cluster_type: "kubespray" + calico_backend: "{{ calico_network_backend }}" +{% endif %} +{% if inventory_hostname in groups['k8s_cluster'] and peer_with_router | default(false) %} + as: "{{ local_as | default(global_as_num) }}" +{% endif -%} + # The CNI network configuration to install on each node. The special + # values in this config will be automatically populated. + cni_network_config: |- + { + "name": "{{ calico_cni_name }}", + "cniVersion":"0.3.1", + "plugins":[ + { + {% if calico_datastore == "kdd" %} + "datastore_type": "kubernetes", + "nodename": "__KUBERNETES_NODE_NAME__", + {% endif %} + "type": "calico", + "log_level": "info", + {% if calico_cni_log_file_path %} + "log_file_path": "{{ calico_cni_log_file_path }}", + {% endif %} + {% if calico_datastore == "etcd" %} + "etcd_endpoints": "{{ etcd_access_addresses }}", + "etcd_cert_file": "{{ calico_cert_dir }}/cert.crt", + "etcd_key_file": "{{ calico_cert_dir }}/key.pem", + "etcd_ca_cert_file": "{{ calico_cert_dir }}/ca_cert.crt", + {% endif %} + {% if calico_ipam_host_local %} + "ipam": { + "type": "host-local", + "subnet": "usePodCidr" + }, + {% else %} + "ipam": { + "type": "calico-ipam", + {% if ipv4_stack %} + "assign_ipv4": "true"{{ ',' if (ipv6_stack and ipv4_stack) }} + {% endif %} + {% if ipv6_stack %} + "assign_ipv6": "true" + {% endif %} + }, + {% endif %} + {% if calico_allow_ip_forwarding %} + "container_settings": { + "allow_ip_forwarding": true + }, + {% endif %} + {% if (calico_feature_control is defined) and (calico_feature_control | length > 0) %} + "feature_control": { + {% for fc in calico_feature_control -%} + {% set fcval = calico_feature_control[fc] -%} + "{{ fc }}": {{ (fcval | string | lower) if (fcval == true or fcval == false) else "\"" + fcval + "\"" }}{{ "," if not loop.last else "" }} + {% endfor -%} + {{- "" }} + }, + {% endif %} + {% if enable_network_policy %} + "policy": { + "type": "k8s" + }, + {% endif %} + {% if calico_mtu is defined and calico_mtu is number %} + "mtu": {{ calico_mtu }}, + {% endif %} + "kubernetes": { + "kubeconfig": "__KUBECONFIG_FILEPATH__" + } + }, + { + "type":"portmap", + "capabilities": { + "portMappings": true + } + }, + { + "type":"bandwidth", + "capabilities": { + "bandwidth": true + } + } + ] + } diff --git a/roles/network_plugin/calico/templates/calico-cr.yml.j2 b/roles/network_plugin/calico/templates/calico-cr.yml.j2 new file mode 100644 index 00000000000..96f59df2955 --- /dev/null +++ b/roles/network_plugin/calico/templates/calico-cr.yml.j2 @@ -0,0 +1,213 @@ +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-cni-plugin +rules: + - apiGroups: [""] + resources: + - pods + - nodes + - namespaces + verbs: + - get + - apiGroups: [""] + resources: + - pods/status + verbs: + - patch + - apiGroups: [""] + resources: + - nodes/status + 
verbs: + - update + - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + - ipamblocks + - ipamhandles + - clusterinformations + - ippools + - ipreservations + - ipamconfigs + verbs: + - get + - list + - create + - update + - delete +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-node + namespace: kube-system +rules: + - apiGroups: [""] + resources: + - pods + - nodes + - namespaces + - configmaps + verbs: + - get + # EndpointSlices are used for Service-based network policy rule + # enforcement. + - apiGroups: ["discovery.k8s.io"] + resources: + - endpointslices + verbs: + - watch + - list + - apiGroups: [""] + resources: + - endpoints + - services + verbs: + - watch + - list +{% if calico_datastore == "kdd" %} + # Used to discover Typhas. + - get +{% endif %} + - apiGroups: [""] + resources: + - nodes/status + verbs: + # Needed for clearing NodeNetworkUnavailable flag. + - patch +{% if calico_datastore == "kdd" %} + # Calico stores some configuration information in node annotations. + - update + # Watch for changes to Kubernetes NetworkPolicies. + - apiGroups: ["networking.k8s.io"] + resources: + - networkpolicies + verbs: + - watch + - list + # Watch for changes to Kubernetes AdminNetworkPolicies. + - apiGroups: ["policy.networking.k8s.io"] + resources: + - adminnetworkpolicies + verbs: + - watch + - list + # Used by Calico for policy information. + - apiGroups: [""] + resources: + - pods + - namespaces + - serviceaccounts + verbs: + - list + - watch + # The CNI plugin patches pods/status. + - apiGroups: [""] + resources: + - pods/status + verbs: + - patch + # Calico monitors various CRDs for config. + - apiGroups: ["crd.projectcalico.org"] + resources: + - globalfelixconfigs + - felixconfigurations + - bgppeers + - bgpfilters + - globalbgpconfigs + - bgpconfigurations + - ippools + - ipreservations + - ipamblocks + - globalnetworkpolicies + - globalnetworksets + - networkpolicies + - networksets + - clusterinformations + - hostendpoints + - blockaffinities + - caliconodestatuses + - tiers + verbs: + - get + - list + - watch + # Calico creates some tiers on startup. + - apiGroups: ["crd.projectcalico.org"] + resources: + - tiers + verbs: + - create + # Calico must create and update some CRDs on startup. + - apiGroups: ["crd.projectcalico.org"] + resources: + - ippools + - felixconfigurations + - clusterinformations + verbs: + - create + - update + # Calico must update some CRDs. + - apiGroups: [ "crd.projectcalico.org" ] + resources: + - caliconodestatuses + verbs: + - update + # Calico stores some configuration information on the node. + - apiGroups: [""] + resources: + - nodes + verbs: + - get + - list + - watch + # These permissions are only required for upgrade from v2.6, and can + # be removed after upgrade or on fresh installations. + - apiGroups: ["crd.projectcalico.org"] + resources: + - bgpconfigurations + - bgppeers + verbs: + - create + - update + # These permissions are required for Calico CNI to perform IPAM allocations. + - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + - ipamblocks + - ipamhandles + verbs: + - get + - list + - create + - update + - delete + - apiGroups: ["crd.projectcalico.org"] + resources: + - ipamconfigs + verbs: + - get + - create + # Block affinities must also be watchable by confd for route aggregation. + - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + verbs: + - watch + # The Calico IPAM migration needs to get daemonsets. 
These permissions can be + # removed if not upgrading from an installation using host-local IPAM. + - apiGroups: ["apps"] + resources: + - daemonsets + verbs: + - get +{% endif %} + # Used for creating service account tokens to be used by the CNI plugin + - apiGroups: [""] + resources: + - serviceaccounts/token + resourceNames: + - calico-cni-plugin + verbs: + - create diff --git a/roles/network_plugin/calico/templates/calico-crb.yml.j2 b/roles/network_plugin/calico/templates/calico-crb.yml.j2 new file mode 100644 index 00000000000..add99ba5253 --- /dev/null +++ b/roles/network_plugin/calico/templates/calico-crb.yml.j2 @@ -0,0 +1,28 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: calico-node +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-node +subjects: +- kind: ServiceAccount + name: calico-node + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: calico-cni-plugin +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-cni-plugin +subjects: +- kind: ServiceAccount + name: calico-cni-plugin + namespace: kube-system diff --git a/roles/network_plugin/calico/templates/calico-ipamconfig.yml.j2 b/roles/network_plugin/calico/templates/calico-ipamconfig.yml.j2 new file mode 100644 index 00000000000..af7e2117cef --- /dev/null +++ b/roles/network_plugin/calico/templates/calico-ipamconfig.yml.j2 @@ -0,0 +1,8 @@ +apiVersion: crd.projectcalico.org/v1 +kind: IPAMConfig +metadata: + name: default +spec: + autoAllocateBlocks: {{ calico_ipam_autoallocateblocks }} + strictAffinity: {{ calico_ipam_strictaffinity }} + maxBlocksPerHost: {{ calico_ipam_maxblocksperhost }} diff --git a/roles/network_plugin/calico/templates/calico-node-sa.yml.j2 b/roles/network_plugin/calico/templates/calico-node-sa.yml.j2 new file mode 100644 index 00000000000..07433039bdd --- /dev/null +++ b/roles/network_plugin/calico/templates/calico-node-sa.yml.j2 @@ -0,0 +1,13 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-node + namespace: kube-system + +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-cni-plugin + namespace: kube-system diff --git a/roles/network_plugin/calico/templates/calico-node.yml.j2 b/roles/network_plugin/calico/templates/calico-node.yml.j2 new file mode 100644 index 00000000000..d5b509bbafe --- /dev/null +++ b/roles/network_plugin/calico/templates/calico-node.yml.j2 @@ -0,0 +1,509 @@ +--- +# This manifest installs the calico/node container, as well +# as the Calico CNI plugins and network config on +# each control plane and worker node in a Kubernetes cluster. +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: calico-node + namespace: kube-system + labels: + k8s-app: calico-node +spec: + selector: + matchLabels: + k8s-app: calico-node + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + template: + metadata: + labels: + k8s-app: calico-node + annotations: +{% if calico_datastore == "etcd" %} + kubespray.etcd-cert/serial: "{{ etcd_client_cert_serial }}" +{% endif %} +{% if calico_felix_prometheusmetricsenabled %} + prometheus.io/scrape: 'true' + prometheus.io/port: "{{ calico_felix_prometheusmetricsport }}" +{% endif %} + spec: + nodeSelector: + {{ calico_ds_nodeselector }} + priorityClassName: system-node-critical + hostNetwork: true + serviceAccountName: calico-node + tolerations: + # Make sure calico-node gets scheduled on all nodes. 
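+        # A toleration with operator Exists and no key matches every taint, so
+        # the blanket entries below would also cover site-specific taints, e.g.
+        # a hypothetical "dedicated=gpu:NoSchedule":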
+ - effect: NoSchedule + operator: Exists + # Mark the pod as a critical add-on for rescheduling. + - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force + # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. + terminationGracePeriodSeconds: 0 + initContainers: +{% if calico_datastore == "kdd" and not calico_ipam_host_local %} + # This container performs upgrade from host-local IPAM to calico-ipam. + # It can be deleted if this is a fresh installation, or if you have already + # upgraded to use calico-ipam. + - name: upgrade-ipam + image: {{ calico_cni_image_repo }}:{{ calico_cni_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + command: ["/opt/cni/bin/calico-ipam", "-upgrade"] + envFrom: + - configMapRef: + # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. + name: kubernetes-services-endpoint + optional: true + env: + - name: KUBERNETES_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: CALICO_NETWORKING_BACKEND + valueFrom: + configMapKeyRef: + name: calico-config + key: calico_backend + volumeMounts: + - mountPath: /var/lib/cni/networks + name: host-local-net-dir + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + securityContext: + privileged: true +{% endif %} + # This container installs the Calico CNI binaries + # and CNI network config file on each node. + - name: install-cni + image: {{ calico_cni_image_repo }}:{{ calico_cni_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + command: ["/opt/cni/bin/install"] + envFrom: + - configMapRef: + # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. + name: kubernetes-services-endpoint + optional: true + env: + # The CNI network config to install on each node. + - name: CNI_NETWORK_CONFIG + valueFrom: + configMapKeyRef: + name: calico-config + key: cni_network_config + # Name of the CNI config file to create. + - name: CNI_CONF_NAME + value: "10-calico.conflist" +{% if calico_mtu is defined %} + # CNI MTU Config variable + - name: CNI_MTU + value: "{{ calico_veth_mtu | default(calico_mtu) }}" +{% endif %} + # Prevents the container from sleeping forever. + - name: SLEEP + value: "false" +{% if calico_datastore == "etcd" %} + - name: ETCD_ENDPOINTS + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_endpoints +{% endif %} +{% if calico_datastore == "kdd" %} + # Set the hostname based on the k8s node name. + - name: KUBERNETES_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName +{% endif %} + volumeMounts: + - mountPath: /host/etc/cni/net.d + name: cni-net-dir + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + securityContext: + privileged: true + # This init container mounts the necessary filesystems needed by the BPF data plane + # i.e. bpf at /sys/fs/bpf and cgroup2 at /run/calico/cgroup. Calico-node initialisation is executed + # in best effort fashion, i.e. no failure for errors, to not disrupt pod creation in iptable mode. + - name: "mount-bpffs" + image: {{ calico_node_image_repo }}:{{ calico_node_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + command: ["calico-node", "-init", "-best-effort"] + volumeMounts: + - mountPath: /sys/fs + name: sys-fs + # Bidirectional is required to ensure that the new mount we make at /sys/fs/bpf propagates to the host + # so that it outlives the init container. 
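+          # Kubernetes only allows Bidirectional mount propagation in
+          # privileged containers, which is why this init container runs with
+          # privileged: true below.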
+ mountPropagation: Bidirectional + - mountPath: /var/run/calico + name: var-run-calico + # Bidirectional is required to ensure that the new mount we make at /run/calico/cgroup propagates to the host + # so that it outlives the init container. + mountPropagation: Bidirectional + # Mount /proc/ from host which usually is an init program at /nodeproc. It's needed by mountns binary, + # executed by calico-node, to mount root cgroup2 fs at /run/calico/cgroup to attach CTLB programs correctly. + - mountPath: /nodeproc + name: nodeproc + readOnly: true + securityContext: + privileged: true + containers: + # Runs calico/node container on each Kubernetes node. This + # container programs network policy and routes on each + # host. + - name: calico-node + image: {{ calico_node_image_repo }}:{{ calico_node_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + envFrom: + - configMapRef: + # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. + name: kubernetes-services-endpoint + optional: true + env: + # The location of the Calico etcd cluster. +{% if calico_datastore == "etcd" %} + - name: ETCD_ENDPOINTS + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_endpoints + # Location of the CA certificate for etcd. + - name: ETCD_CA_CERT_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_ca + # Location of the client key for etcd. + - name: ETCD_KEY_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_key + # Location of the client certificate for etcd. + - name: ETCD_CERT_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_cert +{% elif calico_datastore == "kdd" %} + # Use Kubernetes API as the backing datastore. + - name: DATASTORE_TYPE + value: "kubernetes" +{% if typha_enabled %} + # Typha support: controlled by the ConfigMap. + - name: FELIX_TYPHAK8SSERVICENAME + valueFrom: + configMapKeyRef: + name: calico-config + key: typha_service_name +{% if typha_secure %} + - name: FELIX_TYPHACN + value: typha-server + - name: FELIX_TYPHACAFILE + value: /etc/typha-ca/ca.crt + - name: FELIX_TYPHACERTFILE + value: /etc/typha-client/typha-client.crt + - name: FELIX_TYPHAKEYFILE + value: /etc/typha-client/typha-client.key +{% endif %} +{% endif %} + # Wait for the datastore. + - name: WAIT_FOR_DATASTORE + value: "true" +{% endif %} +{% if calico_network_backend == 'vxlan' %} + - name: FELIX_VXLANVNI + value: "{{ calico_vxlan_vni }}" + - name: FELIX_VXLANPORT + value: "{{ calico_vxlan_port }}" +{% endif %} + # Choose the backend to use. + - name: CALICO_NETWORKING_BACKEND + valueFrom: + configMapKeyRef: + name: calico-config + key: calico_backend + # Cluster type to identify the deployment type + - name: CLUSTER_TYPE + value: "k8s,bgp" + # Set noderef for node controller. + - name: CALICO_K8S_NODE_REF + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # Disable file logging so `kubectl logs` works. + - name: CALICO_DISABLE_FILE_LOGGING + value: "true" + # Set Felix endpoint to host default action to ACCEPT. 
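+            # Felix accepts ACCEPT, DROP or RETURN for this setting; the
+            # RETURN default used below hands host-bound endpoint traffic back
+            # to the host's own iptables chains instead of accepting it
+            # outright.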
+ - name: FELIX_DEFAULTENDPOINTTOHOSTACTION + value: "{{ calico_endpoint_to_host_action | default('RETURN') }}" + - name: FELIX_HEALTHHOST + value: "{{ calico_healthhost }}" +{% if kube_proxy_mode == 'ipvs' and kube_apiserver_node_port_range is defined %} + - name: FELIX_KUBENODEPORTRANGES + value: "{{ kube_apiserver_node_port_range.split('-')[0] }}:{{ kube_apiserver_node_port_range.split('-')[1] }}" +{% endif %} + - name: FELIX_IPTABLESBACKEND + value: "{{ calico_iptables_backend }}" + - name: FELIX_IPTABLESLOCKTIMEOUTSECS + value: "{{ calico_iptables_lock_timeout_secs }}" + # The default IPv4 pool to create on startup if none exists. Pod IPs will be + # chosen from this range. Changing this value after installation will have + # no effect. This should fall within `--cluster-cidr`. + # - name: CALICO_IPV4POOL_CIDR + # value: "192.168.0.0/16" + - name: CALICO_IPV4POOL_IPIP + value: "{{ calico_ipv4pool_ipip }}" + # Enable or Disable VXLAN on the default IP pool. + - name: CALICO_IPV4POOL_VXLAN + value: "Never" + - name: FELIX_IPV6SUPPORT + value: "{{ ipv6_stack | default(false) }}" + # Set Felix logging to "info" + - name: FELIX_LOGSEVERITYSCREEN + value: "{{ calico_loglevel }}" + # Set Calico startup logging to "error" + - name: CALICO_STARTUP_LOGLEVEL + value: "{{ calico_node_startup_loglevel }}" + # Enable or disable usage report + - name: FELIX_USAGEREPORTINGENABLED + value: "{{ calico_usage_reporting }}" + # Set MTU for tunnel device used if ipip is enabled +{% if calico_mtu is defined %} + # Set MTU for tunnel device used if ipip is enabled + - name: FELIX_IPINIPMTU + value: "{{ calico_veth_mtu | default(calico_mtu) }}" + # Set MTU for the VXLAN tunnel device. + - name: FELIX_VXLANMTU + value: "{{ calico_veth_mtu | default(calico_mtu) }}" + # Set MTU for the Wireguard tunnel device. 
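+            # Rule of thumb from the Calico MTU guidance: usable MTU is the
+            # interface MTU minus the encapsulation overhead, e.g. on a
+            # 1500-byte NIC roughly 1480 for IPIP, 1450 for VXLAN and 1440 for
+            # WireGuard.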
+ - name: FELIX_WIREGUARDMTU + value: "{{ calico_veth_mtu | default(calico_mtu) }}" +{% endif %} + - name: FELIX_CHAININSERTMODE + value: "{{ calico_felix_chaininsertmode }}" + - name: FELIX_PROMETHEUSMETRICSENABLED + value: "{{ calico_felix_prometheusmetricsenabled }}" + - name: FELIX_PROMETHEUSMETRICSPORT + value: "{{ calico_felix_prometheusmetricsport }}" + - name: FELIX_PROMETHEUSGOMETRICSENABLED + value: "{{ calico_felix_prometheusgometricsenabled }}" + - name: FELIX_PROMETHEUSPROCESSMETRICSENABLED + value: "{{ calico_felix_prometheusprocessmetricsenabled }}" +{% if calico_ip_auto_method is defined %} + - name: IP_AUTODETECTION_METHOD + value: "{{ calico_ip_auto_method }}" +{% else %} + - name: NODEIP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: IP_AUTODETECTION_METHOD + value: "can-reach=$(NODEIP)" +{% endif %} +{% if ipv4_stack %} + - name: IP + value: "autodetect" +{% else %} + - name: IP + value: none +{% endif %} +{% if ipv6_stack %} + - name: IP6 + value: autodetect +{% endif %} +{% if calico_ip6_auto_method is defined and ipv6_stack %} + - name: IP6_AUTODETECTION_METHOD + value: "{{ calico_ip6_auto_method }}" +{% endif %} +{% if calico_felix_mtu_iface_pattern is defined %} + - name: FELIX_MTUIFACEPATTERN + value: "{{ calico_felix_mtu_iface_pattern }}" +{% endif %} +{% if calico_use_default_route_src_ipaddr | default(false) %} + - name: FELIX_DEVICEROUTESOURCEADDRESS + valueFrom: + fieldRef: + fieldPath: status.hostIP +{% endif %} + - name: NODENAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: FELIX_HEALTHENABLED + value: "true" + - name: FELIX_IGNORELOOSERPF + value: "{{ calico_node_ignorelooserpf }}" + - name: CALICO_MANAGE_CNI + value: "true" +{% if calico_ipam_host_local %} + - name: USE_POD_CIDR + value: "true" +{% endif %} +{% if calico_node_extra_envs is defined %} +{% for key in calico_node_extra_envs %} + - name: {{ key }} + value: "{{ calico_node_extra_envs[key] }}" +{% endfor %} +{% endif %} + securityContext: + privileged: true + resources: + limits: +{% if calico_node_cpu_limit != "0" %} + cpu: {{ calico_node_cpu_limit }} +{% endif %} + memory: {{ calico_node_memory_limit }} + requests: + cpu: {{ calico_node_cpu_requests }} + memory: {{ calico_node_memory_requests }} + lifecycle: + preStop: + exec: + command: + - /bin/calico-node + - -shutdown + livenessProbe: + exec: + command: + - /bin/calico-node + - -felix-live +{% if calico_network_backend == "bird" %} + - -bird-live +{% endif %} + periodSeconds: 10 + initialDelaySeconds: 10 + timeoutSeconds: {{ calico_node_livenessprobe_timeout | default(10) }} + failureThreshold: 6 + readinessProbe: + exec: + command: + - /bin/calico-node +{% if calico_network_backend == "bird" %} + - -bird-ready +{% endif %} + - -felix-ready + periodSeconds: 10 + timeoutSeconds: {{ calico_node_readinessprobe_timeout | default(10) }} + failureThreshold: 6 + volumeMounts: + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /var/run/calico + name: var-run-calico + readOnly: false + - mountPath: /var/lib/calico + name: var-lib-calico + readOnly: false +{% if calico_datastore == "etcd" %} + - mountPath: /calico-secrets + name: etcd-certs + readOnly: true +{% endif %} + - name: xtables-lock + mountPath: /run/xtables.lock + readOnly: false + # For maintaining CNI plugin API credentials. 
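+            # With CALICO_MANAGE_CNI=true (set above), calico-node keeps the
+            # CNI plugin's kubeconfig and token refreshed in this directory,
+            # hence the writable mount that follows.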
+ - mountPath: /host/etc/cni/net.d + name: cni-net-dir + readOnly: false +{% if typha_secure %} + - name: typha-client + mountPath: /etc/typha-client + readOnly: true + - name: typha-cacert + subPath: ca.crt + mountPath: /etc/typha-ca/ca.crt + readOnly: true +{% endif %} + - name: policysync + mountPath: /var/run/nodeagent + # For eBPF mode, we need to be able to mount the BPF filesystem at /sys/fs/bpf so we mount in the + # parent directory. + - name: bpffs + mountPath: /sys/fs/bpf + - name: cni-log-dir + mountPath: /var/log/calico/cni + readOnly: true + volumes: + # Used by calico/node. + - name: lib-modules + hostPath: + path: /lib/modules + - name: var-run-calico + hostPath: + path: /var/run/calico + type: DirectoryOrCreate + - name: var-lib-calico + hostPath: + path: /var/lib/calico + type: DirectoryOrCreate + # Used to install CNI. + - name: cni-net-dir + hostPath: + path: /etc/cni/net.d + - name: cni-bin-dir + hostPath: + path: /opt/cni/bin + type: DirectoryOrCreate +{% if calico_datastore == "etcd" %} + # Mount in the etcd TLS secrets. + - name: etcd-certs + hostPath: + path: "{{ calico_cert_dir }}" +{% endif %} + # Mount the global iptables lock file, used by calico/node + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate +{% if calico_datastore == "kdd" and not calico_ipam_host_local %} + # Mount in the directory for host-local IPAM allocations. This is + # used when upgrading from host-local to calico-ipam, and can be removed + # if not using the upgrade-ipam init container. + - name: host-local-net-dir + hostPath: + path: /var/lib/cni/networks +{% endif %} +{% if typha_enabled and typha_secure %} + - name: typha-client + secret: + secretName: typha-client + items: + - key: tls.crt + path: typha-client.crt + - key: tls.key + path: typha-client.key + - name: typha-cacert + hostPath: + path: "/etc/kubernetes/ssl/" +{% endif %} + - name: sys-fs + hostPath: + path: /sys/fs/ + type: DirectoryOrCreate + - name: bpffs + hostPath: + path: /sys/fs/bpf + type: Directory + # mount /proc at /nodeproc to be used by mount-bpffs initContainer to mount root cgroup2 fs. + - name: nodeproc + hostPath: + path: /proc + # Used to access CNI logs. + - name: cni-log-dir + hostPath: + path: /var/log/calico/cni + # Used to create per-pod Unix Domain Sockets + - name: policysync + hostPath: + type: DirectoryOrCreate + path: /var/run/nodeagent diff --git a/roles/network_plugin/calico/templates/calico-typha.yml.j2 b/roles/network_plugin/calico/templates/calico-typha.yml.j2 new file mode 100644 index 00000000000..f2cf74f9768 --- /dev/null +++ b/roles/network_plugin/calico/templates/calico-typha.yml.j2 @@ -0,0 +1,186 @@ +# This manifest creates a Service, which will be backed by Calico's Typha daemon. +# Typha sits in between Felix and the API server, reducing Calico's load on the API server. + +apiVersion: v1 +kind: Service +metadata: + name: calico-typha + namespace: kube-system + labels: + k8s-app: calico-typha +spec: + ports: + - port: 5473 + protocol: TCP + targetPort: calico-typha + name: calico-typha +{% if typha_prometheusmetricsenabled %} + - port: {{ typha_prometheusmetricsport }} + protocol: TCP + targetPort: http-metrics + name: metrics +{% endif %} + selector: + k8s-app: calico-typha + +--- + +# This manifest creates a Deployment of Typha to back the above service. + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: calico-typha + namespace: kube-system + labels: + k8s-app: calico-typha +spec: + # Number of Typha replicas. 
To enable Typha, set this to a non-zero value *and* set the + # typha_service_name variable in the calico-config ConfigMap above. + # + # We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is essential + # (when using the Kubernetes datastore). Use one replica for every 100-200 nodes. In + # production, we recommend running at least 3 replicas to reduce the impact of rolling upgrade. + replicas: {{ typha_replicas }} + revisionHistoryLimit: 2 + selector: + matchLabels: + k8s-app: calico-typha + template: + metadata: + labels: + k8s-app: calico-typha + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: 'true' +{% if typha_prometheusmetricsenabled %} + prometheus.io/scrape: 'true' + prometheus.io/port: "{{ typha_prometheusmetricsport }}" +{% endif %} + spec: + nodeSelector: + kubernetes.io/os: linux + hostNetwork: true + tolerations: + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule + # Since Calico can't network a pod until Typha is up, we need to run Typha itself + # as a host-networked pod. + serviceAccountName: calico-node + priorityClassName: system-cluster-critical + # fsGroup allows using projected serviceaccount tokens as described here kubernetes/kubernetes#82573 + securityContext: + fsGroup: 65534 + containers: + - image: {{ calico_typha_image_repo }}:{{ calico_typha_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + name: calico-typha + ports: + - containerPort: 5473 + name: calico-typha + protocol: TCP +{% if typha_prometheusmetricsenabled %} + - containerPort: {{ typha_prometheusmetricsport }} + name: http-metrics + protocol: TCP +{% endif %} + envFrom: + - configMapRef: + # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. + name: kubernetes-services-endpoint + optional: true + env: + # Enable "info" logging by default. Can be set to "debug" to increase verbosity. + - name: TYPHA_LOGSEVERITYSCREEN + value: "info" + # Disable logging to file and syslog since those don't make sense in Kubernetes. + - name: TYPHA_LOGFILEPATH + value: "none" + - name: TYPHA_LOGSEVERITYSYS + value: "none" + # Monitor the Kubernetes API to find the number of running instances and rebalance + # connections. + - name: TYPHA_CONNECTIONREBALANCINGMODE + value: "kubernetes" + - name: TYPHA_DATASTORETYPE + value: "kubernetes" + - name: TYPHA_HEALTHENABLED + value: "true" + - name: TYPHA_MAXCONNECTIONSLOWERLIMIT + value: "{{ typha_max_connections_lower_limit }}" +{% if typha_secure %} + - name: TYPHA_CAFILE + value: /etc/ca/ca.crt + - name: TYPHA_CLIENTCN + value: typha-client + - name: TYPHA_SERVERCERTFILE + value: /etc/typha/server_certificate.pem + - name: TYPHA_SERVERKEYFILE + value: /etc/typha/server_key.pem +{% endif %} +{% if typha_prometheusmetricsenabled %} + # Since Typha is host-networked, + # this opens a port on the host, which may need to be secured. 
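+            # A host firewall rule, or scraping only from a trusted monitoring
+            # network, is a reasonable mitigation (general guidance, not
+            # enforced by this template).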
+ - name: TYPHA_PROMETHEUSMETRICSENABLED + value: "true" + - name: TYPHA_PROMETHEUSMETRICSPORT + value: "{{ typha_prometheusmetricsport }}" +{% endif %} +{% if calico_ipam_host_local %} + - name: USE_POD_CIDR + value: "true" +{% endif %} +{% if typha_secure %} + volumeMounts: + - mountPath: /etc/typha + name: typha-server + readOnly: true + - mountPath: /etc/ca/ca.crt + subPath: ca.crt + name: cacert + readOnly: true +{% endif %} + livenessProbe: + httpGet: + path: /liveness + port: 9098 + host: localhost + periodSeconds: 30 + initialDelaySeconds: 30 + readinessProbe: + httpGet: + path: /readiness + port: 9098 + host: localhost + periodSeconds: 10 +{% if typha_secure %} + volumes: + - name: typha-server + secret: + secretName: typha-server + items: + - key: tls.crt + path: server_certificate.pem + - key: tls.key + path: server_key.pem + - name: cacert + hostPath: + path: "{{ kube_cert_dir }}" +{% endif %} + +--- + +# This manifest creates a Pod Disruption Budget for Typha to allow K8s Cluster Autoscaler to evict + +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: calico-typha + namespace: kube-system + labels: + k8s-app: calico-typha +spec: + maxUnavailable: 1 + selector: + matchLabels: + k8s-app: calico-typha diff --git a/roles/network_plugin/calico/templates/calicoctl.etcd.sh.j2 b/roles/network_plugin/calico/templates/calicoctl.etcd.sh.j2 new file mode 100644 index 00000000000..fcde4a5e35e --- /dev/null +++ b/roles/network_plugin/calico/templates/calicoctl.etcd.sh.j2 @@ -0,0 +1,6 @@ +#!/bin/bash +ETCD_ENDPOINTS={{ etcd_access_addresses }} \ +ETCD_CA_CERT_FILE={{ calico_cert_dir }}/ca_cert.crt \ +ETCD_CERT_FILE={{ calico_cert_dir }}/cert.crt \ +ETCD_KEY_FILE={{ calico_cert_dir }}/key.pem \ +{{ bin_dir }}/calicoctl --allow-version-mismatch "$@" diff --git a/roles/network_plugin/calico/templates/calicoctl.kdd.sh.j2 b/roles/network_plugin/calico/templates/calicoctl.kdd.sh.j2 new file mode 100644 index 00000000000..ef89f3986cf --- /dev/null +++ b/roles/network_plugin/calico/templates/calicoctl.kdd.sh.j2 @@ -0,0 +1,8 @@ +#!/bin/bash +DATASTORE_TYPE=kubernetes \ +{% if inventory_hostname in groups['kube_control_plane'] %} +KUBECONFIG=/etc/kubernetes/admin.conf \ +{% else %} +KUBECONFIG=/etc/cni/net.d/calico-kubeconfig \ +{% endif %} +{{ bin_dir }}/calicoctl --allow-version-mismatch "$@" diff --git a/roles/network_plugin/calico/templates/kubernetes-services-endpoint.yml.j2 b/roles/network_plugin/calico/templates/kubernetes-services-endpoint.yml.j2 new file mode 100644 index 00000000000..f1e81776edb --- /dev/null +++ b/roles/network_plugin/calico/templates/kubernetes-services-endpoint.yml.j2 @@ -0,0 +1,11 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: kube-system + name: kubernetes-services-endpoint +data: +{% if calico_bpf_enabled %} + KUBERNETES_SERVICE_HOST: "{{ kube_apiserver_global_endpoint | urlsplit('hostname') }}" + KUBERNETES_SERVICE_PORT: "{{ kube_apiserver_global_endpoint | urlsplit('port') }}" +{% endif %} diff --git a/roles/network_plugin/calico/templates/make-ssl-calico.sh.j2 b/roles/network_plugin/calico/templates/make-ssl-calico.sh.j2 new file mode 100644 index 00000000000..94b2022e760 --- /dev/null +++ b/roles/network_plugin/calico/templates/make-ssl-calico.sh.j2 @@ -0,0 +1,102 @@ +#!/bin/bash + +# Author: Smana smainklh@gmail.com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -o errexit
+set -o pipefail
+usage()
+{
+    cat << EOF
+Create self-signed certificates
+
+Usage : $(basename $0) -f <config> [-d <ssldir>]
+      -h | --help         : Show this message
+      -f | --config       : Openssl configuration file
+      -d | --ssldir       : Directory where the certificates will be installed
+      -c | --cadir        : Directory where the existing CA is located
+      -s | --service      : Service for the ca
+
+               ex :
+               $(basename $0) -f openssl.conf -d /srv/ssl
+EOF
+}
+
+# Options parsing
+while (($#)); do
+    case "$1" in
+        -h | --help)    usage;   exit 0;;
+        -f | --config)  CONFIG=${2}; shift 2;;
+        -d | --ssldir)  SSLDIR="${2}"; shift 2;;
+        -c | --cadir)   CADIR="${2}"; shift 2;;
+        -s | --service) SERVICE="${2}"; shift 2;;
+        *)
+            usage
+            echo "ERROR : Unknown option"
+            exit 3
+        ;;
+    esac
+done
+
+if [ -z "${CONFIG}" ]; then
+    echo "ERROR: the openssl configuration file is missing. option -f"
+    exit 1
+fi
+if [ -z "${SSLDIR}" ]; then
+    SSLDIR="/etc/calico/certs"
+fi
+
+tmpdir=$(mktemp -d /tmp/calico_${SERVICE}_certs.XXXXXX)
+trap 'rm -rf "${tmpdir}"' EXIT
+cd "${tmpdir}"
+
+mkdir -p ${SSLDIR} ${CADIR}
+
+# Root CA
+if [ -e "$CADIR/ca.key" ]; then
+    # Reuse existing CA
+    cp $CADIR/{ca.crt,ca.key} .
+else
+    openssl genrsa -out ca.key {{certificates_key_size}} > /dev/null 2>&1
+    openssl req -x509 -new -nodes -key ca.key -days {{certificates_duration}} -out ca.crt -subj "/CN=calico-${SERVICE}-ca" > /dev/null 2>&1
+fi
+
+if [ "${SERVICE}" == "typha" ]; then
+    # Typha server
+    openssl genrsa -out typha-server.key {{certificates_key_size}} > /dev/null 2>&1
+    openssl req -new -key typha-server.key -out typha-server.csr -subj "/CN=typha-server" -config ${CONFIG} > /dev/null 2>&1
+    openssl x509 -req -in typha-server.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out typha-server.crt -days {{certificates_duration}} -extensions ssl_client -extfile ${CONFIG} > /dev/null 2>&1
+
+    # Typha client
+    openssl genrsa -out typha-client.key {{certificates_key_size}} > /dev/null 2>&1
+    openssl req -new -key typha-client.key -out typha-client.csr -subj "/CN=typha-client" -config ${CONFIG} > /dev/null 2>&1
+    openssl x509 -req -in typha-client.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out typha-client.crt -days {{certificates_duration}} -extensions ssl_client -extfile ${CONFIG} > /dev/null 2>&1
+
+elif [ "${SERVICE}" == "apiserver" ]; then
+    # calico-apiserver
+    openssl genrsa -out apiserver.key {{certificates_key_size}} > /dev/null 2>&1
+    openssl req -new -key apiserver.key -out apiserver.csr -subj "/CN=calico-apiserver" -config ${CONFIG} > /dev/null 2>&1
+    openssl x509 -req -in apiserver.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out apiserver.crt -days {{certificates_duration}} -extensions ssl_client_apiserver -extfile ${CONFIG} > /dev/null 2>&1
+else
+    echo "ERROR: unknown or missing service name,
option -s" + exit 1 +fi + +# Install certs +if [ -e "$CADIR/ca.key" ]; then + # No pass existing CA + rm -f ca.crt ca.key +fi + +mv {*.crt,*.key} ${SSLDIR}/ diff --git a/roles/network_plugin/calico/vars/amazon.yml b/roles/network_plugin/calico/vars/amazon.yml new file mode 100644 index 00000000000..83efdcdb084 --- /dev/null +++ b/roles/network_plugin/calico/vars/amazon.yml @@ -0,0 +1,5 @@ +--- +calico_wireguard_repo: https://download.copr.fedorainfracloud.org/results/jdoss/wireguard/epel-7-$basearch/ +calico_wireguard_packages: + - wireguard-dkms + - wireguard-tools diff --git a/roles/network_plugin/calico/vars/centos-9.yml b/roles/network_plugin/calico/vars/centos-9.yml new file mode 100644 index 00000000000..43df5457a39 --- /dev/null +++ b/roles/network_plugin/calico/vars/centos-9.yml @@ -0,0 +1,3 @@ +--- +calico_wireguard_packages: + - wireguard-tools diff --git a/roles/network_plugin/calico/vars/debian.yml b/roles/network_plugin/calico/vars/debian.yml new file mode 100644 index 00000000000..baf603cfd9c --- /dev/null +++ b/roles/network_plugin/calico/vars/debian.yml @@ -0,0 +1,3 @@ +--- +calico_wireguard_packages: + - wireguard diff --git a/roles/network_plugin/calico/vars/fedora.yml b/roles/network_plugin/calico/vars/fedora.yml new file mode 100644 index 00000000000..43df5457a39 --- /dev/null +++ b/roles/network_plugin/calico/vars/fedora.yml @@ -0,0 +1,3 @@ +--- +calico_wireguard_packages: + - wireguard-tools diff --git a/roles/network_plugin/calico/vars/opensuse.yml b/roles/network_plugin/calico/vars/opensuse.yml new file mode 100644 index 00000000000..43df5457a39 --- /dev/null +++ b/roles/network_plugin/calico/vars/opensuse.yml @@ -0,0 +1,3 @@ +--- +calico_wireguard_packages: + - wireguard-tools diff --git a/roles/network_plugin/calico/vars/redhat-9.yml b/roles/network_plugin/calico/vars/redhat-9.yml new file mode 100644 index 00000000000..43df5457a39 --- /dev/null +++ b/roles/network_plugin/calico/vars/redhat-9.yml @@ -0,0 +1,3 @@ +--- +calico_wireguard_packages: + - wireguard-tools diff --git a/roles/network_plugin/calico/vars/redhat.yml b/roles/network_plugin/calico/vars/redhat.yml new file mode 100644 index 00000000000..a83a8a5fed8 --- /dev/null +++ b/roles/network_plugin/calico/vars/redhat.yml @@ -0,0 +1,4 @@ +--- +calico_wireguard_packages: + - wireguard-dkms + - wireguard-tools diff --git a/roles/network_plugin/calico/vars/rocky-9.yml b/roles/network_plugin/calico/vars/rocky-9.yml new file mode 100644 index 00000000000..43df5457a39 --- /dev/null +++ b/roles/network_plugin/calico/vars/rocky-9.yml @@ -0,0 +1,3 @@ +--- +calico_wireguard_packages: + - wireguard-tools diff --git a/roles/network_plugin/calico_defaults/defaults/main.yml b/roles/network_plugin/calico_defaults/defaults/main.yml new file mode 100644 index 00000000000..da899546b25 --- /dev/null +++ b/roles/network_plugin/calico_defaults/defaults/main.yml @@ -0,0 +1,176 @@ +--- +# the default value of name +calico_cni_name: k8s-pod-network + +# Enables Internet connectivity from containers +nat_outgoing: true +nat_outgoing_ipv6: false + +# add default ippool name +calico_pool_name: "default-pool" +calico_ipv4pool_ipip: "Off" + +# Change encapsulation mode, by default we enable vxlan which is the most mature and well tested mode +calico_ipip_mode: Never # valid values are 'Always', 'Never' and 'CrossSubnet' +calico_vxlan_mode: Always # valid values are 'Always', 'Never' and 'CrossSubnet' + +calico_cni_pool: true +calico_cni_pool_ipv6: true + +# add default ippool blockSize +calico_pool_blocksize: 26 + +# Calico doesn't 
support IPIP tunneling for IPv6.
+calico_ipip_mode_ipv6: Never
+calico_vxlan_mode_ipv6: Always
+
+# add default ipv6 ippool blockSize
+calico_pool_blocksize_ipv6: 122
+
+# Calico network backend can be 'bird', 'vxlan' or 'none'
+calico_network_backend: vxlan
+
+calico_cert_dir: /etc/calico/certs
+
+# Global as_num (/calico/bgp/v1/global/as_num)
+global_as_num: "64512"
+
+# You can set the MTU value here. If left undefined or empty, it will
+# not be specified in the calico CNI config, so Calico will use built-in
+# defaults. The value should be a number, not a string.
+# calico_mtu: 1500
+
+# Advertise Service External IPs
+calico_advertise_service_external_ips: []
+
+# Advertise Service LoadBalancer IPs
+calico_advertise_service_loadbalancer_ips: []
+
+# Calico eBPF support
+calico_bpf_enabled: false
+calico_bpf_log_level: ""
+# Valid options for service mode: Tunnel (default), DSR=Direct Server Return
+calico_bpf_service_mode: Tunnel
+
+# Calico floatingIPs support
+# Valid options for floatingIPs: Disabled (default), Enabled
+calico_felix_floating_ips: Disabled
+
+# Limits for apps
+calico_node_memory_limit: 500M
+calico_node_cpu_limit: "0"
+calico_node_memory_requests: 64M
+calico_node_cpu_requests: 150m
+calico_felix_chaininsertmode: Insert
+
+# Calico daemonset nodeselector
+calico_ds_nodeselector: "kubernetes.io/os: linux"
+
+# Virtual network ID to use for VXLAN traffic. A value of 0 means “use the kernel default”.
+calico_vxlan_vni: 4096
+
+# Port to use for VXLAN traffic. A value of 0 means “use the kernel default”.
+calico_vxlan_port: 4789
+
+# Enable Prometheus Metrics endpoint for felix
+calico_felix_prometheusmetricsenabled: false
+calico_felix_prometheusmetricsport: 9091
+calico_felix_prometheusgometricsenabled: true
+calico_felix_prometheusprocessmetricsenabled: true
+
+# Set the agent log level. Can be debug, warning, info or fatal
+calico_loglevel: info
+calico_node_startup_loglevel: error
+
+# Set the log path for the calico CNI plugin. Set to false to disable logging to disk.
+calico_cni_log_file_path: /var/log/calico/cni/cni.log
+
+# Enable or disable usage reporting to 'usage.projectcalico.org'
+calico_usage_reporting: false
+
+# Whether calico should ignore the kernel's RPF check setting,
+# see https://github.com/projectcalico/felix/blob/ab8799eaea66627e5db7717e62fca61fd9c08646/python/calico/felix/config.py#L198
+calico_node_ignorelooserpf: false
+
+# Define the address on which Felix will respond to health requests
+calico_healthhost: "localhost"
+
+# Configure the time in seconds that calico will wait for the iptables lock
+calico_iptables_lock_timeout_secs: 10
+
+# Choose the Calico iptables backend: "Legacy", "Auto" or "NFT" (FELIX_IPTABLESBACKEND)
+calico_iptables_backend: "Auto"
+
+# Calico Wireguard support
+calico_wireguard_enabled: false
+calico_wireguard_packages: []
+calico_wireguard_repo: https://download.copr.fedorainfracloud.org/results/jdoss/wireguard/epel-{{ ansible_distribution_major_version }}-$basearch/
+
+# If you want to use a non-default IP_AUTODETECTION_METHOD or IP6_AUTODETECTION_METHOD for calico node, set this option to one of:
+# * can-reach=DESTINATION
+# * interface=INTERFACE-REGEX
+# see https://projectcalico.docs.tigera.io/reference/node/configuration#ip-autodetection-methods
+# calico_ip_auto_method: "interface=eth.*"
+# calico_ip6_auto_method: "interface=eth.*"
+
+# Set FELIX_MTUIFACEPATTERN, the pattern used to discover the host’s interface for MTU auto-detection.
+# see https://projectcalico.docs.tigera.io/reference/felix/configuration
+# calico_felix_mtu_iface_pattern: "^((en|wl|ww|sl|ib)[opsx].*|(eth|wlan|wwan).*)"
+
+calico_baremetal_nodename: "{{ kube_override_hostname | default(inventory_hostname) }}"
+
+kube_etcd_cacert_file: ca.pem
+kube_etcd_cert_file: node-{{ inventory_hostname }}.pem
+kube_etcd_key_file: node-{{ inventory_hostname }}-key.pem
+
+# Choose data store type for calico: "etcd" or "kdd" (kubernetes datastore)
+# The default value for calico_datastore is set in role kubespray-default
+
+# Use typha (only with kdd)
+typha_enabled: false
+typha_prometheusmetricsenabled: false
+typha_prometheusmetricsport: 9093
+
+# Scaling typha: 1 replica per 100 nodes is adequate
+# Number of typha replicas
+typha_replicas: 1
+
+# Set max typha connections
+typha_max_connections_lower_limit: 300
+
+# Generate certificates for typha<->calico-node communication
+typha_secure: false
+
+calico_feature_control: {}
+
+# Calico default BGP port
+calico_bgp_listen_port: 179
+
+# Calico FelixConfiguration options
+calico_felix_reporting_interval: 0s
+calico_felix_log_severity_screen: Info
+
+# Calico container settings
+calico_allow_ip_forwarding: false
+
+# Calico IPAM strictAffinity
+calico_ipam_strictaffinity: false
+
+# Calico IPAM autoAllocateBlocks
+calico_ipam_autoallocateblocks: true
+
+# Calico IPAM maxBlocksPerHost, default 0
+calico_ipam_maxblocksperhost: 0
+
+# Calico host local IPAM (use node .spec.podCIDR)
+calico_ipam_host_local: false
+
+# Calico apiserver (only with kdd)
+calico_apiserver_enabled: false
+
+# Calico feature detect override
+calico_feature_detect_override: ""
+
+# Calico kubeconfig wait timeout in seconds
+calico_kubeconfig_wait_timeout: 300
diff --git a/roles/network_plugin/cilium/defaults/main.yml b/roles/network_plugin/cilium/defaults/main.yml
index 3a2267a5336..9b754b85616 100644
--- a/roles/network_plugin/cilium/defaults/main.yml
+++ b/roles/network_plugin/cilium/defaults/main.yml
@@ -1,12 +1,9 @@
 ---
-cilium_min_version_required: "1.15"
-
-# remove migrate after 2.29 released
-cilium_remove_old_resources: false
+cilium_min_version_required: "1.10"
 
 # Log-level
 cilium_debug: false
 
-cilium_mtu: "0"
+cilium_mtu: ""
 
 cilium_enable_ipv4: "{{ ipv4_stack }}"
 cilium_enable_ipv6: "{{ ipv6_stack }}"
@@ -14,7 +11,7 @@ cilium_enable_ipv6: "{{ ipv6_stack }}"
 cilium_l2announcements: false
 
 # Cilium agent health port
-cilium_agent_health_port: "9879"
+cilium_agent_health_port: "{%- if cilium_version is version('1.11.6', '>=') -%}9879{%- else -%}9876{%- endif -%}"
 
 # Identity allocation mode selects how identities are shared between cilium
 # nodes by setting how they are stored. The options are "crd" or "kvstore".
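# For reference, this identity allocation setting is typically overridden per
# cluster in group_vars rather than edited here; a minimal sketch, assuming
# the usual kubespray inventory layout (the file path is illustrative):
#
#   # inventory/mycluster/group_vars/k8s_cluster/k8s-net-cilium.yml
#   cilium_identity_allocation_mode: crd  # "crd" avoids the etcd/kvstore dependency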
@@ -29,7 +26,7 @@ cilium_agent_health_port: "9879" # - --synchronize-k8s-nodes # - --identity-allocation-mode=kvstore # - Ref: https://docs.cilium.io/en/stable/internals/cilium_operator/#kvstore-operations -cilium_identity_allocation_mode: crd +cilium_identity_allocation_mode: kvstore # Etcd SSL dirs cilium_cert_dir: /etc/cilium/certs @@ -50,7 +47,7 @@ cilium_tunnel_mode: vxlan cilium_loadbalancer_mode: snat # -- Configure Loadbalancer IP Pools -cilium_loadbalancer_ip_pools: [ ] +cilium_loadbalancer_ip_pools: [] # Optional features cilium_enable_prometheus: false @@ -58,20 +55,24 @@ cilium_enable_prometheus: false cilium_enable_portmap: false # Monitor aggregation level (none/low/medium/maximum) cilium_monitor_aggregation: medium -# Kube Proxy Replacement mode (true/false) -cilium_kube_proxy_replacement: true - -# If not defined `cilium_dns_proxy_enable_transparent_mode`, it will following the Cilium behavior. -# When Cilium is configured to replace kube-proxy, it automatically enables dnsProxy, which will conflict with nodelocaldns. -# You can set `false` avoid conflict with nodelocaldns. -# https://github.com/cilium/cilium/issues/33144 -# cilium_dns_proxy_enable_transparent_mode: +# Kube Proxy Replacement mode (strict/partial) +cilium_kube_proxy_replacement: partial # If upgrading from Cilium < 1.5, you may want to override some of these options # to prevent service disruptions. See also: # http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action cilium_preallocate_bpf_maps: false +# `cilium_tofqdns_enable_poller` is deprecated in 1.8, removed in 1.9 +cilium_tofqdns_enable_poller: false + +# `cilium_enable_legacy_services` is deprecated in 1.6, removed in 1.9 +cilium_enable_legacy_services: false + +# Deploy cilium even if kube_network_plugin is not cilium. +# This enables to deploy cilium alongside another CNI to replace kube-proxy. +cilium_deploy_additionally: false + # Auto direct nodes routes can be used to advertise pods routes in your cluster # without any tunelling (with `cilium_tunnel_mode` sets to `disabled`). # This works only if you have a L2 connectivity between all your nodes. @@ -103,8 +104,8 @@ cilium_encryption_enabled: false cilium_encryption_type: "ipsec" # Enable encryption for pure node to node traffic. -# This option is only effective when `cilium_encryption_type` is set to `wireguard`. -cilium_encryption_node_encryption: false +# This option is only effective when `cilium_encryption_type` is set to `ipsec`. +cilium_ipsec_node_encryption: false # If your kernel or distribution does not support WireGuard, Cilium agent can be configured to fall back on the user-space implementation. # When this flag is enabled and Cilium detects that the kernel has no native support for WireGuard, @@ -118,7 +119,6 @@ cilium_wireguard_userspace_fallback: false # In case they select the Pod at egress, then the bandwidth enforcement will be disabled for those Pods. # Bandwidth Manager requires a v5.1.x or more recent Linux kernel. cilium_enable_bandwidth_manager: false -cilium_enable_bandwidth_manager_bbr: false # IP Masquerade Agent # https://docs.cilium.io/en/stable/concepts/networking/masquerading/ @@ -141,7 +141,6 @@ cilium_non_masquerade_cidrs: ### Indicates whether to masquerade traffic to the link local prefix. ### If the masqLinkLocal is not set or set to false, then 169.254.0.0/16 is appended to the non-masquerade CIDRs list. 
cilium_masq_link_local: false -cilium_masq_link_local_ipv6: false ### A time interval at which the agent attempts to reload config from disk cilium_ip_masq_resync_interval: 60s @@ -150,10 +149,10 @@ cilium_ip_masq_resync_interval: 60s cilium_enable_hubble: false ### Enable Hubble-ui cilium_enable_hubble_ui: "{{ cilium_enable_hubble }}" -### Enable Hubble Metrics (deprecated) +### Enable Hubble Metrics cilium_enable_hubble_metrics: false ### if cilium_enable_hubble_metrics: true -cilium_hubble_metrics: [ ] +cilium_hubble_metrics: {} # - dns # - drop # - tcp @@ -165,29 +164,12 @@ cilium_hubble_install: false ### Enable auto generate certs if cilium_hubble_install: true cilium_hubble_tls_generate: false -cilium_hubble_export_file_max_backups: "5" -cilium_hubble_export_file_max_size_mb: "10" - -cilium_hubble_export_dynamic_enabled: false -cilium_hubble_export_dynamic_config_content: - - name: all - fieldMask: [ ] - includeFilters: [ ] - excludeFilters: [ ] - filePath: "/var/run/cilium/hubble/events.log" - -# Override the DNS suffix that Hubble-Relay uses to resolve its peer service. -# It defaults to the inventory's `dns_domain`. -cilium_hubble_peer_service_cluster_domain: "{{ dns_domain }}" - ### Capacity of Hubble events buffer. The provided value must be one less than an integer power of two and no larger than 65535 ### (ie: 1, 3, ..., 2047, 4095, ..., 65535) (default 4095) # cilium_hubble_event_buffer_capacity: 4095 ### Buffer size of the channel to receive monitor events. # cilium_hubble_event_queue_size: 50 -cilium_gateway_api_enabled: false - # The default IP address management mode is "Cluster Scope". # https://docs.cilium.io/en/stable/concepts/networking/ipam/ cilium_ipam_mode: cluster-pool @@ -212,17 +194,13 @@ cilium_ipam_mode: cluster-pool # Extra arguments for the Cilium agent -cilium_agent_custom_args: [ ] # deprecated -cilium_agent_extra_args: - - --agent-health-port=9879 - - --disable-envoy-version-check - - --hostbin-path=/usr/local/bin # Alternative path +cilium_agent_custom_args: [] # For adding and mounting extra volumes to the cilium agent -cilium_agent_extra_volumes: [ ] -cilium_agent_extra_volume_mounts: [ ] +cilium_agent_extra_volumes: [] +cilium_agent_extra_volume_mounts: [] -cilium_agent_extra_env_vars: [ ] +cilium_agent_extra_env_vars: [] cilium_operator_replicas: 2 @@ -233,26 +211,16 @@ cilium_operator_api_serve_addr: "127.0.0.1:9234" ## cilium_config_extra_vars: ## var1: "value1" ## var2: "value2" -cilium_config_extra_vars: { } +cilium_config_extra_vars: {} # For adding and mounting extra volumes to the cilium operator -cilium_operator_extra_volumes: [ ] -cilium_operator_extra_volume_mounts: [ ] +cilium_operator_extra_volumes: [] +cilium_operator_extra_volume_mounts: [] # Extra arguments for the Cilium Operator -cilium_operator_custom_args: [ ] # deprecated -cilium_operator_extra_args: [ ] - -# Tolerations of the cilium operator -cilium_operator_tolerations: - - operator: "Exists" +cilium_operator_custom_args: [] -# Unique ID of the cluster. Must be unique across all connected -# clusters and in the range of 1 to 255. Only required for Cluster Mesh, -# may be 0 if Cluster Mesh is not used. -cilium_cluster_id: 0 # Name of the cluster. Only relevant when building a mesh of clusters. -# The "default" name cannot be used if the Cluster ID is different from 0. cilium_cluster_name: default # Make Cilium take ownership over the `/etc/cni/net.d` directory on the node, renaming all non-Cilium CNI configurations to `*.cilium_bak`. 
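# The Hubble toggles above are usually flipped together; a minimal group_vars
# sketch, with illustrative values (the shape of cilium_hubble_metrics, list
# or mapping, follows the Cilium version in use):
#
#   cilium_enable_hubble: true
#   cilium_enable_hubble_ui: true
#   cilium_enable_hubble_metrics: true
#   cilium_hubble_metrics:
#     - dns
#     - drop
#     - tcp
#     - flow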
@@ -275,8 +243,8 @@ cilium_cni_log_file: "/var/run/cilium/cilium-cni.log" # Available for Cilium v1.11 and up cilium_cgroup_auto_mount: true # -- Configure cgroup root where cgroup2 filesystem is mounted on the host -# cilium_cgroup_host_root: "/run/cilium/cgroupv2" -cilium_cgroup_host_root: /sys/fs/cgroup +cilium_cgroup_host_root: "/run/cilium/cgroupv2" + # Specifies the ratio (0.0-1.0) of total system memory to use for dynamic # sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps. cilium_bpf_map_dynamic_size_ratio: "0.0025" @@ -289,13 +257,13 @@ cilium_enable_ipv4_masquerade: true cilium_enable_ipv6_masquerade: true # -- Enable native IP masquerade support in eBPF -cilium_enable_bpf_masquerade: true +cilium_enable_bpf_masquerade: false # -- Configure whether direct routing mode should route traffic via # host stack (true) or directly and more efficiently out of BPF (false) if # the kernel supports it. The latter has the implication that it will also # bypass netfilter in the host namespace. -cilium_enable_host_legacy_routing: false +cilium_enable_host_legacy_routing: true # -- Enable use of the remote node identity. # ref: https://docs.cilium.io/en/v1.7/install/upgrade/#configmap-remote-node-identity @@ -317,19 +285,19 @@ cilium_enable_bgp_control_plane: false # -- Configure BGP Instances (New bgpv2 API v1.16+) -cilium_bgp_cluster_configs: [ ] +cilium_bgp_cluster_configs: [] # -- Configure BGP Peers (New bgpv2 API v1.16+) -cilium_bgp_peer_configs: [ ] +cilium_bgp_peer_configs: [] # -- Configure BGP Advertisements (New bgpv2 API v1.16+) -cilium_bgp_advertisements: [ ] +cilium_bgp_advertisements: [] # -- Configure BGP Node Config Overrides (New bgpv2 API v1.16+) -cilium_bgp_node_config_overrides: [ ] +cilium_bgp_node_config_overrides: [] # -- Configure BGP Peers (Legacy < v1.16) -cilium_bgp_peering_policies: [ ] +cilium_bgp_peering_policies: [] # -- Whether to enable CNP status updates. 
cilium_disable_cnp_status_updates: true @@ -339,9 +307,9 @@ cilium_rolling_restart_wait_retries_count: 30 cilium_rolling_restart_wait_retries_delay_seconds: 10 # Cilium changed the default metrics exporter ports in 1.12 -cilium_agent_scrape_port: "9962" -cilium_operator_scrape_port: "9963" -cilium_hubble_scrape_port: "9965" +cilium_agent_scrape_port: "{{ cilium_version is version('1.12', '>=') | ternary('9962', '9090') }}" +cilium_operator_scrape_port: "{{ cilium_version is version('1.12', '>=') | ternary('9963', '6942') }}" +cilium_hubble_scrape_port: "{{ cilium_version is version('1.12', '>=') | ternary('9965', '9091') }}" # Cilium certgen args for generate certificate for hubble mTLS cilium_certgen_args: @@ -360,12 +328,23 @@ cilium_certgen_args: hubble-relay-client-cert-secret-name: hubble-relay-client-certs hubble-relay-server-cert-generate: false +# A list of extra rules variables to add to clusterrole for cilium operator, formatted like: +# cilium_clusterrole_rules_operator_extra_vars: +# - apiGroups: +# - '""' +# resources: +# - pods +# verbs: +# - delete +# - apiGroups: +# - '""' +# resources: +# - nodes +# verbs: +# - list +# - watch +# resourceNames: +# - toto +cilium_clusterrole_rules_operator_extra_vars: [] cilium_enable_host_firewall: false cilium_policy_audit_mode: false - -# Cilium extra install flags -cilium_install_extra_flags: "" - -# Cilium extra values, use any values from cilium Helm Chart -# ref: https://docs.cilium.io/en/stable/helm-reference/ -cilium_extra_values: { } diff --git a/roles/network_plugin/cilium/tasks/apply.yml b/roles/network_plugin/cilium/tasks/apply.yml index 18860610709..e63502f9b25 100644 --- a/roles/network_plugin/cilium/tasks/apply.yml +++ b/roles/network_plugin/cilium/tasks/apply.yml @@ -1,19 +1,14 @@ --- -- name: Check if Cilium Helm release exists (via cilium version) - command: "{{ bin_dir }}/cilium version" - register: cilium_release_info - when: inventory_hostname == groups['kube_control_plane'][0] - failed_when: false - changed_when: false - -- name: Set action to install or upgrade - set_fact: - cilium_action: "{{ 'install' if ('release: not found' in cilium_release_info.stderr | default('') or 'release: not found' in cilium_release_info.stdout | default('')) else 'upgrade' }}" - -- name: Cilium | Install - environment: "{{ proxy_env }}" - command: "{{ bin_dir }}/cilium {{ cilium_action }} --version {{ cilium_version }} -f {{ kube_config_dir }}/cilium-values.yaml -f {{ kube_config_dir }}/cilium-extra-values.yaml {{ cilium_install_extra_flags }}" - when: inventory_hostname == groups['kube_control_plane'][0] +- name: Cilium | Start Resources + kube: + name: "{{ item.item.name }}" + namespace: "kube-system" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/{{ item.item.name }}-{{ item.item.file }}" + state: "latest" + loop: "{{ cilium_node_manifests.results }}" + when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped - name: Cilium | Wait for pods to run command: "{{ kubectl }} -n kube-system get pods -l k8s-app=cilium -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" # noqa literal-compare @@ -24,6 +19,19 @@ failed_when: false when: inventory_hostname == groups['kube_control_plane'][0] +- name: Cilium | Hubble install + kube: + name: "{{ item.item.name }}" + namespace: "kube-system" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/addons/hubble/{{ item.item.name }}-{{ 
item.item.file }}" + state: "latest" + loop: "{{ cilium_hubble_manifests.results }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] and not item is skipped + - cilium_enable_hubble and cilium_hubble_install + - name: Cilium | Wait for CiliumLoadBalancerIPPool CRD to be present command: "{{ kubectl }} wait --for condition=established --timeout=60s crd/ciliumloadbalancerippools.cilium.io" register: cillium_lbippool_crd_ready @@ -40,7 +48,7 @@ dest: "{{ kube_config_dir }}/{{ item.name }}-{{ item.file }}" mode: "0644" with_items: - - { name: cilium, file: cilium-loadbalancer-ip-pool.yml, type: CiliumLoadBalancerIPPool } + - {name: cilium, file: cilium-loadbalancer-ip-pool.yml, type: CiliumLoadBalancerIPPool} when: - inventory_hostname == groups['kube_control_plane'][0] - cillium_lbippool_crd_ready is defined and cillium_lbippool_crd_ready.rc is defined and cillium_lbippool_crd_ready.rc == 0 @@ -54,7 +62,7 @@ filename: "{{ kube_config_dir }}/{{ item.name }}-{{ item.file }}" state: "latest" loop: - - { name: cilium, file: cilium-loadbalancer-ip-pool.yml, type: CiliumLoadBalancerIPPool } + - {name: cilium, file: cilium-loadbalancer-ip-pool.yml, type: CiliumLoadBalancerIPPool} when: - inventory_hostname == groups['kube_control_plane'][0] - cillium_lbippool_crd_ready is defined and cillium_lbippool_crd_ready.rc is defined and cillium_lbippool_crd_ready.rc == 0 @@ -76,7 +84,7 @@ dest: "{{ kube_config_dir }}/{{ item.name }}-{{ item.file }}" mode: "0644" with_items: - - { name: cilium, file: cilium-bgp-peering-policy.yml, type: CiliumBGPPeeringPolicy } + - {name: cilium, file: cilium-bgp-peering-policy.yml, type: CiliumBGPPeeringPolicy} when: - inventory_hostname == groups['kube_control_plane'][0] - cillium_bgpppolicy_crd_ready is defined and cillium_bgpppolicy_crd_ready.rc is defined and cillium_bgpppolicy_crd_ready.rc == 0 @@ -90,7 +98,7 @@ filename: "{{ kube_config_dir }}/{{ item.name }}-{{ item.file }}" state: "latest" loop: - - { name: cilium, file: cilium-bgp-peering-policy.yml, type: CiliumBGPPeeringPolicy } + - {name: cilium, file: cilium-bgp-peering-policy.yml, type: CiliumBGPPeeringPolicy} when: - inventory_hostname == groups['kube_control_plane'][0] - cillium_bgpppolicy_crd_ready is defined and cillium_bgpppolicy_crd_ready.rc is defined and cillium_bgpppolicy_crd_ready.rc == 0 @@ -112,7 +120,7 @@ dest: "{{ kube_config_dir }}/{{ item.name }}-{{ item.file }}" mode: "0644" with_items: - - { name: cilium, file: cilium-bgp-cluster-config.yml, type: CiliumBGPClusterConfig } + - {name: cilium, file: cilium-bgp-cluster-config.yml, type: CiliumBGPClusterConfig} when: - inventory_hostname == groups['kube_control_plane'][0] - cillium_bgpcconfig_crd_ready is defined and cillium_bgpcconfig_crd_ready.rc is defined and cillium_bgpcconfig_crd_ready.rc == 0 @@ -126,7 +134,7 @@ filename: "{{ kube_config_dir }}/{{ item.name }}-{{ item.file }}" state: "latest" loop: - - { name: cilium, file: cilium-bgp-cluster-config.yml, type: CiliumBGPClusterConfig } + - {name: cilium, file: cilium-bgp-cluster-config.yml, type: CiliumBGPClusterConfig} when: - inventory_hostname == groups['kube_control_plane'][0] - cillium_bgpcconfig_crd_ready is defined and cillium_bgpcconfig_crd_ready.rc is defined and cillium_bgpcconfig_crd_ready.rc == 0 @@ -148,7 +156,7 @@ dest: "{{ kube_config_dir }}/{{ item.name }}-{{ item.file }}" mode: "0644" with_items: - - { name: cilium, file: cilium-bgp-peer-config.yml, type: CiliumBGPPeerConfig } + - {name: cilium, file: cilium-bgp-peer-config.yml, type: CiliumBGPPeerConfig} 
when: - inventory_hostname == groups['kube_control_plane'][0] - cillium_bgppconfig_crd_ready is defined and cillium_bgppconfig_crd_ready.rc is defined and cillium_bgppconfig_crd_ready.rc == 0 @@ -162,7 +170,7 @@ filename: "{{ kube_config_dir }}/{{ item.name }}-{{ item.file }}" state: "latest" loop: - - { name: cilium, file: cilium-bgp-peer-config.yml, type: CiliumBGPPeerConfig } + - {name: cilium, file: cilium-bgp-peer-config.yml, type: CiliumBGPPeerConfig} when: - inventory_hostname == groups['kube_control_plane'][0] - cillium_bgppconfig_crd_ready is defined and cillium_bgppconfig_crd_ready.rc is defined and cillium_bgppconfig_crd_ready.rc == 0 @@ -184,7 +192,7 @@ dest: "{{ kube_config_dir }}/{{ item.name }}-{{ item.file }}" mode: "0644" with_items: - - { name: cilium, file: cilium-bgp-advertisement.yml, type: CiliumBGPAdvertisement } + - {name: cilium, file: cilium-bgp-advertisement.yml, type: CiliumBGPAdvertisement} when: - inventory_hostname == groups['kube_control_plane'][0] - cillium_bgpadvert_crd_ready is defined and cillium_bgpadvert_crd_ready.rc is defined and cillium_bgpadvert_crd_ready.rc == 0 @@ -198,7 +206,7 @@ filename: "{{ kube_config_dir }}/{{ item.name }}-{{ item.file }}" state: "latest" loop: - - { name: cilium, file: cilium-bgp-advertisement.yml, type: CiliumBGPAdvertisement } + - {name: cilium, file: cilium-bgp-advertisement.yml, type: CiliumBGPAdvertisement} when: - inventory_hostname == groups['kube_control_plane'][0] - cillium_bgpadvert_crd_ready is defined and cillium_bgpadvert_crd_ready.rc is defined and cillium_bgpadvert_crd_ready.rc == 0 @@ -220,7 +228,7 @@ dest: "{{ kube_config_dir }}/{{ item.name }}-{{ item.file }}" mode: "0644" with_items: - - { name: cilium, file: cilium-bgp-node-config-override.yml, type: CiliumBGPNodeConfigOverride } + - {name: cilium, file: cilium-bgp-node-config-override.yml, type: CiliumBGPNodeConfigOverride} when: - inventory_hostname == groups['kube_control_plane'][0] - cilium_bgp_node_config_crd_ready is defined and cilium_bgp_node_config_crd_ready.rc is defined and cilium_bgp_node_config_crd_ready.rc == 0 @@ -234,7 +242,7 @@ filename: "{{ kube_config_dir }}/{{ item.name }}-{{ item.file }}" state: "latest" loop: - - { name: cilium, file: cilium-bgp-node-config-override.yml, type: CiliumBGPNodeConfigOverride } + - {name: cilium, file: cilium-bgp-node-config-override.yml, type: CiliumBGPNodeConfigOverride} when: - inventory_hostname == groups['kube_control_plane'][0] - cilium_bgp_node_config_crd_ready is defined and cilium_bgp_node_config_crd_ready.rc is defined and cilium_bgp_node_config_crd_ready.rc == 0 diff --git a/roles/network_plugin/cilium/tasks/check.yml b/roles/network_plugin/cilium/tasks/check.yml index 7471fe36d01..11fcb23fb68 100644 --- a/roles/network_plugin/cilium/tasks/check.yml +++ b/roles/network_plugin/cilium/tasks/check.yml @@ -18,13 +18,13 @@ when: - cilium_ipsec_enabled is defined - cilium_ipsec_enabled - - kube_network_plugin == 'cilium' or cilium_deploy_additionally + - kube_network_plugin == 'cilium' or cilium_deploy_additionally | default(false) | bool - name: Stop if kernel version is too low for Cilium Wireguard encryption assert: that: ansible_kernel.split('-')[0] is version('5.6.0', '>=') when: - - kube_network_plugin == 'cilium' or cilium_deploy_additionally + - kube_network_plugin == 'cilium' or cilium_deploy_additionally | default(false) | bool - cilium_encryption_enabled - cilium_encryption_type == "wireguard" - not ignore_assert_errors @@ -48,7 +48,7 @@ msg: "cilium_encryption_type must be either 
'ipsec' or 'wireguard'" when: cilium_encryption_enabled -- name: Stop if cilium_version is < {{ cilium_min_version_required }} +- name: Stop if cilium_version is < 1.10.0 assert: that: cilium_version is version(cilium_min_version_required, '>=') msg: "cilium_version is too low. Minimum version {{ cilium_min_version_required }}" diff --git a/roles/network_plugin/cilium/tasks/install.yml b/roles/network_plugin/cilium/tasks/install.yml index 3819e7e80ff..e6e7e31b4f7 100644 --- a/roles/network_plugin/cilium/tasks/install.yml +++ b/roles/network_plugin/cilium/tasks/install.yml @@ -24,34 +24,70 @@ state: hard force: true loop: - - { s: "{{ kube_etcd_cacert_file }}", d: "ca_cert.crt" } - - { s: "{{ kube_etcd_cert_file }}", d: "cert.crt" } - - { s: "{{ kube_etcd_key_file }}", d: "key.pem" } + - {s: "{{ kube_etcd_cacert_file }}", d: "ca_cert.crt"} + - {s: "{{ kube_etcd_cert_file }}", d: "cert.crt"} + - {s: "{{ kube_etcd_key_file }}", d: "key.pem"} when: - cilium_identity_allocation_mode == "kvstore" -- name: Cilium | Enable portmap addon +- name: Cilium | Create hubble dir + file: + path: "{{ kube_config_dir }}/addons/hubble" + state: directory + owner: root + group: root + mode: "0755" + when: + - inventory_hostname == groups['kube_control_plane'][0] + - cilium_hubble_install + +- name: Cilium | Create Cilium node manifests template: - src: 000-cilium-portmap.conflist.j2 - dest: /etc/cni/net.d/000-cilium-portmap.conflist + src: "{{ item.name }}/{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.name }}-{{ item.file }}" mode: "0644" - when: cilium_enable_portmap + loop: + - {name: cilium, file: config.yml, type: cm} + - {name: cilium-operator, file: crb.yml, type: clusterrolebinding} + - {name: cilium-operator, file: cr.yml, type: clusterrole} + - {name: cilium, file: crb.yml, type: clusterrolebinding} + - {name: cilium, file: cr.yml, type: clusterrole} + - {name: cilium, file: secret.yml, type: secret, when: "{{ cilium_encryption_enabled and cilium_encryption_type == 'ipsec' }}"} + - {name: cilium, file: ds.yml, type: ds} + - {name: cilium-operator, file: deploy.yml, type: deploy} + - {name: cilium-operator, file: sa.yml, type: sa} + - {name: cilium, file: sa.yml, type: sa} + register: cilium_node_manifests + when: + - ('kube_control_plane' in group_names) + - item.when | default(True) | bool -- name: Cilium | Render values +- name: Cilium | Create Cilium Hubble manifests template: - src: values.yaml.j2 - dest: "{{ kube_config_dir }}/cilium-values.yaml" + src: "{{ item.name }}/{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/addons/hubble/{{ item.name }}-{{ item.file }}" mode: "0644" + loop: + - {name: hubble, file: config.yml, type: cm} + - {name: hubble, file: crb.yml, type: clusterrolebinding} + - {name: hubble, file: cr.yml, type: clusterrole} + - {name: hubble, file: cronjob.yml, type: cronjob, when: "{{ cilium_hubble_tls_generate }}"} + - {name: hubble, file: deploy.yml, type: deploy} + - {name: hubble, file: job.yml, type: job, when: "{{ cilium_hubble_tls_generate }}"} + - {name: hubble, file: sa.yml, type: sa} + - {name: hubble, file: service.yml, type: service} + register: cilium_hubble_manifests when: - inventory_hostname == groups['kube_control_plane'][0] + - cilium_enable_hubble and cilium_hubble_install + - item.when | default(True) | bool -- name: Cilium | Copy extra values - copy: - content: "{{ cilium_extra_values | to_nice_yaml(indent=2) }}" - dest: "{{ kube_config_dir }}/cilium-extra-values.yaml" +- name: Cilium | Enable portmap addon + template: + src: 
000-cilium-portmap.conflist.j2 + dest: /etc/cni/net.d/000-cilium-portmap.conflist mode: "0644" - when: - - inventory_hostname == groups['kube_control_plane'][0] + when: cilium_enable_portmap - name: Cilium | Copy Ciliumcli binary from download dir copy: diff --git a/roles/network_plugin/cilium/tasks/main.yml b/roles/network_plugin/cilium/tasks/main.yml index dcdad1f94b8..8123c5a4c49 100644 --- a/roles/network_plugin/cilium/tasks/main.yml +++ b/roles/network_plugin/cilium/tasks/main.yml @@ -5,10 +5,5 @@ - name: Cilium install include_tasks: install.yml -# Remove after 2.29 released -- name: Cilium remove old resources - when: cilium_remove_old_resources - include_tasks: remove_old_resources.yml - - name: Cilium apply include_tasks: apply.yml diff --git a/roles/network_plugin/cilium/tasks/remove_old_resources.yml b/roles/network_plugin/cilium/tasks/remove_old_resources.yml deleted file mode 100644 index 93bbcafac58..00000000000 --- a/roles/network_plugin/cilium/tasks/remove_old_resources.yml +++ /dev/null @@ -1,45 +0,0 @@ ---- -# Remove after 2.29 released -- name: Cilium | Delete Old Resource - command: | - {{ kubectl }} delete {{ item.kind | lower }} {{ item.name }} \ - {{ '-n kube-system' if item.kind not in ['ClusterRole', 'ClusterRoleBinding'] else '' }} \ - loop: - - { kind: ServiceAccount, name: cilium } - - { kind: ServiceAccount, name: cilium-operator } - - { kind: ServiceAccount, name: hubble-generate-certs } - - { kind: ServiceAccount, name: hubble-relay } - - { kind: ServiceAccount, name: hubble-ui } - - { kind: Service, name: hubble-metrics } - - { kind: Service, name: hubble-relay-metrics } - - { kind: Service, name: hubble-relay } - - { kind: Service, name: hubble-ui } - - { kind: Service, name: hubble-peer } - - { kind: Deployment, name: cilium-operator } - - { kind: Deployment, name: hubble-relay } - - { kind: Deployment, name: hubble-ui } - - { kind: DaemonSet, name: cilium } - - { kind: CronJob, name: hubble-generate-certs } - - { kind: Job, name: hubble-generate-certs } - - { kind: ConfigMap, name: cilium-config } - - { kind: ConfigMap, name: ip-masq-agent } - - { kind: ConfigMap, name: hubble-relay-config } - - { kind: ConfigMap, name: hubble-ui-nginx } - - { kind: ClusterRole, name: cilium } - - { kind: ClusterRole, name: cilium-operator } - - { kind: ClusterRole, name: hubble-generate-certs } - - { kind: ClusterRole, name: hubble-relay } - - { kind: ClusterRole, name: hubble-ui } - - { kind: ClusterRoleBinding, name: cilium } - - { kind: ClusterRoleBinding, name: cilium-operator } - - { kind: ClusterRoleBinding, name: hubble-generate-certs } - - { kind: ClusterRoleBinding, name: hubble-relay } - - { kind: ClusterRoleBinding, name: hubble-ui } - - { kind: Secret, name: hubble-ca-secret } - - { kind: Secret, name: hubble-relay-client-certs } - - { kind: Secret, name: hubble-server-certs } - register: patch_result - when: inventory_hostname == groups['kube_control_plane'][0] - failed_when: - - patch_result.rc != 0 - - "'not found' not in patch_result.stderr" diff --git a/roles/network_plugin/cilium/templates/cilium-operator/cr.yml.j2 b/roles/network_plugin/cilium/templates/cilium-operator/cr.yml.j2 new file mode 100644 index 00000000000..038d25fa881 --- /dev/null +++ b/roles/network_plugin/cilium/templates/cilium-operator/cr.yml.j2 @@ -0,0 +1,193 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cilium-operator +rules: +- apiGroups: + - "" + resources: + # to automatically delete [core|kube]dns pods so that are starting to being + # 
managed by Cilium + - pods + verbs: + - get + - list + - watch + - delete +- apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch +- apiGroups: + - "" + resources: + # To remove node taints + - nodes + # To set NetworkUnavailable false on startup + - nodes/status + verbs: + - patch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + # to perform LB IP allocation for BGP + - services/status + verbs: + - update + - patch +- apiGroups: + - "" + resources: + # to perform the translation of a CNP that contains `ToGroup` to its endpoints + - services + - endpoints + # to check apiserver connectivity + - namespaces + verbs: + - get + - list + - watch +- apiGroups: + - cilium.io + resources: + - ciliumnetworkpolicies + - ciliumnetworkpolicies/status + - ciliumnetworkpolicies/finalizers + - ciliumclusterwidenetworkpolicies + - ciliumclusterwidenetworkpolicies/status + - ciliumclusterwidenetworkpolicies/finalizers + - ciliumendpoints + - ciliumendpoints/status + - ciliumendpoints/finalizers + - ciliumnodes + - ciliumnodes/status + - ciliumnodes/finalizers + - ciliumidentities + - ciliumidentities/status + - ciliumidentities/finalizers + - ciliumlocalredirectpolicies + - ciliumlocalredirectpolicies/status + - ciliumlocalredirectpolicies/finalizers +{% if cilium_version is version('1.11', '>=') %} + - ciliumendpointslices +{% endif %} +{% if cilium_version is version('1.12', '>=') %} + - ciliumbgploadbalancerippools + - ciliumloadbalancerippools + - ciliumloadbalancerippools/status + - ciliumbgppeeringpolicies + - ciliumenvoyconfigs +{% endif %} +{% if cilium_version is version('1.15', '>=') %} + - ciliumbgppeerconfigs + - ciliumbgpadvertisements + - ciliumbgpnodeconfigs +{% endif %} +{% if cilium_version is version('1.16', '>=') %} + - ciliumbgpclusterconfigs + - ciliumbgpclusterconfigs/status + - ciliumbgpnodeconfigoverrides +{% endif %} + verbs: + - '*' +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - create + - get + - list + - update + - watch +# For cilium-operator running in HA mode. +# +# Cilium operator running in HA mode requires the use of ResourceLock for Leader Election +# between multiple running instances. +# The preferred way of doing this is to use LeasesResourceLock as edits to Leases are less +# common and fewer objects in the cluster watch "all Leases". 
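+# Note that only create/get/update are granted for Leases: each operator
+# replica works with a single, well-known Lease object for leader election,
+# so list/watch access should not be needed here.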
+- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update +{% if cilium_version is version('1.12', '>=') %} +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - update + resourceNames: + - ciliumbgploadbalancerippools.cilium.io + - ciliumbgppeeringpolicies.cilium.io + - ciliumclusterwideenvoyconfigs.cilium.io + - ciliumclusterwidenetworkpolicies.cilium.io + - ciliumegressgatewaypolicies.cilium.io + - ciliumegressnatpolicies.cilium.io + - ciliumendpoints.cilium.io + - ciliumendpointslices.cilium.io + - ciliumenvoyconfigs.cilium.io + - ciliumexternalworkloads.cilium.io + - ciliumidentities.cilium.io + - ciliumlocalredirectpolicies.cilium.io + - ciliumnetworkpolicies.cilium.io + - ciliumnodes.cilium.io +{% if cilium_version is version('1.14', '>=') %} + - ciliumnodeconfigs.cilium.io + - ciliumcidrgroups.cilium.io + - ciliuml2announcementpolicies.cilium.io + - ciliumpodippools.cilium.io + - ciliumloadbalancerippools.cilium.io +{% endif %} +{% if cilium_version is version('1.15', '>=') %} + - ciliumbgpclusterconfigs.cilium.io + - ciliumbgppeerconfigs.cilium.io + - ciliumbgpadvertisements.cilium.io + - ciliumbgpnodeconfigs.cilium.io + - ciliumbgpnodeconfigoverrides.cilium.io +{% endif %} +{% endif %} +{% for rules in cilium_clusterrole_rules_operator_extra_vars %} +- apiGroups: +{% for api in rules['apiGroups'] %} + - {{ api }} +{% endfor %} + resources: +{% for resource in rules['resources'] %} + - {{ resource }} +{% endfor %} + verbs: +{% for verb in rules['verbs'] %} + - {{ verb }} +{% endfor %} +{% if 'resourceNames' in rules %} + resourceNames: +{% for resourceName in rules['resourceNames'] %} + - {{ resourceName }} +{% endfor %} +{% endif %} +{% endfor %} diff --git a/roles/network_plugin/cilium/templates/cilium-operator/crb.yml.j2 b/roles/network_plugin/cilium/templates/cilium-operator/crb.yml.j2 new file mode 100644 index 00000000000..00f08353531 --- /dev/null +++ b/roles/network_plugin/cilium/templates/cilium-operator/crb.yml.j2 @@ -0,0 +1,13 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cilium-operator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cilium-operator +subjects: +- kind: ServiceAccount + name: cilium-operator + namespace: kube-system diff --git a/roles/network_plugin/cilium/templates/cilium-operator/deploy.yml.j2 b/roles/network_plugin/cilium/templates/cilium-operator/deploy.yml.j2 new file mode 100644 index 00000000000..421b908b66f --- /dev/null +++ b/roles/network_plugin/cilium/templates/cilium-operator/deploy.yml.j2 @@ -0,0 +1,170 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cilium-operator + namespace: kube-system + labels: + io.cilium/app: operator + name: cilium-operator +spec: +{% if groups.k8s_cluster | length == 1 %} + replicas: 1 +{% else %} + replicas: {{ cilium_operator_replicas }} +{% endif %} + selector: + matchLabels: + io.cilium/app: operator + name: cilium-operator + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: +{% if cilium_enable_prometheus %} + annotations: + prometheus.io/port: "{{ cilium_operator_scrape_port }}" + prometheus.io/scrape: "true" +{% endif %} + labels: + io.cilium/app: operator + name: cilium-operator + spec: + containers: + - name: cilium-operator + image: "{{ cilium_operator_image_repo }}:{{ cilium_operator_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + command: + - 
cilium-operator + args: + - --config-dir=/tmp/cilium/config-map + - --debug=$(CILIUM_DEBUG) +{% if cilium_operator_custom_args is string %} + - {{ cilium_operator_custom_args }} +{% else %} +{% for flag in cilium_operator_custom_args %} + - {{ flag }} +{% endfor %} +{% endif %} + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CILIUM_DEBUG + valueFrom: + configMapKeyRef: + key: debug + name: cilium-config + optional: true + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + name: cilium-aws + key: AWS_ACCESS_KEY_ID + optional: true + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + name: cilium-aws + key: AWS_SECRET_ACCESS_KEY + optional: true + - name: AWS_DEFAULT_REGION + valueFrom: + secretKeyRef: + name: cilium-aws + key: AWS_DEFAULT_REGION + optional: true +{% if (cilium_kube_proxy_replacement == 'strict') or (cilium_kube_proxy_replacement | bool) or (cilium_kube_proxy_replacement | string | lower == 'true') %} + - name: KUBERNETES_SERVICE_HOST + value: "{{ kube_apiserver_global_endpoint | urlsplit('hostname') }}" + - name: KUBERNETES_SERVICE_PORT + value: "{{ kube_apiserver_global_endpoint | urlsplit('port') }}" +{% endif %} +{% if cilium_enable_prometheus %} + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + ports: + - name: prometheus + containerPort: {{ cilium_operator_scrape_port }} + hostPort: {{ cilium_operator_scrape_port }} + protocol: TCP +{% endif %} + livenessProbe: + httpGet: +{% if cilium_enable_ipv4 %} + host: 127.0.0.1 +{% else %} + host: '::1' +{% endif %} + path: /healthz + port: 9234 + scheme: HTTP + initialDelaySeconds: 60 + periodSeconds: 10 + timeoutSeconds: 3 + volumeMounts: + - name: cilium-config-path + mountPath: /tmp/cilium/config-map + readOnly: true +{% if cilium_identity_allocation_mode == "kvstore" %} + - name: etcd-config-path + mountPath: /var/lib/etcd-config + readOnly: true + - name: etcd-secrets + mountPath: "{{ cilium_cert_dir }}" + readOnly: true +{% endif %} +{% for volume_mount in cilium_operator_extra_volume_mounts %} + - {{ volume_mount | to_nice_yaml(indent=2) | indent(14) }} +{% endfor %} + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + restartPolicy: Always + priorityClassName: system-node-critical + serviceAccount: cilium-operator + serviceAccountName: cilium-operator + # In HA mode, cilium-operator pods must not be scheduled on the same + # node as they will clash with each other. 
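+      # Because the anti-affinity below is required (not preferred), any
+      # replicas beyond the number of schedulable nodes will stay Pending;
+      # keep cilium_operator_replicas at or below the node count.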
+ affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + io.cilium/app: operator + tolerations: + - operator: Exists + volumes: + - name: cilium-config-path + configMap: + name: cilium-config +{% if cilium_identity_allocation_mode == "kvstore" %} + # To read the etcd config stored in config maps + - name: etcd-config-path + configMap: + name: cilium-config + defaultMode: 420 + items: + - key: etcd-config + path: etcd.config + # To read the k8s etcd secrets in case the user might want to use TLS + - name: etcd-secrets + hostPath: + path: "{{ cilium_cert_dir }}" +{% endif %} +{% for volume in cilium_operator_extra_volumes %} + - {{ volume | to_nice_yaml(indent=2) | indent(10) }} +{% endfor %} diff --git a/roles/network_plugin/cilium/templates/cilium-operator/sa.yml.j2 b/roles/network_plugin/cilium/templates/cilium-operator/sa.yml.j2 new file mode 100644 index 00000000000..c5d1893643b --- /dev/null +++ b/roles/network_plugin/cilium/templates/cilium-operator/sa.yml.j2 @@ -0,0 +1,6 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cilium-operator + namespace: kube-system diff --git a/roles/network_plugin/cilium/templates/cilium/cilium-loadbalancer-ip-pool.yml.j2 b/roles/network_plugin/cilium/templates/cilium/cilium-loadbalancer-ip-pool.yml.j2 index 8a2a84031e4..827b2f3ca68 100644 --- a/roles/network_plugin/cilium/templates/cilium/cilium-loadbalancer-ip-pool.yml.j2 +++ b/roles/network_plugin/cilium/templates/cilium/cilium-loadbalancer-ip-pool.yml.j2 @@ -6,11 +6,7 @@ metadata: name: "{{ cilium_loadbalancer_ip_pool.name }}" spec: blocks: -{% for cblock in cilium_loadbalancer_ip_pool.cidrs | default([]) %} +{% for cblock in cilium_loadbalancer_ip_pool.cidrs %} - cidr: "{{ cblock }}" {% endfor %} -{% for rblock in cilium_loadbalancer_ip_pool.ranges | default([]) %} - - start: "{{ rblock.start }}" - stop: "{{ rblock.stop | default(rblock.start) }}" -{% endfor %} {% endfor %} diff --git a/roles/network_plugin/cilium/templates/cilium/config.yml.j2 b/roles/network_plugin/cilium/templates/cilium/config.yml.j2 new file mode 100644 index 00000000000..83bae464556 --- /dev/null +++ b/roles/network_plugin/cilium/templates/cilium/config.yml.j2 @@ -0,0 +1,299 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cilium-config + namespace: kube-system +data: + identity-allocation-mode: {{ cilium_identity_allocation_mode }} + +{% if cilium_identity_allocation_mode == "kvstore" %} + # This etcd-config contains the etcd endpoints of your cluster. 
If you use + # TLS please make sure you follow the tutorial in https://cilium.link/etcd-config + etcd-config: |- + --- + endpoints: +{% for ip_addr in etcd_access_addresses.split(',') %} + - {{ ip_addr }} +{% endfor %} + + # In case you want to use TLS in etcd, uncomment the 'ca-file' line + # and create a kubernetes secret by following the tutorial in + # https://cilium.link/etcd-config +{% if cilium_version | regex_replace('v') is version('1.17.0', '>=') %} + trusted-ca-file: "{{ cilium_cert_dir }}/ca_cert.crt" +{% else %} + ca-file: "{{ cilium_cert_dir }}/ca_cert.crt" +{% endif %} + + # In case you want client to server authentication, uncomment the following + # lines and create a kubernetes secret by following the tutorial in + # https://cilium.link/etcd-config + key-file: "{{ cilium_cert_dir }}/key.pem" + cert-file: "{{ cilium_cert_dir }}/cert.crt" + + # kvstore + # https://docs.cilium.io/en/latest/cmdref/kvstore/ + kvstore: etcd + kvstore-opt: '{"etcd.config": "/var/lib/etcd-config/etcd.config"}' +{% endif %} + + # If you want metrics enabled in all of your Cilium agents, set the port for + # which the Cilium agents will have their metrics exposed. + # This option deprecates the "prometheus-serve-addr" in the + # "cilium-metrics-config" ConfigMap + # NOTE that this will open the port on ALL nodes where Cilium pods are + # scheduled. +{% if cilium_enable_prometheus %} + prometheus-serve-addr: ":{{ cilium_agent_scrape_port }}" + operator-prometheus-serve-addr: ":{{ cilium_operator_scrape_port }}" + enable-metrics: "true" +{% endif %} + + # If you want to run cilium in debug mode change this value to true + debug: "{{ cilium_debug }}" + enable-ipv4: "{{ cilium_enable_ipv4 }}" + enable-ipv6: "{{ cilium_enable_ipv6 }}" + # If a serious issue occurs during Cilium startup, this + # invasive option may be set to true to remove all persistent + # state. Endpoints will not be restored using knowledge from a + # prior Cilium run, so they may receive new IP addresses upon + # restart. This also triggers clean-cilium-bpf-state. + clean-cilium-state: "false" + # If you want to clean cilium BPF state, set this to true; + # Removes all BPF maps from the filesystem. Upon restart, + # endpoints are restored with the same IP addresses, however + # any ongoing connections may be disrupted briefly. + # Loadbalancing decisions will be reset, so any ongoing + # connections via a service may be loadbalanced to a different + # backend after restart. + clean-cilium-bpf-state: "false" + + # Users who wish to specify their own custom CNI configuration file must set + # custom-cni-conf to "true", otherwise Cilium may overwrite the configuration. + custom-cni-conf: "false" + +{% if cilium_version is version('1.14.0', '>=') %} + # Tell the agent to generate and write a CNI configuration file + write-cni-conf-when-ready: /host/etc/cni/net.d/05-cilium.conflist + cni-exclusive: "{{ cilium_cni_exclusive }}" + cni-log-file: "{{ cilium_cni_log_file }}" +{% endif %} + + # If you want cilium monitor to aggregate tracing for packets, set this level + # to "low", "medium", or "maximum". The higher the level, the less packets + # that will be seen in monitor output. + monitor-aggregation: "{{ cilium_monitor_aggregation }}" + + # ct-global-max-entries-* specifies the maximum number of connections + # supported across all endpoints, split by protocol: tcp or other. One pair + # of maps uses these values for IPv4 connections, and another pair of maps + # use these values for IPv6 connections. 
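+  # (With both enable-ipv4 and enable-ipv6 true, that means four tables per
+  # node: two sized 524288 for TCP and two sized 262144 for other protocols.)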
+  #
+  # If these values are modified, then during the next Cilium startup the
+  # tracking of ongoing connections may be disrupted. This may lead to brief
+  # policy drops or a change in loadbalancing decisions for a connection.
+  #
+  # For users upgrading from Cilium 1.2 or earlier, to minimize disruption
+  # during the upgrade process, comment out these options.
+  bpf-ct-global-tcp-max: "524288"
+  bpf-ct-global-any-max: "262144"
+
+  # Pre-allocation of map entries allows per-packet latency to be reduced, at
+  # the expense of up-front memory allocation for the entries in the maps. The
+  # default value below will minimize memory usage in the default installation;
+  # users who are sensitive to latency may consider setting this to "true".
+  #
+  # This option was introduced in Cilium 1.4. Cilium 1.3 and earlier ignore
+  # this option and behave as though it is set to "true".
+  #
+  # If this value is modified, then during the next Cilium startup the restore
+  # of existing endpoints and tracking of ongoing connections may be disrupted.
+  # This may lead to policy drops or a change in loadbalancing decisions for a
+  # connection for some time. Endpoints may need to be recreated to restore
+  # connectivity.
+  #
+  # If this option is set to "false" during an upgrade from 1.3 or earlier to
+  # 1.4 or later, then it may cause one-time disruptions during the upgrade.
+  preallocate-bpf-maps: "{{ cilium_preallocate_bpf_maps }}"
+
+  # Regular expression matching compatible Istio sidecar istio-proxy
+  # container image names
+  sidecar-istio-proxy-image: "cilium/istio_proxy"
+
+  # Encapsulation mode for communication between nodes
+  # Possible values:
+  #   - disabled
+  #   - vxlan (default)
+  #   - geneve
+{% if cilium_version is version('1.14.0', '<') %}
+  tunnel: "{{ cilium_tunnel_mode }}"
+{% elif cilium_version is version('1.14.0', '>=') and cilium_tunnel_mode == 'disabled' %}
+  routing-mode: 'native'
+{% elif cilium_version is version('1.14.0', '>=') and cilium_tunnel_mode != 'disabled' %}
+  routing-mode: 'tunnel'
+  tunnel-protocol: "{{ cilium_tunnel_mode }}"
+{% endif %}
+
+  ## DSR setting
+  bpf-lb-mode: "{{ cilium_loadbalancer_mode }}"
+
+  # L2 announcements
+  enable-l2-announcements: "{{ cilium_l2announcements }}"
+
+  # Enable Bandwidth Manager
+  # Cilium's bandwidth manager supports the kubernetes.io/egress-bandwidth Pod annotation.
+  # Bandwidth enforcement currently does not work in combination with L7 Cilium Network Policies:
+  # if such a policy selects a Pod at egress, bandwidth enforcement is disabled for that Pod.
+  # Bandwidth Manager requires a v5.1.x or more recent Linux kernel.
+{% if cilium_enable_bandwidth_manager %}
+  enable-bandwidth-manager: "true"
+{% endif %}
+
+  # Host Firewall and Policy Audit Mode
+  enable-host-firewall: "{{ cilium_enable_host_firewall | capitalize }}"
+  policy-audit-mode: "{{ cilium_policy_audit_mode | capitalize }}"
+
+  # Name of the cluster. Only relevant when building a mesh of clusters.
+  cluster-name: "{{ cilium_cluster_name }}"
+
+  # Unique ID of the cluster. Must be unique across all connected clusters and
+  # in the range of 1 and 255. Only relevant when building a mesh of clusters.
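+  # Kubespray only emits cluster-id when the optional cilium_cluster_id
+  # variable is set in the inventory, e.g. (illustrative value):
+  #   cilium_cluster_id: 7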
+ #cluster-id: 1 +{% if cilium_cluster_id is defined %} + cluster-id: "{{ cilium_cluster_id }}" +{% endif %} + +# `wait-bpf-mount` is removed after v1.10.4 +# https://github.com/cilium/cilium/commit/d2217045cb3726a7f823174e086913b69b8090da +{% if cilium_version is version('1.10.4', '<') %} + # wait-bpf-mount makes init container wait until bpf filesystem is mounted + wait-bpf-mount: "false" +{% endif %} + +# `kube-proxy-replacement=partial|strict|disabled` is deprecated since january 2024 and unsupported in 1.16. +# Replaced by `kube-proxy-replacement=true|false` +# https://github.com/cilium/cilium/pull/31286 +{% if cilium_version is version('1.16', '<') %} + kube-proxy-replacement: "{{ cilium_kube_proxy_replacement }}" +{% else %} + kube-proxy-replacement: "{% if (cilium_kube_proxy_replacement == 'strict') or (cilium_kube_proxy_replacement | bool) or (cilium_kube_proxy_replacement | string | lower == 'true') %}true{% else %}false{% endif %}" +{% endif %} + +# `native-routing-cidr` is deprecated in 1.10, removed in 1.12. +# Replaced by `ipv4-native-routing-cidr` +# https://github.com/cilium/cilium/pull/16695 +{% if cilium_version is version('1.12', '<') %} + native-routing-cidr: "{{ cilium_native_routing_cidr }}" +{% else %} +{% if cilium_native_routing_cidr | length %} + ipv4-native-routing-cidr: "{{ cilium_native_routing_cidr }}" +{% endif %} +{% if cilium_native_routing_cidr_ipv6 | length %} + ipv6-native-routing-cidr: "{{ cilium_native_routing_cidr_ipv6 }}" +{% endif %} +{% endif %} + + auto-direct-node-routes: "{{ cilium_auto_direct_node_routes }}" + + operator-api-serve-addr: "{{ cilium_operator_api_serve_addr }}" + + # Hubble settings +{% if cilium_enable_hubble %} + enable-hubble: "true" +{% if cilium_enable_hubble_metrics %} + hubble-metrics-server: ":{{ cilium_hubble_scrape_port }}" + hubble-metrics: +{% for hubble_metrics_cycle in cilium_hubble_metrics %} + {{ hubble_metrics_cycle }} +{% endfor %} +{% endif %} +{% if cilium_hubble_event_buffer_capacity is defined %} + hubble-event-buffer-capacity: "{{ cilium_hubble_event_buffer_capacity }}" +{% endif %} +{% if cilium_hubble_event_queue_size is defined %} + hubble-event-queue-size: "{{ cilium_hubble_event_queue_size }}" +{% endif %} + hubble-listen-address: ":4244" +{% if cilium_enable_hubble and cilium_hubble_install %} + hubble-disable-tls: "{% if cilium_hubble_tls_generate %}false{% else %}true{% endif %}" + hubble-tls-cert-file: /var/lib/cilium/tls/hubble/server.crt + hubble-tls-key-file: /var/lib/cilium/tls/hubble/server.key + hubble-tls-client-ca-files: /var/lib/cilium/tls/hubble/client-ca.crt +{% endif %} +{% endif %} + + # IP Masquerade Agent + enable-ip-masq-agent: "{{ cilium_ip_masq_agent_enable }}" + +{% for key, value in cilium_config_extra_vars.items() %} + {{ key }}: "{{ value }}" +{% endfor %} + + # Enable transparent network encryption +{% if cilium_encryption_enabled %} +{% if cilium_encryption_type == "ipsec" %} + enable-ipsec: "true" + ipsec-key-file: /etc/ipsec/keys + encrypt-node: "{{ cilium_ipsec_node_encryption }}" +{% endif %} + +{% if cilium_encryption_type == "wireguard" %} + enable-wireguard: "true" + enable-wireguard-userspace-fallback: "{{ cilium_wireguard_userspace_fallback }}" +{% endif %} +{% endif %} + + # IPAM settings + ipam: "{{ cilium_ipam_mode }}" +{% if cilium_ipam_mode == "cluster-pool" %} + cluster-pool-ipv4-cidr: "{{ cilium_pool_cidr | default(kube_pods_subnet) }}" + cluster-pool-ipv4-mask-size: "{{ cilium_pool_mask_size | default(kube_network_node_prefix) }}" +{% if cilium_enable_ipv6 %} 
+ cluster-pool-ipv6-cidr: "{{ cilium_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6) }}" + cluster-pool-ipv6-mask-size: "{{ cilium_pool_mask_size_ipv6 | default(kube_network_node_prefix_ipv6) }}" +{% endif %} +{% endif %} + + agent-health-port: "{{ cilium_agent_health_port }}" + +{% if cilium_version is version('1.11', '>=') and cilium_cgroup_host_root != '' %} + cgroup-root: "{{ cilium_cgroup_host_root }}" +{% endif %} + + bpf-map-dynamic-size-ratio: "{{ cilium_bpf_map_dynamic_size_ratio }}" + + enable-ipv4-masquerade: "{{ cilium_enable_ipv4_masquerade }}" + enable-ipv6-masquerade: "{{ cilium_enable_ipv6_masquerade }}" + + enable-bpf-masquerade: "{{ cilium_enable_bpf_masquerade }}" + + enable-host-legacy-routing: "{{ cilium_enable_host_legacy_routing }}" + + enable-remote-node-identity: "{{ cilium_enable_remote_node_identity }}" + + enable-well-known-identities: "{{ cilium_enable_well_known_identities }}" + + monitor-aggregation-flags: "{{ cilium_monitor_aggregation_flags }}" + + enable-bpf-clock-probe: "{{ cilium_enable_bpf_clock_probe }}" + + enable-bgp-control-plane: "{{ cilium_enable_bgp_control_plane }}" + + disable-cnp-status-updates: "{{ cilium_disable_cnp_status_updates }}" +{% if cilium_ip_masq_agent_enable %} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: ip-masq-agent + namespace: kube-system +data: + config: | + nonMasqueradeCIDRs: +{% for cidr in cilium_non_masquerade_cidrs %} + - {{ cidr }} +{% endfor %} + masqLinkLocal: {{ cilium_masq_link_local | bool }} + resyncInterval: "{{ cilium_ip_masq_resync_interval }}" +{% endif %} diff --git a/roles/network_plugin/cilium/templates/cilium/cr.yml.j2 b/roles/network_plugin/cilium/templates/cilium/cr.yml.j2 new file mode 100644 index 00000000000..055c0f43c23 --- /dev/null +++ b/roles/network_plugin/cilium/templates/cilium/cr.yml.j2 @@ -0,0 +1,166 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cilium +rules: +- apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - get + - list + - watch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - namespaces + - services + - pods + - endpoints + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - secrets + verbs: + - get +{% if cilium_version is version('1.12', '<') %} +- apiGroups: + - "" + resources: + - pods + - pods/finalizers + verbs: + - get + - list + - watch + - update + - delete +- apiGroups: + - "" + resources: + - pods + - nodes + verbs: + - get + - list + - watch + - update +{% endif %} +- apiGroups: + - "" + resources: + - nodes + - nodes/status + verbs: + - patch +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + # Deprecated for removal in v1.10 + - create + - list + - watch + - update + + # This is used when validating policies in preflight. This will need to stay + # until we figure out how to avoid "get" inside the preflight, and then + # should be removed ideally. 
+ - get +- apiGroups: + - cilium.io + resources: + - ciliumnetworkpolicies + - ciliumnetworkpolicies/status + - ciliumclusterwidenetworkpolicies + - ciliumclusterwidenetworkpolicies/status + - ciliumendpoints + - ciliumendpoints/status + - ciliumnodes + - ciliumnodes/status + - ciliumidentities + - ciliumlocalredirectpolicies + - ciliumlocalredirectpolicies/status + - ciliumegressnatpolicies +{% if cilium_version is version('1.11', '>=') %} + - ciliumendpointslices +{% endif %} +{% if cilium_version is version('1.12', '>=') %} + - ciliumbgploadbalancerippools + - ciliumbgppeeringpolicies +{% if cilium_version is version('1.13', '>=') %} + - ciliumloadbalancerippools +{% endif %} +{% endif %} +{% if cilium_version is version('1.11.5', '<') %} + - ciliumnetworkpolicies/finalizers + - ciliumclusterwidenetworkpolicies/finalizers + - ciliumendpoints/finalizers + - ciliumnodes/finalizers + - ciliumidentities/finalizers + - ciliumlocalredirectpolicies/finalizers +{% endif %} +{% if cilium_version is version('1.14', '>=') %} + - ciliuml2announcementpolicies/status +{% endif %} +{% if cilium_version is version('1.15', '>=') %} + - ciliumbgpnodeconfigs + - ciliumbgpnodeconfigs/status + - ciliumbgpadvertisements + - ciliumbgppeerconfigs +{% endif %} +{% if cilium_version is version('1.16', '>=') %} + - ciliumbgpclusterconfigs +{% endif %} + verbs: + - '*' +{% if cilium_version is version('1.12', '>=') %} +- apiGroups: + - cilium.io + resources: + - ciliumclusterwideenvoyconfigs + - ciliumenvoyconfigs + - ciliumegressgatewaypolicies + verbs: + - list + - watch +{% endif %} +{% if cilium_version is version('1.14', '>=') %} +- apiGroups: + - cilium.io + resources: + - ciliumcidrgroups + - ciliuml2announcementpolicies + - ciliumpodippools + - ciliumloadbalancerippools + - ciliuml2announcementpolicies/status + verbs: + - list + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update + - list + - delete +{% endif %} diff --git a/roles/network_plugin/cilium/templates/cilium/crb.yml.j2 b/roles/network_plugin/cilium/templates/cilium/crb.yml.j2 new file mode 100644 index 00000000000..d23897fa04b --- /dev/null +++ b/roles/network_plugin/cilium/templates/cilium/crb.yml.j2 @@ -0,0 +1,13 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cilium +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cilium +subjects: +- kind: ServiceAccount + name: cilium + namespace: kube-system diff --git a/roles/network_plugin/cilium/templates/cilium/ds.yml.j2 b/roles/network_plugin/cilium/templates/cilium/ds.yml.j2 new file mode 100644 index 00000000000..8371d85d106 --- /dev/null +++ b/roles/network_plugin/cilium/templates/cilium/ds.yml.j2 @@ -0,0 +1,446 @@ +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cilium + namespace: kube-system + labels: + k8s-app: cilium +spec: + selector: + matchLabels: + k8s-app: cilium + updateStrategy: + rollingUpdate: + # Specifies the maximum number of Pods that can be unavailable during the update process. 
+ maxUnavailable: 2 + type: RollingUpdate + template: + metadata: + annotations: +{% if cilium_enable_prometheus %} + prometheus.io/port: "{{ cilium_agent_scrape_port }}" + prometheus.io/scrape: "true" +{% endif %} + scheduler.alpha.kubernetes.io/tolerations: '[{"key":"dedicated","operator":"Equal","value":"master","effect":"NoSchedule"}]' + labels: + k8s-app: cilium + spec: + containers: + - name: cilium-agent + image: "{{ cilium_image_repo }}:{{ cilium_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + command: + - cilium-agent + args: + - --config-dir=/tmp/cilium/config-map +{% if cilium_mtu != "" %} + - --mtu={{ cilium_mtu }} +{% endif %} +{% if cilium_agent_custom_args is string %} + - {{ cilium_agent_custom_args }} +{% else %} +{% for flag in cilium_agent_custom_args %} + - {{ flag }} +{% endfor %} +{% endif %} + startupProbe: + httpGet: + host: '127.0.0.1' + path: /healthz + port: {{ cilium_agent_health_port }} + scheme: HTTP + httpHeaders: + - name: "brief" + value: "true" + failureThreshold: 105 + periodSeconds: 2 + successThreshold: 1 + livenessProbe: + httpGet: + host: '127.0.0.1' + path: /healthz + port: {{ cilium_agent_health_port }} + scheme: HTTP + httpHeaders: + - name: "brief" + value: "true" + failureThreshold: 10 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 5 + readinessProbe: + httpGet: + host: 127.0.0.1 + path: /healthz + port: {{ cilium_agent_health_port }} + scheme: HTTP + httpHeaders: + - name: "brief" + value: "true" + initialDelaySeconds: 5 + periodSeconds: 30 + successThreshold: 1 + failureThreshold: 3 + timeoutSeconds: 5 + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CILIUM_CLUSTERMESH_CONFIG + value: /var/lib/cilium/clustermesh/ +{% if (cilium_kube_proxy_replacement == 'strict') or (cilium_kube_proxy_replacement | bool) or (cilium_kube_proxy_replacement | string | lower == 'true') %} + - name: KUBERNETES_SERVICE_HOST + value: "{{ kube_apiserver_global_endpoint | urlsplit('hostname') }}" + - name: KUBERNETES_SERVICE_PORT + value: "{{ kube_apiserver_global_endpoint | urlsplit('port') }}" +{% endif %} +{% for env_var in cilium_agent_extra_env_vars %} + - {{ env_var | to_nice_yaml(indent=2) | indent(10) }} +{% endfor %} + lifecycle: +{% if cilium_version is version('1.14', '<') %} + postStart: + exec: + command: + - "/cni-install.sh" + - "--cni-exclusive={{ cilium_cni_exclusive | string | lower }}" +{% if cilium_version is version('1.12', '>=') %} + - "--enable-debug={{ cilium_debug | string | lower }}" + - "--log-file={{ cilium_cni_log_file }}" +{% endif %} +{% endif %} + preStop: + exec: + command: + - /cni-uninstall.sh + resources: + limits: + cpu: {{ cilium_cpu_limit }} + memory: {{ cilium_memory_limit }} + requests: + cpu: {{ cilium_cpu_requests }} + memory: {{ cilium_memory_requests }} +{% if cilium_enable_prometheus or cilium_enable_hubble_metrics %} + ports: +{% endif %} +{% if cilium_enable_prometheus %} + - name: prometheus + containerPort: {{ cilium_agent_scrape_port }} + hostPort: {{ cilium_agent_scrape_port }} + protocol: TCP +{% endif %} +{% if cilium_enable_hubble_metrics %} + - name: hubble-metrics + containerPort: {{ cilium_hubble_scrape_port }} + hostPort: {{ cilium_hubble_scrape_port }} + protocol: TCP +{% endif %} + securityContext: + privileged: true + volumeMounts: + - name: bpf-maps + mountPath: /sys/fs/bpf + mountPropagation: Bidirectional + - name: 
cilium-run + mountPath: /var/run/cilium +{% if cilium_version is version('1.13.1', '<') %} + - name: cni-path + mountPath: /host/opt/cni/bin +{% endif %} + - name: etc-cni-netd + mountPath: /host/etc/cni/net.d +{% if cilium_identity_allocation_mode == "kvstore" %} + - name: etcd-config-path + mountPath: /var/lib/etcd-config + readOnly: true + - name: etcd-secrets + mountPath: "{{ cilium_cert_dir }}" + readOnly: true +{% endif %} + - name: clustermesh-secrets + mountPath: /var/lib/cilium/clustermesh + readOnly: true + - name: cilium-config-path + mountPath: /tmp/cilium/config-map + readOnly: true +{% if cilium_ip_masq_agent_enable %} + - name: ip-masq-agent + mountPath: /etc/config + readOnly: true +{% endif %} + # Needed to be able to load kernel modules + - name: lib-modules + mountPath: /lib/modules + readOnly: true + - name: xtables-lock + mountPath: /run/xtables.lock +{% if cilium_encryption_enabled and cilium_encryption_type == "ipsec" %} + - name: cilium-ipsec-secrets + mountPath: /etc/ipsec + readOnly: true +{% endif %} +{% if cilium_hubble_install %} + - name: hubble-tls + mountPath: /var/lib/cilium/tls/hubble + readOnly: true +{% endif %} +{% for volume_mount in cilium_agent_extra_volume_mounts %} + - {{ volume_mount | to_nice_yaml(indent=2) | indent(10) }} +{% endfor %} +# In managed etcd mode, Cilium must be able to resolve the DNS name of the etcd service +{% if cilium_identity_allocation_mode == "kvstore" %} + dnsPolicy: ClusterFirstWithHostNet +{% endif %} + hostNetwork: true + initContainers: +{% if cilium_version is version('1.11', '>=') and cilium_cgroup_auto_mount %} + - name: mount-cgroup + image: "{{ cilium_image_repo }}:{{ cilium_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + env: + - name: CGROUP_ROOT + value: {{ cilium_cgroup_host_root }} + - name: BIN_PATH + value: /opt/cni/bin + command: + - sh + - -ec + # The statically linked Go program binary is invoked to avoid any + # dependency on utilities like sh and mount that can be missing on certain + # distros installed on the underlying host. Copy the binary to the + # same directory where we install cilium cni plugin so that exec permissions + # are available. + - | + cp /usr/bin/cilium-mount /hostbin/cilium-mount; + nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT; + rm /hostbin/cilium-mount + volumeMounts: + - name: hostproc + mountPath: /hostproc + - name: cni-path + mountPath: /hostbin + securityContext: + privileged: true +{% endif %} +{% if cilium_version is version('1.11.7', '>=') %} + - name: apply-sysctl-overwrites + image: "{{ cilium_image_repo }}:{{ cilium_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + env: + - name: BIN_PATH + value: /opt/cni/bin + command: + - sh + - -ec + # The statically linked Go program binary is invoked to avoid any + # dependency on utilities like sh that can be missing on certain + # distros installed on the underlying host. Copy the binary to the + # same directory where we install cilium cni plugin so that exec permissions + # are available. 
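+        # /hostproc is the host's /proc (the "hostproc" hostPath volume
+        # declared at the bottom of this manifest), which is what lets
+        # nsenter reach PID 1's mount namespace without hostPID on the pod.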
+ - | + cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix; + nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix"; + rm /hostbin/cilium-sysctlfix + volumeMounts: + - name: hostproc + mountPath: /hostproc + - name: cni-path + mountPath: /hostbin + securityContext: + privileged: true +{% endif %} + - name: clean-cilium-state + image: "{{ cilium_image_repo }}:{{ cilium_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + command: + - /init-container.sh + env: + - name: CILIUM_ALL_STATE + valueFrom: + configMapKeyRef: + name: cilium-config + key: clean-cilium-state + optional: true + - name: CILIUM_BPF_STATE + valueFrom: + configMapKeyRef: + name: cilium-config + key: clean-cilium-bpf-state + optional: true +# Removed in 1.11 and up. +# https://github.com/cilium/cilium/commit/f7a3f59fd74983c600bfce9cac364b76d20849d9 +{% if cilium_version is version('1.11', '<') %} + - name: CILIUM_WAIT_BPF_MOUNT + valueFrom: + configMapKeyRef: + key: wait-bpf-mount + name: cilium-config + optional: true +{% endif %} +{% if (cilium_kube_proxy_replacement == 'strict') or (cilium_kube_proxy_replacement | bool) or (cilium_kube_proxy_replacement | string | lower == 'true') %} + - name: KUBERNETES_SERVICE_HOST + value: "{{ kube_apiserver_global_endpoint | urlsplit('hostname') }}" + - name: KUBERNETES_SERVICE_PORT + value: "{{ kube_apiserver_global_endpoint | urlsplit('port') }}" +{% endif %} + securityContext: + privileged: true + volumeMounts: + - name: bpf-maps + mountPath: /sys/fs/bpf +{% if cilium_version is version('1.11', '>=') %} + # Required to mount cgroup filesystem from the host to cilium agent pod + - name: cilium-cgroup + mountPath: {{ cilium_cgroup_host_root }} + mountPropagation: HostToContainer +{% endif %} + - name: cilium-run + mountPath: /var/run/cilium + resources: + requests: + cpu: 100m + memory: 100Mi +{% if cilium_version is version('1.13.1', '>=') %} + # Install the CNI binaries in an InitContainer so we don't have a writable host mount in the agent + - name: install-cni-binaries + image: "{{ cilium_image_repo }}:{{ cilium_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + command: + - "/install-plugin.sh" + resources: + requests: + cpu: 100m + memory: 10Mi + securityContext: + privileged: true + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - name: cni-path + mountPath: /host/opt/cni/bin +{% endif %} + restartPolicy: Always + priorityClassName: system-node-critical + serviceAccount: cilium + serviceAccountName: cilium + terminationGracePeriodSeconds: 1 + hostNetwork: true +# In managed etcd mode, Cilium must be able to resolve the DNS name of the etcd service +{% if cilium_identity_allocation_mode == "kvstore" %} + dnsPolicy: ClusterFirstWithHostNet +{% endif %} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + k8s-app: cilium + tolerations: + - operator: Exists + volumes: + # To keep state between restarts / upgrades + - name: cilium-run + hostPath: + path: /var/run/cilium + type: DirectoryOrCreate + # To keep state between restarts / upgrades for bpf maps + - name: bpf-maps + hostPath: + path: /sys/fs/bpf + type: DirectoryOrCreate +{% if cilium_version is version('1.11', '>=') %} + # To mount cgroup2 filesystem on the host + - name: hostproc + hostPath: + path: /proc + type: Directory + # To keep state between restarts / upgrades for cgroup2 filesystem + - name: cilium-cgroup + hostPath: + path: {{ cilium_cgroup_host_root }} + 
type: DirectoryOrCreate +{% endif %} + # To install cilium cni plugin in the host + - name: cni-path + hostPath: + path: /opt/cni/bin + type: DirectoryOrCreate + # To install cilium cni configuration in the host + - name: etc-cni-netd + hostPath: + path: /etc/cni/net.d + type: DirectoryOrCreate + # To be able to load kernel modules + - name: lib-modules + hostPath: + path: /lib/modules + # To access iptables concurrently with other processes (e.g. kube-proxy) + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate +{% if cilium_identity_allocation_mode == "kvstore" %} + # To read the etcd config stored in config maps + - name: etcd-config-path + configMap: + name: cilium-config + # note: the leading zero means this number is in octal representation: do not remove it + defaultMode: 0400 + items: + - key: etcd-config + path: etcd.config + # To read the k8s etcd secrets in case the user might want to use TLS + - name: etcd-secrets + hostPath: + path: "{{ cilium_cert_dir }}" +{% endif %} + # To read the clustermesh configuration + - name: clustermesh-secrets + secret: + secretName: cilium-clustermesh + # note: the leading zero means this number is in octal representation: do not remove it + defaultMode: 0400 + optional: true + # To read the configuration from the config map + - name: cilium-config-path + configMap: + name: cilium-config +{% if cilium_ip_masq_agent_enable %} + - name: ip-masq-agent + configMap: + name: ip-masq-agent + optional: true + items: + - key: config + path: ip-masq-agent +{% endif %} +{% if cilium_encryption_enabled and cilium_encryption_type == "ipsec" %} + - name: cilium-ipsec-secrets + secret: + secretName: cilium-ipsec-keys +{% endif %} +{% if cilium_hubble_install %} + - name: hubble-tls + projected: + # note: the leading zero means this number is in octal representation: do not remove it + defaultMode: 0400 + sources: + - secret: + name: hubble-server-certs + optional: true + items: + - key: ca.crt + path: client-ca.crt + - key: tls.crt + path: server.crt + - key: tls.key + path: server.key +{% endif %} diff --git a/roles/network_plugin/cilium/templates/cilium/sa.yml.j2 b/roles/network_plugin/cilium/templates/cilium/sa.yml.j2 new file mode 100644 index 00000000000..c03ac59b49b --- /dev/null +++ b/roles/network_plugin/cilium/templates/cilium/sa.yml.j2 @@ -0,0 +1,6 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cilium + namespace: kube-system diff --git a/roles/network_plugin/cilium/templates/cilium/secret.yml.j2 b/roles/network_plugin/cilium/templates/cilium/secret.yml.j2 new file mode 100644 index 00000000000..776c6893800 --- /dev/null +++ b/roles/network_plugin/cilium/templates/cilium/secret.yml.j2 @@ -0,0 +1,9 @@ +--- +apiVersion: v1 +data: + keys: {{ cilium_ipsec_key }} +kind: Secret +metadata: + name: cilium-ipsec-keys + namespace: kube-system +type: Opaque diff --git a/roles/network_plugin/cilium/templates/hubble/config.yml.j2 b/roles/network_plugin/cilium/templates/hubble/config.yml.j2 new file mode 100644 index 00000000000..f3af7174110 --- /dev/null +++ b/roles/network_plugin/cilium/templates/hubble/config.yml.j2 @@ -0,0 +1,71 @@ +#jinja2: trim_blocks:False +--- +# Source: cilium helm chart: cilium/templates/hubble-relay/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: hubble-relay-config + namespace: kube-system +data: + config.yaml: | + cluster-name: "{{ cilium_cluster_name }}" + peer-service: "hubble-peer.kube-system.svc.{{ dns_domain }}:443" + listen-address: :4245 + metrics-listen-address: 
":9966" + dial-timeout: + retry-timeout: + sort-buffer-len-max: + sort-buffer-drain-timeout: + tls-client-cert-file: /var/lib/hubble-relay/tls/client.crt + tls-client-key-file: /var/lib/hubble-relay/tls/client.key + tls-server-cert-file: /var/lib/hubble-relay/tls/server.crt + tls-server-key-file: /var/lib/hubble-relay/tls/server.key + tls-hubble-server-ca-files: /var/lib/hubble-relay/tls/hubble-server-ca.crt + disable-server-tls: {% if cilium_hubble_tls_generate %}false{% else %}true{% endif %} + disable-client-tls: {% if cilium_hubble_tls_generate %}false{% else %}true{% endif %} +--- +# Source: cilium/templates/hubble-ui/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: hubble-ui-nginx + namespace: kube-system +data: + nginx.conf: | + server { + listen 8081; + {% if cilium_enable_ipv6 %} + listen [::]:8081; + {% endif %} + server_name localhost; + root /app; + index index.html; + client_max_body_size 1G; + + location / { + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + + # CORS + add_header Access-Control-Allow-Methods "GET, POST, PUT, HEAD, DELETE, OPTIONS"; + add_header Access-Control-Allow-Origin *; + add_header Access-Control-Max-Age 1728000; + add_header Access-Control-Expose-Headers content-length,grpc-status,grpc-message; + add_header Access-Control-Allow-Headers range,keep-alive,user-agent,cache-control,content-type,content-transfer-encoding,x-accept-content-transfer-encoding,x-accept-response-streaming,x-user-agent,x-grpc-web,grpc-timeout; + if ($request_method = OPTIONS) { + return 204; + } + # /CORS + + location /api { + proxy_http_version 1.1; + proxy_pass_request_headers on; + proxy_hide_header Access-Control-Allow-Origin; + proxy_pass http://127.0.0.1:8090; + } + + location / { + try_files $uri $uri/ /index.html; + } + } + } diff --git a/roles/network_plugin/cilium/templates/hubble/cr.yml.j2 b/roles/network_plugin/cilium/templates/hubble/cr.yml.j2 new file mode 100644 index 00000000000..ee974b5e35b --- /dev/null +++ b/roles/network_plugin/cilium/templates/hubble/cr.yml.j2 @@ -0,0 +1,108 @@ +{% if cilium_hubble_tls_generate %} +--- +# Source: cilium/templates/hubble-generate-certs-clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: hubble-generate-certs +rules: + - apiGroups: + - "" + resources: + - secrets + - configmaps + verbs: + - create + - apiGroups: + - "" + resources: + - secrets + resourceNames: + - hubble-server-certs + - hubble-relay-client-certs + - hubble-relay-server-certs + verbs: + - update + - apiGroups: + - "" + resources: + - configmaps + resourceNames: + - hubble-ca-cert + verbs: + - update + - apiGroups: + - "" + resources: + - secrets + resourceNames: + - hubble-ca-secret + verbs: + - get +{% endif %} +--- +# Source: cilium/templates/hubble-relay-clusterrole.yaml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hubble-relay +rules: + - apiGroups: + - "" + resources: + - componentstatuses + - endpoints + - namespaces + - nodes + - pods + - services + verbs: + - get + - list + - watch +{% if cilium_enable_hubble_ui %} +--- +# Source: cilium/templates/hubble-ui-clusterrole.yaml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hubble-ui +rules: + - apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - componentstatuses + - endpoints + - namespaces + - nodes + - pods + - services + verbs: + - get + - list + - watch + - 
apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - get + - list + - watch + - apiGroups: + - cilium.io + resources: + - "*" + verbs: + - get + - list + - watch +{% endif %} diff --git a/roles/network_plugin/cilium/templates/hubble/crb.yml.j2 b/roles/network_plugin/cilium/templates/hubble/crb.yml.j2 new file mode 100644 index 00000000000..e5b8976e80e --- /dev/null +++ b/roles/network_plugin/cilium/templates/hubble/crb.yml.j2 @@ -0,0 +1,46 @@ +{% if cilium_hubble_tls_generate %} +--- +# Source: cilium/templates/hubble-generate-certs-clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: hubble-generate-certs +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: hubble-generate-certs +subjects: +- kind: ServiceAccount + name: hubble-generate-certs + namespace: kube-system +{% endif %} +--- +# Source: cilium/templates/hubble-relay-clusterrolebinding.yaml +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hubble-relay +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: hubble-relay +subjects: +- kind: ServiceAccount + namespace: kube-system + name: hubble-relay +{% if cilium_enable_hubble_ui %} +--- +# Source: cilium/templates/hubble-ui-clusterrolebinding.yaml +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hubble-ui +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: hubble-ui +subjects: +- kind: ServiceAccount + namespace: kube-system + name: hubble-ui +{% endif %} diff --git a/roles/network_plugin/cilium/templates/hubble/cronjob.yml.j2 b/roles/network_plugin/cilium/templates/hubble/cronjob.yml.j2 new file mode 100644 index 00000000000..8010c5252f0 --- /dev/null +++ b/roles/network_plugin/cilium/templates/hubble/cronjob.yml.j2 @@ -0,0 +1,38 @@ +--- +# Source: cilium/templates/hubble-generate-certs-cronjob.yaml +apiVersion: batch/v1 +kind: CronJob +metadata: + name: hubble-generate-certs + namespace: kube-system + labels: + k8s-app: hubble-generate-certs +spec: + schedule: "0 0 1 */4 *" + concurrencyPolicy: Forbid + jobTemplate: + spec: + template: + metadata: + labels: + k8s-app: hubble-generate-certs + spec: + serviceAccount: hubble-generate-certs + serviceAccountName: hubble-generate-certs + containers: + - name: certgen + image: "{{ cilium_hubble_certgen_image_repo }}:{{ cilium_hubble_certgen_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + command: + - "/usr/bin/cilium-certgen" + # Because this is executed as a job, we pass the values as command + # line args instead of via config map. This allows users to inspect + # the values used in past runs by inspecting the completed pod. 
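+ # Illustrative rendering only: a hypothetical entry
+ # {"example-flag": "some-value"} in cilium_certgen_args would be
+ # emitted by the loop below as:
+ #   - "--example-flag=some-value"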
+ args: + {% for key, value in cilium_certgen_args.items() -%} + - "--{{ key }}={{ value }}" + {% endfor %} + + hostNetwork: true + restartPolicy: OnFailure + ttlSecondsAfterFinished: 1800 diff --git a/roles/network_plugin/cilium/templates/hubble/deploy.yml.j2 b/roles/network_plugin/cilium/templates/hubble/deploy.yml.j2 new file mode 100644 index 00000000000..fbd3b2fa859 --- /dev/null +++ b/roles/network_plugin/cilium/templates/hubble/deploy.yml.j2 @@ -0,0 +1,199 @@ +--- +# Source: cilium/templates/hubble-relay-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: hubble-relay + labels: + k8s-app: hubble-relay + namespace: kube-system +spec: + replicas: 1 + selector: + matchLabels: + k8s-app: hubble-relay + strategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + annotations: + labels: + k8s-app: hubble-relay + spec: + affinity: + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: "k8s-app" + operator: In + values: + - cilium + topologyKey: "kubernetes.io/hostname" + containers: + - name: hubble-relay + image: "{{ cilium_hubble_relay_image_repo }}:{{ cilium_hubble_relay_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + command: + - hubble-relay + args: + - serve + ports: + - name: grpc + containerPort: 4245 +{% if cilium_enable_prometheus %} + - name: prometheus + containerPort: 9966 + protocol: TCP +{% endif %} + readinessProbe: + tcpSocket: + port: grpc + livenessProbe: + tcpSocket: + port: grpc + volumeMounts: + - mountPath: /var/run/cilium + name: hubble-sock-dir + readOnly: true + - mountPath: /etc/hubble-relay + name: config + readOnly: true + {% if cilium_hubble_tls_generate -%} + - mountPath: /var/lib/hubble-relay/tls + name: tls + readOnly: true + {%- endif %} + + restartPolicy: Always + serviceAccount: hubble-relay + serviceAccountName: hubble-relay + terminationGracePeriodSeconds: 0 + volumes: + - configMap: + name: hubble-relay-config + items: + - key: config.yaml + path: config.yaml + name: config + - hostPath: + path: /var/run/cilium + type: Directory + name: hubble-sock-dir + {% if cilium_hubble_tls_generate -%} + - projected: + sources: + - secret: + name: hubble-relay-client-certs + items: + - key: ca.crt + path: hubble-server-ca.crt + - key: tls.crt + path: client.crt + - key: tls.key + path: client.key + - secret: + name: hubble-server-certs + items: + - key: tls.crt + path: server.crt + - key: tls.key + path: server.key + name: tls + {%- endif %} + +{% if cilium_enable_hubble_ui %} +--- +# Source: cilium/templates/hubble-ui/deployment.yaml +kind: Deployment +apiVersion: apps/v1 +metadata: + namespace: kube-system + labels: + k8s-app: hubble-ui + name: hubble-ui +spec: + replicas: 1 + selector: + matchLabels: + k8s-app: hubble-ui + template: + metadata: + annotations: + labels: + k8s-app: hubble-ui + spec: + securityContext: + runAsUser: 1001 + serviceAccount: hubble-ui + serviceAccountName: hubble-ui + containers: + - name: frontend + image: "{{ cilium_hubble_ui_image_repo }}:{{ cilium_hubble_ui_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + ports: + - containerPort: 8081 + name: http + volumeMounts: + - name: hubble-ui-nginx-conf + mountPath: /etc/nginx/conf.d/default.conf + subPath: nginx.conf + - name: tmp-dir + mountPath: /tmp + resources: + {} + - name: backend + image: "{{ cilium_hubble_ui_backend_image_repo }}:{{ cilium_hubble_ui_backend_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + env: + - name: 
EVENTS_SERVER_PORT + value: "8090" + {% if cilium_hubble_tls_generate -%} + - name: TLS_TO_RELAY_ENABLED + value: "true" + - name: FLOWS_API_ADDR + value: "hubble-relay:443" + - name: TLS_RELAY_SERVER_NAME + value: ui.{{ cilium_cluster_name }}.hubble-grpc.cilium.io + - name: TLS_RELAY_CA_CERT_FILES + value: /var/lib/hubble-ui/certs/hubble-server-ca.crt + - name: TLS_RELAY_CLIENT_CERT_FILE + value: /var/lib/hubble-ui/certs/client.crt + - name: TLS_RELAY_CLIENT_KEY_FILE + value: /var/lib/hubble-ui/certs/client.key + {% else -%} + - name: FLOWS_API_ADDR + value: "hubble-relay:80" + {% endif %} + + volumeMounts: + - name: tls + mountPath: /var/lib/hubble-ui/certs + readOnly: true + ports: + - containerPort: 8090 + name: grpc + resources: + {} + volumes: + - configMap: + defaultMode: 420 + name: hubble-ui-nginx + name: hubble-ui-nginx-conf + - projected: + sources: + - secret: + name: hubble-relay-client-certs + items: + - key: ca.crt + path: hubble-server-ca.crt + - key: tls.crt + path: client.crt + - key: tls.key + path: client.key + name: tls + - emptyDir: {} + name: tmp-dir +{% endif %} diff --git a/roles/network_plugin/cilium/templates/hubble/job.yml.j2 b/roles/network_plugin/cilium/templates/hubble/job.yml.j2 new file mode 100644 index 00000000000..9ad3ae318a6 --- /dev/null +++ b/roles/network_plugin/cilium/templates/hubble/job.yml.j2 @@ -0,0 +1,34 @@ +--- +# Source: cilium/templates/hubble-generate-certs-job.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: hubble-generate-certs + namespace: kube-system + labels: + k8s-app: hubble-generate-certs +spec: + template: + metadata: + labels: + k8s-app: hubble-generate-certs + spec: + serviceAccount: hubble-generate-certs + serviceAccountName: hubble-generate-certs + containers: + - name: certgen + image: "{{ cilium_hubble_certgen_image_repo }}:{{ cilium_hubble_certgen_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + command: + - "/usr/bin/cilium-certgen" + # Because this is executed as a job, we pass the values as command + # line args instead of via config map. This allows users to inspect + # the values used in past runs by inspecting the completed pod. 
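+ # For example, the flags of a completed run can be inspected with:
+ #   kubectl -n kube-system describe pod -l k8s-app=hubble-generate-certs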
+ args: + {% for key, value in cilium_certgen_args.items() -%} + - "--{{ key }}={{ value }}" + {% endfor %} + + hostNetwork: true + restartPolicy: OnFailure + ttlSecondsAfterFinished: 1800 diff --git a/roles/network_plugin/cilium/templates/hubble/sa.yml.j2 b/roles/network_plugin/cilium/templates/hubble/sa.yml.j2 new file mode 100644 index 00000000000..46de08179d4 --- /dev/null +++ b/roles/network_plugin/cilium/templates/hubble/sa.yml.j2 @@ -0,0 +1,25 @@ +{% if cilium_hubble_tls_generate %} +--- +# Source: cilium/templates/hubble-generate-certs-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: hubble-generate-certs + namespace: kube-system +{% endif %} +--- +# Source: cilium/templates/hubble-relay-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: hubble-relay + namespace: kube-system +{% if cilium_enable_hubble_ui %} +--- +# Source: cilium/templates/hubble-ui-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: hubble-ui + namespace: kube-system +{% endif %} diff --git a/roles/network_plugin/cilium/templates/hubble/service.yml.j2 b/roles/network_plugin/cilium/templates/hubble/service.yml.j2 new file mode 100644 index 00000000000..982487cb023 --- /dev/null +++ b/roles/network_plugin/cilium/templates/hubble/service.yml.j2 @@ -0,0 +1,106 @@ +{% if cilium_enable_prometheus or cilium_enable_hubble_metrics %} +--- +# Source: cilium/templates/cilium-agent-service.yaml +kind: Service +apiVersion: v1 +metadata: + name: hubble-metrics + namespace: kube-system + annotations: + prometheus.io/scrape: 'true' + prometheus.io/port: "{{ cilium_hubble_scrape_port }}" + labels: + k8s-app: hubble +spec: + clusterIP: None + type: ClusterIP + ports: + - name: hubble-metrics + port: 9091 + protocol: TCP + targetPort: hubble-metrics + selector: + k8s-app: cilium +--- +# Source: cilium/templates/hubble-relay/metrics-service.yaml +# We use a separate service from hubble-relay which can be exposed externally +kind: Service +apiVersion: v1 +metadata: + name: hubble-relay-metrics + namespace: kube-system + labels: + k8s-app: hubble-relay + annotations: + prometheus.io/scrape: 'true' + prometheus.io/port: "9966" +spec: + clusterIP: None + type: ClusterIP + selector: + k8s-app: hubble-relay + ports: + - name: metrics + port: 9966 + protocol: TCP + targetPort: prometheus + +{% endif %} +--- +# Source: cilium/templates/hubble-relay-service.yaml +kind: Service +apiVersion: v1 +metadata: + name: hubble-relay + namespace: kube-system + labels: + k8s-app: hubble-relay +spec: + type: ClusterIP + selector: + k8s-app: hubble-relay + ports: + - protocol: TCP + {% if cilium_hubble_tls_generate -%} + port: 443 + {% else -%} + port: 80 + {% endif -%} + targetPort: 4245 +--- +{% if cilium_enable_hubble_ui %} +# Source: cilium/templates/hubble-ui-service.yaml +kind: Service +apiVersion: v1 +metadata: + name: hubble-ui + labels: + k8s-app: hubble-ui + namespace: kube-system +spec: + selector: + k8s-app: hubble-ui + ports: + - name: http + port: 80 + targetPort: 8081 + type: ClusterIP +--- +{% endif %} +# Source: cilium/templates/hubble/peer-service.yaml +apiVersion: v1 +kind: Service +metadata: + name: hubble-peer + namespace: kube-system + labels: + k8s-app: cilium +spec: + selector: + k8s-app: cilium + ports: + - name: peer-service + port: 443 + protocol: TCP + targetPort: 4244 + internalTrafficPolicy: Local diff --git a/roles/network_plugin/cilium/templates/values.yaml.j2 b/roles/network_plugin/cilium/templates/values.yaml.j2 deleted file mode 100644 index 
2bd101c2f43..00000000000 --- a/roles/network_plugin/cilium/templates/values.yaml.j2 +++ /dev/null @@ -1,172 +0,0 @@ -#jinja2: trim_blocks: True, lstrip_blocks: True -MTU: {{ cilium_mtu }} -debug: - enabled: {{ cilium_debug | to_json }} - -image: - repository: {{ cilium_image_repo }} - tag: {{ cilium_image_tag }} - -k8sServiceHost: "auto" -k8sServicePort: "auto" - -ipv4: - enabled: {{ cilium_enable_ipv4 | to_json }} -ipv6: - enabled: {{ cilium_enable_ipv6 | to_json }} - -l2announcements: - enabled: {{ cilium_l2announcements | to_json }} - -bgpControlPlane: - enabled: {{ cilium_enable_bgp_control_plane | to_json }} - -healthPort: {{ cilium_agent_health_port }} - -identityAllocationMode: {{ cilium_identity_allocation_mode }} - -tunnelProtocol: {{ cilium_tunnel_mode }} - -loadbalancer: - mode: {{ cilium_loadbalancer_mode }} - -kubeProxyReplacement: {{ cilium_kube_proxy_replacement | to_json }} - -{% if cilium_dns_proxy_enable_transparent_mode is defined %} -dnsProxy: - enableTransparentMode: {{ cilium_dns_proxy_enable_transparent_mode | to_json }} -{% endif %} - -extraVolumes: - {{ cilium_agent_extra_volumes | to_nice_yaml(indent=2) | indent(2) }} - -extraVolumeMounts: - {{ cilium_agent_extra_volume_mounts | to_nice_yaml(indent=2) | indent(2) }} - -extraArgs: - {{ cilium_agent_extra_args | to_nice_yaml(indent=2) | indent(2) }} - -bpf: - masquerade: {{ cilium_enable_bpf_masquerade | to_json }} - hostLegacyRouting: {{ cilium_enable_host_legacy_routing | to_json }} - monitorAggregation: {{ cilium_monitor_aggregation }} - preallocateMaps: {{ cilium_preallocate_bpf_maps | to_json }} - mapDynamicSizeRatio: {{ cilium_bpf_map_dynamic_size_ratio }} - -cni: - exclusive: {{ cilium_cni_exclusive | to_json }} - logFile: {{ cilium_cni_log_file }} - -autoDirectNodeRoutes: {{ cilium_auto_direct_node_routes | to_json }} - -ipv4NativeRoutingCIDR: {{ cilium_native_routing_cidr }} -ipv6NativeRoutingCIDR: {{ cilium_native_routing_cidr_ipv6 }} - -encryption: - enabled: {{ cilium_encryption_enabled | to_json }} -{% if cilium_encryption_enabled %} - type: {{ cilium_encryption_type }} -{% if cilium_encryption_type == 'wireguard' %} - nodeEncryption: {{ cilium_encryption_node_encryption | to_json }} -{% endif %} -{% endif %} - -bandwidthManager: - enabled: {{ cilium_enable_bandwidth_manager | to_json }} - bbr: {{ cilium_enable_bandwidth_manager_bbr | to_json }} - -ipMasqAgent: - enabled: {{ cilium_ip_masq_agent_enable | to_json }} -{% if cilium_ip_masq_agent_enable %} - config: - nonMasqueradeCIDRs: {{ cilium_non_masquerade_cidrs }} - masqLinkLocal: {{ cilium_masq_link_local | to_json }} - masqLinkLocalIPv6: {{ cilium_masq_link_local_ipv6 | to_json }} - # cilium_ip_masq_resync_interval -{% endif %} - -hubble: - peerService: - clusterDomain: {{ cilium_hubble_peer_service_cluster_domain }} - enabled: {{ cilium_enable_hubble | to_json }} - relay: - enabled: {{ cilium_enable_hubble | to_json }} - image: - repository: {{ cilium_hubble_relay_image_repo }} - tag: {{ cilium_hubble_relay_image_tag }} - ui: - enabled: {{ cilium_enable_hubble_ui | to_json }} - backend: - image: - repository: {{ cilium_hubble_ui_backend_image_repo }} - tag: {{ cilium_hubble_ui_backend_image_tag }} - frontend: - image: - repository: {{ cilium_hubble_ui_image_repo }} - tag: {{ cilium_hubble_ui_image_tag }} - metrics: - enabled: {{ cilium_hubble_metrics | to_json }} - export: - fileMaxBackups: {{ cilium_hubble_export_file_max_backups }} - fileMaxSizeMb: {{ cilium_hubble_export_file_max_size_mb }} - dynamic: - enabled: {{ 
cilium_hubble_export_dynamic_enabled | to_json }} - config: - content: - {{ cilium_hubble_export_dynamic_config_content | to_nice_yaml(indent=10) | indent(10) }} - -gatewayAPI: - enabled: {{ cilium_gateway_api_enabled | to_json }} - -ipam: - mode: {{ cilium_ipam_mode }} - operator: - clusterPoolIPv4PodCIDRList: - - {{ cilium_pool_cidr | default(kube_pods_subnet) }} - clusterPoolIPv4MaskSize: {{ cilium_pool_mask_size | default(kube_network_node_prefix) }} - - clusterPoolIPv6PodCIDRList: - - {{ cilium_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6) }} - clusterPoolIPv6MaskSize: {{ cilium_pool_mask_size_ipv6 | default(kube_network_node_prefix_ipv6) }} - -cgroup: - autoMount: - enabled: {{ cilium_cgroup_auto_mount | to_json }} - hostRoot: {{ cilium_cgroup_host_root }} - -operator: - image: - repository: {{ cilium_operator_image_repo }} - tag: {{ cilium_operator_image_tag }} - replicas: {{ cilium_operator_replicas }} - extraArgs: - {{ cilium_operator_extra_args | to_nice_yaml(indent=2) | indent(4) }} - extraVolumes: - {{ cilium_operator_extra_volumes | to_nice_yaml(indent=2) | indent(4) }} - extraVolumeMounts: - {{ cilium_operator_extra_volume_mounts | to_nice_yaml(indent=2) | indent(4) }} - tolerations: - {{ cilium_operator_tolerations | to_nice_yaml(indent=2) | indent(4) }} - -cluster: - id: {{ cilium_cluster_id }} - name: {{ cilium_cluster_name }} - -enableIPv4Masquerade: {{ cilium_enable_ipv4_masquerade | to_json }} -enableIPv6Masquerade: {{ cilium_enable_ipv6_masquerade | to_json }} - -hostFirewall: - enabled: {{ cilium_enable_host_firewall | to_json }} - -certgen: - image: - repository: {{ cilium_hubble_certgen_image_repo }} - tag: {{ cilium_hubble_certgen_image_tag }} - -envoy: - image: - repository: {{ cilium_hubble_envoy_image_repo }} - tag: {{ cilium_hubble_envoy_image_tag }} - -extraConfig: - {{ cilium_config_extra_vars | to_yaml | indent(2) }} diff --git a/roles/network_plugin/cni/defaults/main.yml b/roles/network_plugin/cni/defaults/main.yml new file mode 100644 index 00000000000..5d11edfa380 --- /dev/null +++ b/roles/network_plugin/cni/defaults/main.yml @@ -0,0 +1,2 @@ +--- +cni_bin_owner: "{{ kube_owner }}" diff --git a/roles/network_plugin/cni/tasks/main.yml b/roles/network_plugin/cni/tasks/main.yml new file mode 100644 index 00000000000..28376bd7605 --- /dev/null +++ b/roles/network_plugin/cni/tasks/main.yml @@ -0,0 +1,16 @@ +--- +- name: CNI | make sure /opt/cni/bin exists + file: + path: /opt/cni/bin + state: directory + mode: "0755" + owner: "{{ cni_bin_owner }}" + recurse: true + +- name: CNI | Copy cni plugins + unarchive: + src: "{{ downloads.cni.dest }}" + dest: "/opt/cni/bin" + mode: "0755" + owner: "{{ cni_bin_owner }}" + remote_src: true diff --git a/roles/network_plugin/custom_cni/defaults/main.yml b/roles/network_plugin/custom_cni/defaults/main.yml new file mode 100644 index 00000000000..0eab14374e3 --- /dev/null +++ b/roles/network_plugin/custom_cni/defaults/main.yml @@ -0,0 +1,11 @@ +--- + +custom_cni_manifests: [] + +custom_cni_chart_namespace: kube-system +custom_cni_chart_release_name: "" +custom_cni_chart_repository_name: "" +custom_cni_chart_repository_url: "" +custom_cni_chart_ref: "" +custom_cni_chart_version: "" +custom_cni_chart_values: {} diff --git a/roles/network_plugin/custom_cni/meta/main.yml b/roles/network_plugin/custom_cni/meta/main.yml new file mode 100644 index 00000000000..361c406de2a --- /dev/null +++ b/roles/network_plugin/custom_cni/meta/main.yml @@ -0,0 +1,20 @@ +--- +dependencies: + - role: helm-apps + when: + - inventory_hostname == 
groups['kube_control_plane'][0] + - custom_cni_chart_release_name | length > 0 + environment: + http_proxy: "{{ http_proxy | default('') }}" + https_proxy: "{{ https_proxy | default('') }}" + release_common_opts: {} + releases: + - name: "{{ custom_cni_chart_release_name }}" + namespace: "{{ custom_cni_chart_namespace }}" + chart_ref: "{{ custom_cni_chart_ref }}" + chart_version: "{{ custom_cni_chart_version }}" + wait: true + values: "{{ custom_cni_chart_values }}" + repositories: + - name: "{{ custom_cni_chart_repository_name }}" + url: "{{ custom_cni_chart_repository_url }}" diff --git a/roles/network_plugin/custom_cni/tasks/main.yml b/roles/network_plugin/custom_cni/tasks/main.yml new file mode 100644 index 00000000000..a1397c8281f --- /dev/null +++ b/roles/network_plugin/custom_cni/tasks/main.yml @@ -0,0 +1,29 @@ +--- +- name: Custom CNI | Manifest deployment + when: not custom_cni_chart_release_name | length > 0 + block: + - name: Custom CNI | Check Custom CNI Manifests + assert: + that: + - "custom_cni_manifests | length > 0" + msg: "custom_cni_manifests should not be empty" + + - name: Custom CNI | Copy Custom manifests + template: + src: "{{ item }}" + dest: "{{ kube_config_dir }}/{{ item | basename | replace('.j2', '') }}" + mode: "0644" + loop: "{{ custom_cni_manifests }}" + delegate_to: "{{ groups['kube_control_plane'] | first }}" + run_once: true + + - name: Custom CNI | Start Resources + kube: + namespace: "kube-system" + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/{{ item | basename | replace('.j2', '') }}" + state: "latest" + wait: true + loop: "{{ custom_cni_manifests }}" + delegate_to: "{{ groups['kube_control_plane'] | first }}" + run_once: true diff --git a/roles/network_plugin/flannel/defaults/main.yml b/roles/network_plugin/flannel/defaults/main.yml new file mode 100644 index 00000000000..16ada70030f --- /dev/null +++ b/roles/network_plugin/flannel/defaults/main.yml @@ -0,0 +1,28 @@ +--- +# Flannel public IP +# The address that flannel should advertise as how to access the system +# Disabled until https://github.com/coreos/flannel/issues/712 is fixed +# flannel_public_ip: "{{ main_access_ip }}" + +## interface that should be used for flannel operations +## This is actually an inventory cluster-level item +# flannel_interface: + +## Select interface that should be used for flannel operations by regexp on Name or IP +## This is actually an inventory cluster-level item +## example: select interface with ip from net 10.0.0.0/23 +## single quote and escape backslashes +# flannel_interface_regexp: '10\\.0\\.[0-2]\\.\\d{1,3}' + +# You can choose what type of flannel backend to use +# please refer to flannel's docs : https://github.com/coreos/flannel/blob/master/README.md +flannel_backend_type: "vxlan" +flannel_vxlan_vni: 1 +flannel_vxlan_port: 8472 +flannel_vxlan_direct_routing: false + +# Limits for apps +flannel_memory_limit: 500M +flannel_cpu_limit: 300m +flannel_memory_requests: 64M +flannel_cpu_requests: 150m diff --git a/roles/network_plugin/flannel/meta/main.yml b/roles/network_plugin/flannel/meta/main.yml new file mode 100644 index 00000000000..9b7065f1854 --- /dev/null +++ b/roles/network_plugin/flannel/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - role: network_plugin/cni diff --git a/roles/network_plugin/flannel/tasks/main.yml b/roles/network_plugin/flannel/tasks/main.yml new file mode 100644 index 00000000000..94603fcf52c --- /dev/null +++ b/roles/network_plugin/flannel/tasks/main.yml @@ -0,0 +1,21 @@ +--- + +- name: Flannel | Stop 
if kernel version is too low for Flannel Wireguard encryption + assert: + that: ansible_kernel.split('-')[0] is version('5.6.0', '>=') + when: + - kube_network_plugin == 'flannel' + - flannel_backend_type == 'wireguard' + - not ignore_assert_errors + +- name: Flannel | Create Flannel manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: "0644" + with_items: + - {name: flannel, file: cni-flannel-rbac.yml, type: sa} + - {name: kube-flannel, file: cni-flannel.yml, type: ds} + register: flannel_node_manifests + when: + - inventory_hostname == groups['kube_control_plane'][0] diff --git a/roles/network_plugin/flannel/tasks/reset.yml b/roles/network_plugin/flannel/tasks/reset.yml new file mode 100644 index 00000000000..c4b1b881581 --- /dev/null +++ b/roles/network_plugin/flannel/tasks/reset.yml @@ -0,0 +1,24 @@ +--- +- name: Reset | check cni network device + stat: + path: /sys/class/net/cni0 + get_attributes: false + get_checksum: false + get_mime: false + register: cni + +- name: Reset | remove the network device created by the flannel + command: ip link del cni0 + when: cni.stat.exists + +- name: Reset | check flannel network device + stat: + path: /sys/class/net/flannel.1 + get_attributes: false + get_checksum: false + get_mime: false + register: flannel + +- name: Reset | remove the network device created by the flannel + command: ip link del flannel.1 + when: flannel.stat.exists diff --git a/roles/network_plugin/flannel/templates/cni-flannel-rbac.yml.j2 b/roles/network_plugin/flannel/templates/cni-flannel-rbac.yml.j2 new file mode 100644 index 00000000000..631ec5eb6c9 --- /dev/null +++ b/roles/network_plugin/flannel/templates/cni-flannel-rbac.yml.j2 @@ -0,0 +1,52 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: flannel + namespace: kube-system +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: flannel +rules: +- apiGroups: + - "" + resources: + - pods + verbs: + - get +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch +- apiGroups: + - "networking.k8s.io" + resources: + - clustercidrs + verbs: + - list + - watch +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: flannel +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: flannel +subjects: +- kind: ServiceAccount + name: flannel + namespace: kube-system diff --git a/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 b/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 new file mode 100644 index 00000000000..da4cfcde5b0 --- /dev/null +++ b/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 @@ -0,0 +1,172 @@ +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: kube-flannel-cfg + namespace: kube-system + labels: + tier: node + app: flannel +data: + cni-conf.json: | + { + "name": "cbr0", + "cniVersion": "0.3.1", + "plugins": [ + { + "type": "flannel", + "delegate": { + "hairpinMode": true, + "isDefaultGateway": true + } + }, + { + "type": "portmap", + "capabilities": { + "portMappings": true + } + } + ] + } + net-conf.json: | + { +{% if ipv4_stack %} + "Network": "{{ kube_pods_subnet }}", + "EnableIPv4": true, +{% endif %} +{% if ipv6_stack %} + "EnableIPv6": true, + "IPv6Network": "{{ kube_pods_subnet_ipv6 }}", +{% endif %} + "Backend": { + "Type": "{{ flannel_backend_type }}"{% if flannel_backend_type == "vxlan" %}, + "VNI": {{ flannel_vxlan_vni }}, + 
"Port": {{ flannel_vxlan_port }}, + "DirectRouting": {{ flannel_vxlan_direct_routing | to_json }} +{% endif %} + } + } +{% for arch in ['amd64', 'arm64', 'arm', 'ppc64le', 's390x'] %} +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: +{% if arch == 'amd64' %} + name: kube-flannel +{% else %} + name: kube-flannel-ds-{{ arch }} +{% endif %} + namespace: kube-system + labels: + tier: node + app: flannel +spec: + selector: + matchLabels: + app: flannel + template: + metadata: + labels: + tier: node + app: flannel + spec: + priorityClassName: system-node-critical + serviceAccountName: flannel + containers: + - name: kube-flannel + image: {{ flannel_image_repo }}:{{ flannel_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + resources: + limits: + cpu: {{ flannel_cpu_limit }} + memory: {{ flannel_memory_limit }} + requests: + cpu: {{ flannel_cpu_requests }} + memory: {{ flannel_memory_requests }} + command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr"{% if flannel_interface is defined %}, "--iface={{ flannel_interface }}"{% endif %}{% if flannel_interface_regexp is defined %}, "--iface-regex={{ flannel_interface_regexp }}"{% endif %} ] + securityContext: + privileged: false + capabilities: + add: ["NET_ADMIN", "NET_RAW"] + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: EVENT_QUEUE_DEPTH + value: "5000" + volumeMounts: + - name: run + mountPath: /run/flannel + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + - name: xtables-lock + mountPath: /run/xtables.lock + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/os + operator: In + values: + - linux + - key: kubernetes.io/arch + operator: In + values: + - {{ arch }} + initContainers: + - name: install-cni-plugin + image: {{ flannel_init_image_repo }}:{{ flannel_init_image_tag }} + command: + - cp + args: + - -f + - /flannel + - /opt/cni/bin/flannel + volumeMounts: + - name: cni-plugin + mountPath: /opt/cni/bin + - name: install-cni + image: {{ flannel_image_repo }}:{{ flannel_image_tag }} + command: + - cp + args: + - -f + - /etc/kube-flannel/cni-conf.json + - /etc/cni/net.d/10-flannel.conflist + volumeMounts: + - name: cni + mountPath: /etc/cni/net.d + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + tolerations: + - operator: Exists + volumes: + - name: run + hostPath: + path: /run/flannel + - name: cni + hostPath: + path: /etc/cni/net.d + - name: flannel-cfg + configMap: + name: kube-flannel-cfg + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate + - name: cni-plugin + hostPath: + path: /opt/cni/bin + updateStrategy: + rollingUpdate: + maxUnavailable: {{ serial | default('20%') }} + type: RollingUpdate +{% endfor %} diff --git a/roles/network_plugin/kube-ovn/defaults/main.yml b/roles/network_plugin/kube-ovn/defaults/main.yml new file mode 100644 index 00000000000..4262a775b27 --- /dev/null +++ b/roles/network_plugin/kube-ovn/defaults/main.yml @@ -0,0 +1,135 @@ +--- +kube_ovn_db_cpu_request: 500m +kube_ovn_db_memory_request: 200Mi +kube_ovn_db_cpu_limit: 3000m +kube_ovn_db_memory_limit: 3000Mi +kube_ovn_node_cpu_request: 200m +kube_ovn_node_memory_request: 200Mi +kube_ovn_node_cpu_limit: 1000m +kube_ovn_node_memory_limit: 800Mi +kube_ovn_cni_server_cpu_request: 200m +kube_ovn_cni_server_memory_request: 200Mi 
+kube_ovn_cni_server_cpu_limit: 1000m
+kube_ovn_cni_server_memory_limit: 1Gi
+kube_ovn_controller_cpu_request: 200m
+kube_ovn_controller_memory_request: 200Mi
+kube_ovn_controller_cpu_limit: 1000m
+kube_ovn_controller_memory_limit: 1Gi
+kube_ovn_pinger_cpu_request: 100m
+kube_ovn_pinger_memory_request: 200Mi
+kube_ovn_pinger_cpu_limit: 200m
+kube_ovn_pinger_memory_limit: 400Mi
+kube_ovn_monitor_memory_request: 200Mi
+kube_ovn_monitor_cpu_request: 200m
+kube_ovn_monitor_memory_limit: 200Mi
+kube_ovn_monitor_cpu_limit: 200m
+kube_ovn_dpdk_node_cpu_request: 1000m
+kube_ovn_dpdk_node_memory_request: 2Gi
+kube_ovn_dpdk_node_cpu_limit: 1000m
+kube_ovn_dpdk_node_memory_limit: 2Gi
+
+kube_ovn_central_hosts: "{{ groups['kube_control_plane'] }}"
+kube_ovn_central_replics: "{{ kube_ovn_central_hosts | length }}"
+kube_ovn_controller_replics: "{{ kube_ovn_central_hosts | length }}"
+kube_ovn_central_ips: |-
+  {% for item in kube_ovn_central_hosts -%}
+  {{ hostvars[item]['main_ip'] }}{% if not loop.last %},{% endif %}
+  {%- endfor %}
+
+kube_ovn_ic_enable: false
+kube_ovn_ic_autoroute: true
+kube_ovn_ic_dbhost: "127.0.0.1"
+kube_ovn_ic_zone: "kubernetes"
+
+# geneve or vlan
+kube_ovn_network_type: geneve
+
+# geneve, vxlan or stt. ATTENTION: some network policies cannot take effect when using vxlan, and stt requires a custom-compiled OVS kernel module
+kube_ovn_tunnel_type: geneve
+
+## The NIC used for the container network can be a NIC name or a comma-separated group of regexes, e.g. 'enp6s0f0,eth.*'. If empty, the NIC used by the default route is chosen.
+# kube_ovn_iface: eth1
+## The MTU used by the pod iface in overlay networks (default iface MTU - 100)
+# kube_ovn_mtu: 1333
+
+## Enable hw-offload, disable traffic mirroring and set the iface to the physical port. Make sure an IP address is bound to the physical port.
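+## Illustrative example only (the device name below is an assumption, not a default):
+# kube_ovn_hw_offload: true
+# kube_ovn_iface: enp6s0f0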
+kube_ovn_hw_offload: false +# traffic mirror +kube_ovn_traffic_mirror: false + +# kube_ovn_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112 +# kube_ovn_default_interface_name: eth0 + +kube_ovn_external_address: 8.8.8.8 +kube_ovn_external_address_ipv6: 2400:3200::1 +kube_ovn_external_address_merged: >- + {%- if ipv4_stack and ipv6_stack -%} + {{ kube_ovn_external_address }},{{ kube_ovn_external_address_ipv6 }} + {%- elif ipv4_stack -%} + {{ kube_ovn_external_address }} + {%- else -%} + {{ kube_ovn_external_address_ipv6 }} + {%- endif -%} + +kube_ovn_external_dns: alauda.cn + +# kube_ovn_default_gateway: 10.233.64.1,fd85:ee78:d8a6:8607::1:0 +kube_ovn_default_gateway_check: true +kube_ovn_default_logical_gateway: false + +# u2o_interconnection +kube_ovn_u2o_interconnection: false + +# kube_ovn_default_exclude_ips: 10.16.0.1 +kube_ovn_node_switch_cidr: 100.64.0.0/16 +kube_ovn_node_switch_cidr_ipv6: fd00:100:64::/64 +kube_ovn_node_switch_cidr_merged: >- + {%- if ipv4_stack and ipv6_stack -%} + {{ kube_ovn_node_switch_cidr }},{{ kube_ovn_node_switch_cidr_ipv6 }} + {%- elif ipv4_stack -%} + {{ kube_ovn_node_switch_cidr }} + {%- else -%} + {{ kube_ovn_node_switch_cidr_ipv6 }} + {%- endif -%} + +## vlan config, set default interface name and vlan id +# kube_ovn_default_interface_name: eth0 +kube_ovn_default_vlan_id: 100 +kube_ovn_vlan_name: product + +## pod nic type, support: veth-pair or internal-port +kube_ovn_pod_nic_type: veth_pair + +## Enable load balancer +kube_ovn_enable_lb: true + +## Enable network policy support +kube_ovn_enable_np: true + +## Enable external vpc support +kube_ovn_enable_external_vpc: true + +## Enable checksum +kube_ovn_encap_checksum: true + +## enable ssl +kube_ovn_enable_ssl: false + +## dpdk +kube_ovn_dpdk_enabled: false +kube_ovn_dpdk_tunnel_iface: br-phy + +## bind local ip +kube_ovn_bind_local_ip_enabled: true + +## eip snat +kube_ovn_eip_snat_enabled: true + +# ls dnat mod dl dst +kube_ovn_ls_dnat_mod_dl_dst: true + +## keep vm ip +kube_ovn_keep_vm_ip: true + +## cni config priority, default: 01 +kube_ovn_cni_config_priority: '01' diff --git a/roles/network_plugin/kube-ovn/tasks/main.yml b/roles/network_plugin/kube-ovn/tasks/main.yml new file mode 100644 index 00000000000..a8b94279202 --- /dev/null +++ b/roles/network_plugin/kube-ovn/tasks/main.yml @@ -0,0 +1,17 @@ +--- +- name: Kube-OVN | Label ovn-db node + command: "{{ kubectl }} label --overwrite node {{ item }} kube-ovn/role=master" + loop: "{{ kube_ovn_central_hosts }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Kube-OVN | Create Kube-OVN manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: "0644" + with_items: + - {name: kube-ovn-crd, file: cni-kube-ovn-crd.yml} + - {name: ovn, file: cni-ovn.yml} + - {name: kube-ovn, file: cni-kube-ovn.yml} + register: kube_ovn_node_manifests diff --git a/roles/network_plugin/kube-ovn/templates/cni-kube-ovn-crd.yml.j2 b/roles/network_plugin/kube-ovn/templates/cni-kube-ovn-crd.yml.j2 new file mode 100644 index 00000000000..c531ffcbb1a --- /dev/null +++ b/roles/network_plugin/kube-ovn/templates/cni-kube-ovn-crd.yml.j2 @@ -0,0 +1,2587 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: vpc-dnses.kubeovn.io +spec: + group: kubeovn.io + names: + plural: vpc-dnses + singular: vpc-dns + shortNames: + - vpc-dns + kind: VpcDns + listKind: VpcDnsList + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.active + name: Active + 
type: boolean + - jsonPath: .spec.vpc + name: Vpc + type: string + - jsonPath: .spec.subnet + name: Subnet + type: string + name: v1 + served: true + storage: true + subresources: + status: {} + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + vpc: + type: string + subnet: + type: string + replicas: + type: integer + minimum: 1 + maximum: 3 + status: + type: object + properties: + active: + type: boolean + conditions: + type: array + items: + type: object + properties: + type: + type: string + status: + type: string + reason: + type: string + message: + type: string + lastUpdateTime: + type: string + lastTransitionTime: + type: string +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: switch-lb-rules.kubeovn.io +spec: + group: kubeovn.io + names: + plural: switch-lb-rules + singular: switch-lb-rule + shortNames: + - slr + kind: SwitchLBRule + listKind: SwitchLBRuleList + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .spec.vip + name: vip + type: string + - jsonPath: .status.ports + name: port(s) + type: string + - jsonPath: .status.service + name: service + type: string + - jsonPath: .metadata.creationTimestamp + name: age + type: date + name: v1 + served: true + storage: true + subresources: + status: {} + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + namespace: + type: string + vip: + type: string + sessionAffinity: + type: string + ports: + items: + properties: + name: + type: string + port: + type: integer + minimum: 1 + maximum: 65535 + protocol: + type: string + targetPort: + type: integer + minimum: 1 + maximum: 65535 + type: object + type: array + selector: + items: + type: string + type: array + endpoints: + items: + type: string + type: array + status: + type: object + properties: + ports: + type: string + service: + type: string +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: vpc-nat-gateways.kubeovn.io +spec: + group: kubeovn.io + names: + plural: vpc-nat-gateways + singular: vpc-nat-gateway + shortNames: + - vpc-nat-gw + kind: VpcNatGateway + listKind: VpcNatGatewayList + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .spec.vpc + name: Vpc + type: string + - jsonPath: .spec.subnet + name: Subnet + type: string + - jsonPath: .spec.lanIp + name: LanIP + type: string + name: v1 + served: true + storage: true + subresources: + status: {} + schema: + openAPIV3Schema: + type: object + properties: + status: + type: object + properties: + externalSubnets: + items: + type: string + type: array + selector: + type: array + items: + type: string + qosPolicy: + type: string + tolerations: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + enum: + - Equal + - Exists + value: + type: string + effect: + type: string + enum: + - NoExecute + - NoSchedule + - PreferNoSchedule + tolerationSeconds: + type: integer + affinity: + properties: + nodeAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + preference: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key 
+ - operator + type: object + type: array + type: object + weight: + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + properties: + nodeSelectorTerms: + items: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + x-kubernetes-patch-strategy: merge + x-kubernetes-patch-merge-key: key + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + x-kubernetes-patch-strategy: merge + x-kubernetes-patch-merge-key: key + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + x-kubernetes-patch-strategy: merge + x-kubernetes-patch-merge-key: key + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + x-kubernetes-patch-strategy: merge + x-kubernetes-patch-merge-key: key + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + 
spec: + type: object + properties: + lanIp: + type: string + subnet: + type: string + externalSubnets: + items: + type: string + type: array + vpc: + type: string + selector: + type: array + items: + type: string + qosPolicy: + type: string + tolerations: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + enum: + - Equal + - Exists + value: + type: string + effect: + type: string + enum: + - NoExecute + - NoSchedule + - PreferNoSchedule + tolerationSeconds: + type: integer + affinity: + properties: + nodeAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + preference: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + properties: + nodeSelectorTerms: + items: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + x-kubernetes-patch-strategy: merge + x-kubernetes-patch-merge-key: key + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + x-kubernetes-patch-strategy: merge + x-kubernetes-patch-merge-key: key + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + x-kubernetes-patch-strategy: merge + x-kubernetes-patch-merge-key: key + operator: + type: string + values: + items: + 
type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + x-kubernetes-patch-strategy: merge + x-kubernetes-patch-merge-key: key + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: iptables-eips.kubeovn.io +spec: + group: kubeovn.io + names: + plural: iptables-eips + singular: iptables-eip + shortNames: + - eip + kind: IptablesEIP + listKind: IptablesEIPList + scope: Cluster + versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .status.ip + name: IP + type: string + - jsonPath: .spec.macAddress + name: Mac + type: string + - jsonPath: .status.nat + name: Nat + type: string + - jsonPath: .spec.natGwDp + name: NatGwDp + type: string + - jsonPath: .status.ready + name: Ready + type: boolean + schema: + openAPIV3Schema: + type: object + properties: + status: + type: object + properties: + ready: + type: boolean + ip: + type: string + nat: + type: string + redo: + type: string + qosPolicy: + type: string + conditions: + type: array + items: + type: object + properties: + type: + type: string + status: + type: string + reason: + type: string + message: + type: string + lastUpdateTime: + type: string + lastTransitionTime: + type: string + spec: + type: object + properties: + v4ip: + type: string + v6ip: + type: string + macAddress: + type: string + natGwDp: + type: string + qosPolicy: + type: string + externalSubnet: + type: string +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: iptables-fip-rules.kubeovn.io +spec: + group: kubeovn.io + names: + plural: iptables-fip-rules + singular: iptables-fip-rule + shortNames: + - fip + kind: IptablesFIPRule + listKind: IptablesFIPRuleList + scope: Cluster + versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .spec.eip + name: Eip + type: string + - jsonPath: .status.v4ip + name: V4ip + type: string + - jsonPath: .spec.internalIp + name: InternalIp + type: string + - jsonPath: .status.v6ip + name: V6ip + type: string + - jsonPath: .status.ready + name: Ready + type: boolean + - jsonPath: .status.natGwDp + name: NatGwDp + type: string + schema: + openAPIV3Schema: + type: object + properties: + status: + type: object + properties: + ready: + type: boolean + v4ip: + type: string + v6ip: + type: string + natGwDp: + type: string + redo: + type: string + internalIp: + type: string + conditions: + type: array + items: + type: object + properties: + type: + type: string + status: + type: string + reason: + type: string + message: + type: 
string + lastUpdateTime: + type: string + lastTransitionTime: + type: string + spec: + type: object + properties: + eip: + type: string + internalIp: + type: string +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: iptables-dnat-rules.kubeovn.io +spec: + group: kubeovn.io + names: + plural: iptables-dnat-rules + singular: iptables-dnat-rule + shortNames: + - dnat + kind: IptablesDnatRule + listKind: IptablesDnatRuleList + scope: Cluster + versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .spec.eip + name: Eip + type: string + - jsonPath: .spec.protocol + name: Protocol + type: string + - jsonPath: .status.v4ip + name: V4ip + type: string + - jsonPath: .status.v6ip + name: V6ip + type: string + - jsonPath: .spec.internalIp + name: InternalIp + type: string + - jsonPath: .spec.externalPort + name: ExternalPort + type: string + - jsonPath: .spec.internalPort + name: InternalPort + type: string + - jsonPath: .status.natGwDp + name: NatGwDp + type: string + - jsonPath: .status.ready + name: Ready + type: boolean + schema: + openAPIV3Schema: + type: object + properties: + status: + type: object + properties: + ready: + type: boolean + v4ip: + type: string + v6ip: + type: string + natGwDp: + type: string + redo: + type: string + protocol: + type: string + internalIp: + type: string + internalPort: + type: string + externalPort: + type: string + conditions: + type: array + items: + type: object + properties: + type: + type: string + status: + type: string + reason: + type: string + message: + type: string + lastUpdateTime: + type: string + lastTransitionTime: + type: string + spec: + type: object + properties: + eip: + type: string + externalPort: + type: string + protocol: + type: string + internalIp: + type: string + internalPort: + type: string +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: iptables-snat-rules.kubeovn.io +spec: + group: kubeovn.io + names: + plural: iptables-snat-rules + singular: iptables-snat-rule + shortNames: + - snat + kind: IptablesSnatRule + listKind: IptablesSnatRuleList + scope: Cluster + versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .spec.eip + name: EIP + type: string + - jsonPath: .status.v4ip + name: V4ip + type: string + - jsonPath: .status.v6ip + name: V6ip + type: string + - jsonPath: .spec.internalCIDR + name: InternalCIDR + type: string + - jsonPath: .status.natGwDp + name: NatGwDp + type: string + - jsonPath: .status.ready + name: Ready + type: boolean + schema: + openAPIV3Schema: + type: object + properties: + status: + type: object + properties: + ready: + type: boolean + v4ip: + type: string + v6ip: + type: string + natGwDp: + type: string + redo: + type: string + internalCIDR: + type: string + conditions: + type: array + items: + type: object + properties: + type: + type: string + status: + type: string + reason: + type: string + message: + type: string + lastUpdateTime: + type: string + lastTransitionTime: + type: string + spec: + type: object + properties: + eip: + type: string + internalCIDR: + type: string +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: ovn-eips.kubeovn.io +spec: + group: kubeovn.io + names: + plural: ovn-eips + singular: ovn-eip + shortNames: + - oeip + kind: OvnEip + listKind: OvnEipList + scope: Cluster + versions: + - name: v1 + served: true + 
storage: true + subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .status.v4Ip + name: V4IP + type: string + - jsonPath: .status.v6Ip + name: V6IP + type: string + - jsonPath: .status.macAddress + name: Mac + type: string + - jsonPath: .status.type + name: Type + type: string + - jsonPath: .status.nat + name: Nat + type: string + - jsonPath: .status.ready + name: Ready + type: boolean + schema: + openAPIV3Schema: + type: object + properties: + status: + type: object + properties: + type: + type: string + nat: + type: string + ready: + type: boolean + v4Ip: + type: string + v6Ip: + type: string + macAddress: + type: string + conditions: + type: array + items: + type: object + properties: + type: + type: string + status: + type: string + reason: + type: string + message: + type: string + lastUpdateTime: + type: string + lastTransitionTime: + type: string + spec: + type: object + properties: + externalSubnet: + type: string + type: + type: string + v4Ip: + type: string + v6Ip: + type: string + macAddress: + type: string +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: ovn-fips.kubeovn.io +spec: + group: kubeovn.io + names: + plural: ovn-fips + singular: ovn-fip + shortNames: + - ofip + kind: OvnFip + listKind: OvnFipList + scope: Cluster + versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .status.vpc + name: Vpc + type: string + - jsonPath: .status.v4Eip + name: V4Eip + type: string + - jsonPath: .status.v4Ip + name: V4Ip + type: string + - jsonPath: .status.ready + name: Ready + type: boolean + - jsonPath: .spec.ipType + name: IpType + type: string + - jsonPath: .spec.ipName + name: IpName + type: string + schema: + openAPIV3Schema: + type: object + properties: + status: + type: object + properties: + ready: + type: boolean + v4Eip: + type: string + v4Ip: + type: string + vpc: + type: string + conditions: + type: array + items: + type: object + properties: + type: + type: string + status: + type: string + reason: + type: string + message: + type: string + lastUpdateTime: + type: string + lastTransitionTime: + type: string + spec: + type: object + properties: + ovnEip: + type: string + ipType: + type: string + ipName: + type: string + vpc: + type: string + v4Ip: + type: string +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: ovn-snat-rules.kubeovn.io +spec: + group: kubeovn.io + names: + plural: ovn-snat-rules + singular: ovn-snat-rule + shortNames: + - osnat + kind: OvnSnatRule + listKind: OvnSnatRuleList + scope: Cluster + versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .status.vpc + name: Vpc + type: string + - jsonPath: .status.v4Eip + name: V4Eip + type: string + - jsonPath: .status.v4IpCidr + name: V4IpCidr + type: string + - jsonPath: .status.ready + name: Ready + type: boolean + schema: + openAPIV3Schema: + type: object + properties: + status: + type: object + properties: + ready: + type: boolean + v4Eip: + type: string + v4IpCidr: + type: string + vpc: + type: string + conditions: + type: array + items: + type: object + properties: + type: + type: string + status: + type: string + reason: + type: string + message: + type: string + lastUpdateTime: + type: string + lastTransitionTime: + type: string + spec: + type: object + properties: + ovnEip: + type: string + vpcSubnet: + type: string + ipName: + type: string + vpc: + type: 
string + v4IpCidr: + type: string +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: ovn-dnat-rules.kubeovn.io +spec: + group: kubeovn.io + names: + plural: ovn-dnat-rules + singular: ovn-dnat-rule + shortNames: + - odnat + kind: OvnDnatRule + listKind: OvnDnatRuleList + scope: Cluster + versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .status.vpc + name: Vpc + type: string + - jsonPath: .spec.ovnEip + name: Eip + type: string + - jsonPath: .status.protocol + name: Protocol + type: string + - jsonPath: .status.v4Eip + name: V4Eip + type: string + - jsonPath: .status.v4Ip + name: V4Ip + type: string + - jsonPath: .status.internalPort + name: InternalPort + type: string + - jsonPath: .status.externalPort + name: ExternalPort + type: string + - jsonPath: .spec.ipName + name: IpName + type: string + - jsonPath: .status.ready + name: Ready + type: boolean + schema: + openAPIV3Schema: + type: object + properties: + status: + type: object + properties: + ready: + type: boolean + v4Eip: + type: string + v4Ip: + type: string + vpc: + type: string + externalPort: + type: string + internalPort: + type: string + protocol: + type: string + ipName: + type: string + conditions: + type: array + items: + type: object + properties: + type: + type: string + status: + type: string + reason: + type: string + message: + type: string + lastUpdateTime: + type: string + lastTransitionTime: + type: string + spec: + type: object + properties: + ovnEip: + type: string + ipType: + type: string + ipName: + type: string + externalPort: + type: string + internalPort: + type: string + protocol: + type: string + vpc: + type: string + v4Ip: + type: string +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: vpcs.kubeovn.io +spec: + group: kubeovn.io + versions: + - additionalPrinterColumns: + - jsonPath: .status.enableExternal + name: EnableExternal + type: boolean + - jsonPath: .status.enableBfd + name: EnableBfd + type: boolean + - jsonPath: .status.standby + name: Standby + type: boolean + - jsonPath: .status.subnets + name: Subnets + type: string + - jsonPath: .status.extraExternalSubnets + name: ExtraExternalSubnets + type: string + - jsonPath: .spec.namespaces + name: Namespaces + type: string + name: v1 + schema: + openAPIV3Schema: + properties: + spec: + properties: + enableExternal: + type: boolean + enableBfd: + type: boolean + namespaces: + items: + type: string + type: array + extraExternalSubnets: + items: + type: string + type: array + staticRoutes: + items: + properties: + policy: + type: string + cidr: + type: string + nextHopIP: + type: string + ecmpMode: + type: string + bfdId: + type: string + routeTable: + type: string + type: object + type: array + policyRoutes: + items: + properties: + priority: + type: integer + action: + type: string + match: + type: string + nextHopIP: + type: string + type: object + type: array + vpcPeerings: + items: + properties: + remoteVpc: + type: string + localConnectIP: + type: string + type: object + type: array + type: object + status: + properties: + conditions: + items: + properties: + lastTransitionTime: + type: string + lastUpdateTime: + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + type: object + type: array + default: + type: boolean + defaultLogicalSwitch: + type: string + router: + type: string + standby: + type: boolean + enableExternal: + 
type: boolean + enableBfd: + type: boolean + subnets: + items: + type: string + type: array + extraExternalSubnets: + items: + type: string + type: array + vpcPeerings: + items: + type: string + type: array + tcpLoadBalancer: + type: string + tcpSessionLoadBalancer: + type: string + udpLoadBalancer: + type: string + udpSessionLoadBalancer: + type: string + sctpLoadBalancer: + type: string + sctpSessionLoadBalancer: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} + names: + kind: Vpc + listKind: VpcList + plural: vpcs + shortNames: + - vpc + singular: vpc + scope: Cluster +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: ips.kubeovn.io +spec: + group: kubeovn.io + versions: + - name: v1 + served: true + storage: true + additionalPrinterColumns: + - name: V4IP + type: string + jsonPath: .spec.v4IpAddress + - name: V6IP + type: string + jsonPath: .spec.v6IpAddress + - name: Mac + type: string + jsonPath: .spec.macAddress + - name: Node + type: string + jsonPath: .spec.nodeName + - name: Subnet + type: string + jsonPath: .spec.subnet + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + podName: + type: string + namespace: + type: string + subnet: + type: string + attachSubnets: + type: array + items: + type: string + nodeName: + type: string + ipAddress: + type: string + v4IpAddress: + type: string + v6IpAddress: + type: string + attachIps: + type: array + items: + type: string + macAddress: + type: string + attachMacs: + type: array + items: + type: string + containerID: + type: string + podType: + type: string + scope: Cluster + names: + plural: ips + singular: ip + kind: IP + shortNames: + - ip +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: vips.kubeovn.io +spec: + group: kubeovn.io + names: + plural: vips + singular: vip + shortNames: + - vip + kind: Vip + listKind: VipList + scope: Cluster + versions: + - name: v1 + served: true + storage: true + additionalPrinterColumns: + - name: V4IP + type: string + jsonPath: .status.v4ip + - name: V6IP + type: string + jsonPath: .status.v6ip + - name: Mac + type: string + jsonPath: .status.mac + - name: PMac + type: string + jsonPath: .spec.parentMac + - name: Subnet + type: string + jsonPath: .spec.subnet + - jsonPath: .status.ready + name: Ready + type: boolean + - jsonPath: .status.type + name: Type + type: string + schema: + openAPIV3Schema: + type: object + properties: + status: + type: object + properties: + type: + type: string + ready: + type: boolean + v4ip: + type: string + v6ip: + type: string + mac: + type: string + pv4ip: + type: string + pv6ip: + type: string + pmac: + type: string + conditions: + type: array + items: + type: object + properties: + type: + type: string + status: + type: string + reason: + type: string + message: + type: string + lastUpdateTime: + type: string + lastTransitionTime: + type: string + spec: + type: object + properties: + namespace: + type: string + subnet: + type: string + type: + type: string + attachSubnets: + type: array + items: + type: string + v4ip: + type: string + macAddress: + type: string + v6ip: + type: string + parentV4ip: + type: string + parentMac: + type: string + parentV6ip: + type: string +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: subnets.kubeovn.io +spec: + group: kubeovn.io + versions: + - name: v1 + served: true + storage: true + subresources: + 
status: {} + additionalPrinterColumns: + - name: Provider + type: string + jsonPath: .spec.provider + - name: Vpc + type: string + jsonPath: .spec.vpc + - name: Protocol + type: string + jsonPath: .spec.protocol + - name: CIDR + type: string + jsonPath: .spec.cidrBlock + - name: Private + type: boolean + jsonPath: .spec.private + - name: NAT + type: boolean + jsonPath: .spec.natOutgoing + - name: Default + type: boolean + jsonPath: .spec.default + - name: GatewayType + type: string + jsonPath: .spec.gatewayType + - name: V4Used + type: number + jsonPath: .status.v4usingIPs + - name: V4Available + type: number + jsonPath: .status.v4availableIPs + - name: V6Used + type: number + jsonPath: .status.v6usingIPs + - name: V6Available + type: number + jsonPath: .status.v6availableIPs + - name: ExcludeIPs + type: string + jsonPath: .spec.excludeIps + - name: U2OInterconnectionIP + type: string + jsonPath: .status.u2oInterconnectionIP + schema: + openAPIV3Schema: + type: object + properties: + metadata: + type: object + properties: + name: + type: string + pattern: ^[^0-9] + status: + type: object + properties: + v4availableIPs: + type: number + v4usingIPs: + type: number + v6availableIPs: + type: number + v6usingIPs: + type: number + activateGateway: + type: string + dhcpV4OptionsUUID: + type: string + dhcpV6OptionsUUID: + type: string + u2oInterconnectionIP: + type: string + u2oInterconnectionVPC: + type: string + v4usingIPrange: + type: string + v4availableIPrange: + type: string + v6usingIPrange: + type: string + v6availableIPrange: + type: string + natOutgoingPolicyRules: + type: array + items: + type: object + properties: + ruleID: + type: string + action: + type: string + enum: + - nat + - forward + match: + type: object + properties: + srcIPs: + type: string + dstIPs: + type: string + conditions: + type: array + items: + type: object + properties: + type: + type: string + status: + type: string + reason: + type: string + message: + type: string + lastUpdateTime: + type: string + lastTransitionTime: + type: string + spec: + type: object + properties: + vpc: + type: string + default: + type: boolean + protocol: + type: string + enum: + - IPv4 + - IPv6 + - Dual + cidrBlock: + type: string + namespaces: + type: array + items: + type: string + gateway: + type: string + provider: + type: string + excludeIps: + type: array + items: + type: string + vips: + type: array + items: + type: string + gatewayType: + type: string + allowSubnets: + type: array + items: + type: string + gatewayNode: + type: string + natOutgoing: + type: boolean + externalEgressGateway: + type: string + policyRoutingPriority: + type: integer + minimum: 1 + maximum: 32765 + policyRoutingTableID: + type: integer + minimum: 1 + maximum: 2147483647 + not: + enum: + - 252 # compat + - 253 # default + - 254 # main + - 255 # local + mtu: + type: integer + minimum: 68 + maximum: 65535 + private: + type: boolean + vlan: + type: string + logicalGateway: + type: boolean + disableGatewayCheck: + type: boolean + disableInterConnection: + type: boolean + enableDHCP: + type: boolean + dhcpV4Options: + type: string + dhcpV6Options: + type: string + enableIPv6RA: + type: boolean + ipv6RAConfigs: + type: string + acls: + type: array + items: + type: object + properties: + direction: + type: string + enum: + - from-lport + - to-lport + priority: + type: integer + minimum: 0 + maximum: 32767 + match: + type: string + action: + type: string + enum: + - allow-related + - allow-stateless + - allow + - drop + - reject + natOutgoingPolicyRules: + 
type: array + items: + type: object + properties: + action: + type: string + enum: + - nat + - forward + match: + type: object + properties: + srcIPs: + type: string + dstIPs: + type: string + u2oInterconnection: + type: boolean + u2oInterconnectionIP: + type: string + enableLb: + type: boolean + enableEcmp: + type: boolean + enableMulticastSnoop: + type: boolean + routeTable: + type: string + scope: Cluster + names: + plural: subnets + singular: subnet + kind: Subnet + shortNames: + - subnet +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: ippools.kubeovn.io +spec: + group: kubeovn.io + versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + additionalPrinterColumns: + - name: Subnet + type: string + jsonPath: .spec.subnet + - name: IPs + type: string + jsonPath: .spec.ips + - name: V4Used + type: number + jsonPath: .status.v4UsingIPs + - name: V4Available + type: number + jsonPath: .status.v4AvailableIPs + - name: V6Used + type: number + jsonPath: .status.v6UsingIPs + - name: V6Available + type: number + jsonPath: .status.v6AvailableIPs + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + subnet: + type: string + x-kubernetes-validations: + - rule: "self == oldSelf" + message: "This field is immutable." + namespaces: + type: array + x-kubernetes-list-type: set + items: + type: string + ips: + type: array + minItems: 1 + x-kubernetes-list-type: set + items: + type: string + anyOf: + - format: ipv4 + - format: ipv6 + - format: cidr + - pattern: ^(?:(?:[01]?\d{1,2}|2[0-4]\d|25[0-5])\.){3}(?:[01]?\d{1,2}|2[0-4]\d|25[0-5])\.\.(?:(?:[01]?\d{1,2}|2[0-4]\d|25[0-5])\.){3}(?:[01]?\d{1,2}|2[0-4]\d|25[0-5])$ + - pattern: ^((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|:)))\.\.((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|:)))$ + required: + - subnet + - ips + status: + type: object + properties: + v4AvailableIPs: + type: number + v4UsingIPs: + type: number + v6AvailableIPs: + type: number + v6UsingIPs: + type: number + v4AvailableIPRange: + type: string + v4UsingIPRange: + type: string + v6AvailableIPRange: + type: string + v6UsingIPRange: + type: string + conditions: + type: array + items: + type: object + properties: + type: + type: string + status: + type: string + reason: + type: string + message: + type: string + lastUpdateTime: + type: string + lastTransitionTime: + type: string + scope: Cluster + names: + plural: ippools + singular: ippool + kind: IPPool + shortNames: + - ippool +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: vlans.kubeovn.io +spec: + group: kubeovn.io + versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: 
+ id: + type: integer + minimum: 0 + maximum: 4095 + provider: + type: string + vlanId: + type: integer + description: Deprecated in favor of id + providerInterfaceName: + type: string + description: Deprecated in favor of provider + required: + - provider + status: + type: object + properties: + subnets: + type: array + items: + type: string + additionalPrinterColumns: + - name: ID + type: string + jsonPath: .spec.id + - name: Provider + type: string + jsonPath: .spec.provider + scope: Cluster + names: + plural: vlans + singular: vlan + kind: Vlan + shortNames: + - vlan +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: provider-networks.kubeovn.io +spec: + group: kubeovn.io + versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + schema: + openAPIV3Schema: + type: object + properties: + metadata: + type: object + properties: + name: + type: string + maxLength: 12 + not: + enum: + - int + spec: + type: object + properties: + defaultInterface: + type: string + maxLength: 15 + pattern: '^[^/\s]+$' + customInterfaces: + type: array + items: + type: object + properties: + interface: + type: string + maxLength: 15 + pattern: '^[^/\s]+$' + nodes: + type: array + items: + type: string + exchangeLinkName: + type: boolean + excludeNodes: + type: array + items: + type: string + required: + - defaultInterface + status: + type: object + properties: + ready: + type: boolean + readyNodes: + type: array + items: + type: string + notReadyNodes: + type: array + items: + type: string + vlans: + type: array + items: + type: string + conditions: + type: array + items: + type: object + properties: + node: + type: string + type: + type: string + status: + type: string + reason: + type: string + message: + type: string + lastUpdateTime: + type: string + lastTransitionTime: + type: string + additionalPrinterColumns: + - name: DefaultInterface + type: string + jsonPath: .spec.defaultInterface + - name: Ready + type: boolean + jsonPath: .status.ready + scope: Cluster + names: + plural: provider-networks + singular: provider-network + kind: ProviderNetwork + listKind: ProviderNetworkList +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: security-groups.kubeovn.io +spec: + group: kubeovn.io + names: + plural: security-groups + singular: security-group + shortNames: + - sg + kind: SecurityGroup + listKind: SecurityGroupList + scope: Cluster + versions: + - name: v1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + ingressRules: + type: array + items: + type: object + properties: + ipVersion: + type: string + protocol: + type: string + priority: + type: integer + remoteType: + type: string + remoteAddress: + type: string + remoteSecurityGroup: + type: string + portRangeMin: + type: integer + portRangeMax: + type: integer + policy: + type: string + egressRules: + type: array + items: + type: object + properties: + ipVersion: + type: string + protocol: + type: string + priority: + type: integer + remoteType: + type: string + remoteAddress: + type: string + remoteSecurityGroup: + type: string + portRangeMin: + type: integer + portRangeMax: + type: integer + policy: + type: string + allowSameGroupTraffic: + type: boolean + status: + type: object + properties: + portGroup: + type: string + allowSameGroupTraffic: + type: boolean + ingressMd5: + type: string + egressMd5: + type: string + ingressLastSyncSuccess: + type: boolean + 
egressLastSyncSuccess: + type: boolean + subresources: + status: {} + conversion: + strategy: None +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: qos-policies.kubeovn.io +spec: + group: kubeovn.io + names: + plural: qos-policies + singular: qos-policy + shortNames: + - qos + kind: QoSPolicy + listKind: QoSPolicyList + scope: Cluster + versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .spec.shared + name: Shared + type: string + - jsonPath: .spec.bindingType + name: BindingType + type: string + schema: + openAPIV3Schema: + type: object + properties: + status: + type: object + properties: + shared: + type: boolean + bindingType: + type: string + bandwidthLimitRules: + type: array + items: + type: object + properties: + name: + type: string + interface: + type: string + rateMax: + type: string + burstMax: + type: string + priority: + type: integer + direction: + type: string + matchType: + type: string + matchValue: + type: string + conditions: + type: array + items: + type: object + properties: + type: + type: string + status: + type: string + reason: + type: string + message: + type: string + lastUpdateTime: + type: string + lastTransitionTime: + type: string + spec: + type: object + properties: + shared: + type: boolean + bindingType: + type: string + bandwidthLimitRules: + type: array + items: + type: object + properties: + name: + type: string + interface: + type: string + rateMax: + type: string + burstMax: + type: string + priority: + type: integer + direction: + type: string + matchType: + type: string + matchValue: + type: string + required: + - name + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map diff --git a/roles/network_plugin/kube-ovn/templates/cni-kube-ovn.yml.j2 b/roles/network_plugin/kube-ovn/templates/cni-kube-ovn.yml.j2 new file mode 100644 index 00000000000..b0fad2ff550 --- /dev/null +++ b/roles/network_plugin/kube-ovn/templates/cni-kube-ovn.yml.j2 @@ -0,0 +1,912 @@ +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: ovn-vpc-nat-config + namespace: kube-system + annotations: + kubernetes.io/description: | + kube-ovn vpc-nat common config +data: + image: {{ kube_ovn_vpc_container_image_repo }}:{{ kube_ovn_vpc_container_image_tag }} +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: ovn-vpc-nat-gw-config + namespace: kube-system +data: + enable-vpc-nat-gw: "true" +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-ovn-cni + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + rbac.authorization.k8s.io/system-only: "true" + name: system:kube-ovn-cni +rules: + - apiGroups: + - "kubeovn.io" + resources: + - subnets + - vlans + - provider-networks + verbs: + - get + - list + - watch + - apiGroups: + - "" + - "kubeovn.io" + resources: + - ovn-eips + - ovn-eips/status + - nodes + - pods + - vlans + verbs: + - get + - list + - patch + - watch + - apiGroups: + - "kubeovn.io" + resources: + - ips + verbs: + - get + - update + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create + - apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding 
+metadata: + name: kube-ovn-cni +roleRef: + name: system:kube-ovn-cni + kind: ClusterRole + apiGroup: rbac.authorization.k8s.io +subjects: + - kind: ServiceAccount + name: kube-ovn-cni + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: kube-ovn-cni + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: + - kind: ServiceAccount + name: kube-ovn-cni + namespace: kube-system +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-ovn-app + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + rbac.authorization.k8s.io/system-only: "true" + name: system:kube-ovn-app +rules: + - apiGroups: + - "" + resources: + - pods + - nodes + verbs: + - get + - list + - apiGroups: + - apps + resources: + - daemonsets + verbs: + - get + - apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create + - apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kube-ovn-app +roleRef: + name: system:kube-ovn-app + kind: ClusterRole + apiGroup: rbac.authorization.k8s.io +subjects: + - kind: ServiceAccount + name: kube-ovn-app + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: kube-ovn-app + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: + - kind: ServiceAccount + name: kube-ovn-app + namespace: kube-system +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: kube-ovn-controller + namespace: kube-system + annotations: + kubernetes.io/description: | + kube-ovn controller +spec: + replicas: {{ kube_ovn_controller_replics }} + selector: + matchLabels: + app: kube-ovn-controller + strategy: + rollingUpdate: + maxSurge: 0% + maxUnavailable: 100% + type: RollingUpdate + template: + metadata: + labels: + app: kube-ovn-controller + component: network + type: infra + spec: + tolerations: + - effect: NoSchedule + operator: Exists + - key: CriticalAddonsOnly + operator: Exists + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: "ovn.kubernetes.io/ic-gw" + operator: NotIn + values: + - "true" + weight: 100 + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app: kube-ovn-controller + topologyKey: kubernetes.io/hostname + priorityClassName: system-cluster-critical + serviceAccountName: ovn + hostNetwork: true + containers: + - name: kube-ovn-controller + image: {{ kube_ovn_container_image_repo }}:{{ kube_ovn_container_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + args: + - /kube-ovn/start-controller.sh + - --default-cidr={{ kube_pods_subnets }} + - --default-gateway={% if kube_ovn_default_gateway is defined %}{{ kube_ovn_default_gateway }}{% endif %}{{ '' }} + - --default-gateway-check={{ kube_ovn_default_gateway_check | string }} + - --default-logical-gateway={{ kube_ovn_default_logical_gateway | string }} + - --default-u2o-interconnection={{ kube_ovn_u2o_interconnection }} + - --default-exclude-ips={% if kube_ovn_default_exclude_ips is defined %}{{ kube_ovn_default_exclude_ips }}{% endif %}{{ '' }} + - --node-switch-cidr={{ 
kube_ovn_node_switch_cidr_merged }} + - --service-cluster-ip-range={{ kube_service_subnets }} + - --network-type={{ kube_ovn_network_type }} + - --default-interface-name={{ kube_ovn_default_interface_name | default('') }} + - --default-vlan-id={{ kube_ovn_default_vlan_id }} + - --ls-dnat-mod-dl-dst={{ kube_ovn_ls_dnat_mod_dl_dst }} + - --pod-nic-type={{ kube_ovn_pod_nic_type }} + - --enable-lb={{ kube_ovn_enable_lb | string }} + - --enable-np={{ kube_ovn_enable_np | string }} + - --enable-eip-snat={{ kube_ovn_eip_snat_enabled }} + - --enable-external-vpc={{ kube_ovn_enable_external_vpc | string }} + - --logtostderr=false + - --alsologtostderr=true + - --gc-interval=360 + - --inspect-interval=20 + - --log_file=/var/log/kube-ovn/kube-ovn-controller.log + - --log_file_max_size=0 + - --enable-lb-svc=false + - --keep-vm-ip={{ kube_ovn_keep_vm_ip }} + securityContext: + runAsUser: 0 + privileged: false + capabilities: + add: + - NET_BIND_SERVICE + env: + - name: ENABLE_SSL + value: "{{ kube_ovn_enable_ssl | lower }}" + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: KUBE_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: OVN_DB_IPS + value: "{{ kube_ovn_central_ips }}" + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: POD_IPS + valueFrom: + fieldRef: + fieldPath: status.podIPs + - name: ENABLE_BIND_LOCAL_IP + value: "{{ kube_ovn_bind_local_ip_enabled }}" + volumeMounts: + - mountPath: /etc/localtime + name: localtime + - mountPath: /var/log/kube-ovn + name: kube-ovn-log + - mountPath: /var/log/ovn + name: ovn-log + - mountPath: /var/run/tls + name: kube-ovn-tls + readinessProbe: + exec: + command: + - /kube-ovn/kube-ovn-healthcheck + - --port=10660 + - --tls=false + periodSeconds: 3 + timeoutSeconds: 45 + livenessProbe: + exec: + command: + - /kube-ovn/kube-ovn-healthcheck + - --port=10660 + - --tls=false + initialDelaySeconds: 300 + periodSeconds: 7 + failureThreshold: 5 + timeoutSeconds: 45 + resources: + requests: + cpu: {{ kube_ovn_controller_cpu_request }} + memory: {{ kube_ovn_controller_memory_request }} + limits: + cpu: {{ kube_ovn_controller_cpu_limit }} + memory: {{ kube_ovn_controller_memory_limit }} + nodeSelector: + kubernetes.io/os: "linux" + volumes: + - name: localtime + hostPath: + path: /etc/localtime + - name: kube-ovn-log + hostPath: + path: /var/log/kube-ovn + - name: ovn-log + hostPath: + path: /var/log/ovn + - name: kube-ovn-tls + secret: + optional: true + secretName: kube-ovn-tls + +--- +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: kube-ovn-cni + namespace: kube-system + annotations: + kubernetes.io/description: | + This daemon set launches the kube-ovn cni daemon. 
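Both the controller flags above and the cni-server flags below are rendered from role variables, so deployments normally override behaviour in inventory group_vars rather than by editing this template. A minimal sketch, assuming only the variable names referenced in this template (the values shown are illustrative, not recommended defaults):

# group_vars/k8s_cluster/k8s-net-kube-ovn.yml (illustrative)
kube_ovn_network_type: geneve   # rendered into --network-type
kube_ovn_enable_lb: true        # rendered into --enable-lb
kube_ovn_enable_np: true        # rendered into --enable-np
kube_ovn_mtu: 1400              # rendered into --mtu when defined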
+spec: + selector: + matchLabels: + app: kube-ovn-cni + template: + metadata: + labels: + app: kube-ovn-cni + component: network + type: infra + spec: + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + - key: CriticalAddonsOnly + operator: Exists + priorityClassName: system-node-critical + serviceAccountName: kube-ovn-cni + hostNetwork: true + hostPID: true + initContainers: + - name: install-cni + image: {{ kube_ovn_container_image_repo }}:{{ kube_ovn_container_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + command: ["/kube-ovn/install-cni.sh"] + securityContext: + runAsUser: 0 + privileged: true + volumeMounts: + - mountPath: /opt/cni/bin + name: cni-bin + - mountPath: /usr/local/bin + name: local-bin + containers: + - name: cni-server + image: {{ kube_ovn_container_image_repo }}:{{ kube_ovn_container_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + command: + - bash + - /kube-ovn/start-cniserver.sh + args: + - --enable-mirror={{ kube_ovn_traffic_mirror | lower }} + - --encap-checksum={{ kube_ovn_encap_checksum | lower }} + - --service-cluster-ip-range={{ kube_service_subnets }} + - --iface={{ kube_ovn_iface | default('') }} + - --dpdk-tunnel-iface={{ kube_ovn_dpdk_tunnel_iface }} + - --network-type={{ kube_ovn_network_type }} + - --default-interface-name={{ kube_ovn_default_interface_name | default('') }} +{% if kube_ovn_mtu is defined %} + - --mtu={{ kube_ovn_mtu }} +{% endif %} + - --cni-conf-name={{ kube_ovn_cni_config_priority }}-kube-ovn.conflist + - --logtostderr=false + - --alsologtostderr=true + - --log_file=/var/log/kube-ovn/kube-ovn-cni.log + - --log_file_max_size=0 + securityContext: + runAsUser: 0 + privileged: false + capabilities: + add: + - NET_ADMIN + - NET_BIND_SERVICE + - NET_RAW + - SYS_ADMIN + env: + - name: ENABLE_SSL + value: "{{ kube_ovn_enable_ssl | lower }}" + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: MODULES + value: kube_ovn_fastpath.ko + - name: RPMS + value: openvswitch-kmod + - name: POD_IPS + valueFrom: + fieldRef: + fieldPath: status.podIPs + - name: ENABLE_BIND_LOCAL_IP + value: "{{ kube_ovn_bind_local_ip_enabled }}" + - name: DBUS_SYSTEM_BUS_ADDRESS + value: "unix:path=/host/var/run/dbus/system_bus_socket" + volumeMounts: + - name: host-modules + mountPath: /lib/modules + readOnly: true + - name: shared-dir + mountPath: /var/lib/kubelet/pods + - mountPath: /etc/openvswitch + name: systemid + readOnly: true + - mountPath: /etc/cni/net.d + name: cni-conf + - mountPath: /run/openvswitch + name: host-run-ovs + mountPropagation: HostToContainer + - mountPath: /run/ovn + name: host-run-ovn + - mountPath: /host/var/run/dbus + name: host-dbus + mountPropagation: HostToContainer + - mountPath: /var/run/netns + name: host-ns + mountPropagation: HostToContainer + - mountPath: /var/log/kube-ovn + name: kube-ovn-log + - mountPath: /var/log/openvswitch + name: host-log-ovs + - mountPath: /var/log/ovn + name: host-log-ovn + - mountPath: /etc/localtime + name: localtime + readOnly: true + - mountPath: /tmp + name: tmp + livenessProbe: + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 7 + successThreshold: 1 + exec: + command: + - /kube-ovn/kube-ovn-healthcheck + - --port=10665 + - --tls=false + timeoutSeconds: 5 + readinessProbe: + failureThreshold: 3 + periodSeconds: 7 + successThreshold: 1 + exec: + command: + - /kube-ovn/kube-ovn-healthcheck + - --port=10665 + - 
--tls=false + timeoutSeconds: 5 + resources: + requests: + cpu: {{ kube_ovn_cni_server_cpu_request }} + memory: {{ kube_ovn_cni_server_memory_request }} + limits: + cpu: {{ kube_ovn_cni_server_cpu_limit }} + memory: {{ kube_ovn_cni_server_memory_limit }} + nodeSelector: + kubernetes.io/os: "linux" + volumes: + - name: host-modules + hostPath: + path: /lib/modules + - name: shared-dir + hostPath: + path: /var/lib/kubelet/pods + - name: systemid + hostPath: + path: /etc/origin/openvswitch + - name: host-run-ovs + hostPath: + path: /run/openvswitch + - name: host-run-ovn + hostPath: + path: /run/ovn + - name: cni-conf + hostPath: + path: /etc/cni/net.d + - name: cni-bin + hostPath: + path: /opt/cni/bin + - name: host-ns + hostPath: + path: /var/run/netns + - name: host-dbus + hostPath: + path: /var/run/dbus + - name: host-log-ovs + hostPath: + path: /var/log/openvswitch + - name: kube-ovn-log + hostPath: + path: /var/log/kube-ovn + - name: host-log-ovn + hostPath: + path: /var/log/ovn + - name: localtime + hostPath: + path: /etc/localtime + - name: tmp + hostPath: + path: /tmp + - name: local-bin + hostPath: + path: /usr/local/bin +--- +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: kube-ovn-pinger + namespace: kube-system + annotations: + kubernetes.io/description: | + This daemon set launches the openvswitch daemon. +spec: + selector: + matchLabels: + app: kube-ovn-pinger + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + app: kube-ovn-pinger + component: network + type: infra + spec: + priorityClassName: system-node-critical + serviceAccountName: ovn + hostPID: true + containers: + - name: pinger + image: {{ kube_ovn_container_image_repo }}:{{ kube_ovn_container_image_tag }} + command: + - /kube-ovn/kube-ovn-pinger + args: + - --external-address={{ kube_ovn_external_address_merged }} + - --external-dns={{ kube_ovn_external_dns }} + - --logtostderr=false + - --alsologtostderr=true + - --log_file=/var/log/kube-ovn/kube-ovn-pinger.log + - --log_file_max_size=0 + imagePullPolicy: {{ k8s_image_pull_policy }} + securityContext: + runAsUser: 0 + privileged: false + env: + - name: ENABLE_SSL + value: "{{ kube_ovn_enable_ssl | lower }}" + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + - mountPath: /var/run/openvswitch + name: host-run-ovs + - mountPath: /var/run/ovn + name: host-run-ovn + - mountPath: /etc/openvswitch + name: host-config-openvswitch + - mountPath: /var/log/openvswitch + name: host-log-ovs + readOnly: true + - mountPath: /var/log/ovn + name: host-log-ovn + readOnly: true + - mountPath: /var/log/kube-ovn + name: kube-ovn-log + - mountPath: /etc/localtime + name: localtime + readOnly: true + - mountPath: /var/run/tls + name: kube-ovn-tls + resources: + requests: + cpu: {{ kube_ovn_pinger_cpu_request }} + memory: {{ kube_ovn_pinger_memory_request }} + limits: + cpu: {{ kube_ovn_pinger_cpu_limit }} + memory: {{ kube_ovn_pinger_memory_limit }} + nodeSelector: + kubernetes.io/os: "linux" + volumes: + - name: host-run-ovs + hostPath: + path: /run/openvswitch + - name: host-run-ovn + hostPath: + path: /run/ovn + - name: host-config-openvswitch + hostPath: + path: /etc/origin/openvswitch + - name: host-log-ovs + hostPath: + path: /var/log/openvswitch + - name: kube-ovn-log + hostPath: + path: /var/log/kube-ovn + 
- name: host-log-ovn + hostPath: + path: /var/log/ovn + - name: localtime + hostPath: + path: /etc/localtime + - name: kube-ovn-tls + secret: + optional: true + secretName: kube-ovn-tls +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: kube-ovn-monitor + namespace: kube-system + annotations: + kubernetes.io/description: | + Metrics for OVN components: northd, nb and sb. +spec: + replicas: 1 + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + selector: + matchLabels: + app: kube-ovn-monitor + template: + metadata: + labels: + app: kube-ovn-monitor + component: network + type: infra + spec: + tolerations: + - effect: NoSchedule + operator: Exists + - key: CriticalAddonsOnly + operator: Exists + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app: kube-ovn-monitor + topologyKey: kubernetes.io/hostname + priorityClassName: system-cluster-critical + serviceAccountName: ovn + hostNetwork: true + containers: + - name: kube-ovn-monitor + image: {{ kube_ovn_container_image_repo }}:{{ kube_ovn_container_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + command: ["/kube-ovn/start-ovn-monitor.sh"] + args: + - --secure-serving=false + - --log_file=/var/log/kube-ovn/kube-ovn-monitor.log + - --logtostderr=false + - --alsologtostderr=true + - --log_file_max_size=200 + securityContext: + runAsUser: 0 + privileged: false + env: + - name: ENABLE_SSL + value: "{{ kube_ovn_enable_ssl | lower }}" + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: POD_IPS + valueFrom: + fieldRef: + fieldPath: status.podIPs + - name: ENABLE_BIND_LOCAL_IP + value: "{{ kube_ovn_bind_local_ip_enabled }}" + resources: + requests: + cpu: {{ kube_ovn_monitor_cpu_request }} + memory: {{ kube_ovn_monitor_memory_request }} + limits: + cpu: {{ kube_ovn_monitor_cpu_limit }} + memory: {{ kube_ovn_monitor_memory_limit }} + volumeMounts: + - mountPath: /var/run/openvswitch + name: host-run-ovs + - mountPath: /var/run/ovn + name: host-run-ovn + - mountPath: /etc/openvswitch + name: host-config-openvswitch + - mountPath: /etc/ovn + name: host-config-ovn + - mountPath: /var/log/ovn + name: host-log-ovn + readOnly: true + - mountPath: /etc/localtime + name: localtime + readOnly: true + - mountPath: /var/run/tls + name: kube-ovn-tls + - mountPath: /var/log/kube-ovn + name: kube-ovn-log + livenessProbe: + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 7 + successThreshold: 1 + exec: + command: + - /kube-ovn/kube-ovn-healthcheck + - --port=10661 + - --tls=false + timeoutSeconds: 5 + readinessProbe: + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 7 + successThreshold: 1 + exec: + command: + - /kube-ovn/kube-ovn-healthcheck + - --port=10661 + - --tls=false + timeoutSeconds: 5 + nodeSelector: + kubernetes.io/os: "linux" + kube-ovn/role: "master" + volumes: + - name: host-run-ovs + hostPath: + path: /run/openvswitch + - name: host-run-ovn + hostPath: + path: /run/ovn + - name: host-config-openvswitch + hostPath: + path: /etc/origin/openvswitch + - name: host-config-ovn + hostPath: + path: /etc/origin/ovn + - name: host-log-ovs + hostPath: + path: /var/log/openvswitch + - name: host-log-ovn + hostPath: + path: /var/log/ovn + - name: localtime + hostPath: + path: /etc/localtime + - name: kube-ovn-tls + secret: + optional: true + secretName: kube-ovn-tls + - name: kube-ovn-log + hostPath: + path: 
/var/log/kube-ovn +--- +kind: Service +apiVersion: v1 +metadata: + name: kube-ovn-monitor + namespace: kube-system + labels: + app: kube-ovn-monitor +spec: + ports: + - name: metrics + port: 10661 + type: ClusterIP +{% if ipv6_stack %} + ipFamilyPolicy: PreferDualStack +{% endif %} + selector: + app: kube-ovn-monitor + sessionAffinity: None +--- +kind: Service +apiVersion: v1 +metadata: + name: kube-ovn-pinger + namespace: kube-system + labels: + app: kube-ovn-pinger +spec: +{% if ipv6_stack %} + ipFamilyPolicy: PreferDualStack +{% endif %} + selector: + app: kube-ovn-pinger + ports: + - port: 8080 + name: metrics +--- +kind: Service +apiVersion: v1 +metadata: + name: kube-ovn-controller + namespace: kube-system + labels: + app: kube-ovn-controller +spec: +{% if ipv6_stack %} + ipFamilyPolicy: PreferDualStack +{% endif %} + selector: + app: kube-ovn-controller + ports: + - port: 10660 + name: metrics +--- +kind: Service +apiVersion: v1 +metadata: + name: kube-ovn-cni + namespace: kube-system + labels: + app: kube-ovn-cni +spec: +{% if ipv6_stack %} + ipFamilyPolicy: PreferDualStack +{% endif %} + selector: + app: kube-ovn-cni + ports: + - port: 10665 + name: metrics +{% if kube_ovn_ic_enable %} +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: ovn-ic-config + namespace: kube-system +data: + enable-ic: "{{ kube_ovn_ic_enable | lower }}" + az-name: "{{ kube_ovn_ic_zone }}" + ic-db-host: "{{ kube_ovn_ic_dbhost }}" + ic-nb-port: "6645" + ic-sb-port: "6646" + gw-nodes: "{{ kube_ovn_central_hosts | join(',') }}" + auto-route: "{{ kube_ovn_ic_autoroute | lower }}" +{% endif %} diff --git a/roles/network_plugin/kube-ovn/templates/cni-ovn.yml.j2 b/roles/network_plugin/kube-ovn/templates/cni-ovn.yml.j2 new file mode 100644 index 00000000000..09f0b291ae2 --- /dev/null +++ b/roles/network_plugin/kube-ovn/templates/cni-ovn.yml.j2 @@ -0,0 +1,674 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ovn-ovs + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + rbac.authorization.k8s.io/system-only: "true" + name: system:ovn-ovs +rules: + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - patch + - apiGroups: + - "" + resources: + - services + - endpoints + verbs: + - get + - apiGroups: + - apps + resources: + - controllerrevisions + verbs: + - get + - list +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: ovn-ovs +roleRef: + name: system:ovn-ovs + kind: ClusterRole + apiGroup: rbac.authorization.k8s.io +subjects: + - kind: ServiceAccount + name: ovn-ovs + namespace: kube-system +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ovn + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + rbac.authorization.k8s.io/system-only: "true" + name: system:ovn +rules: + - apiGroups: + - "kubeovn.io" + resources: + - vpcs + - vpcs/status + - vpc-nat-gateways + - vpc-nat-gateways/status + - subnets + - subnets/status + - ippools + - ippools/status + - ips + - vips + - vips/status + - vlans + - vlans/status + - provider-networks + - provider-networks/status + - security-groups + - security-groups/status + - iptables-eips + - iptables-fip-rules + - iptables-dnat-rules + - iptables-snat-rules + - iptables-eips/status + - iptables-fip-rules/status + - iptables-dnat-rules/status + - iptables-snat-rules/status + - ovn-eips + - ovn-fips + - ovn-snat-rules + - ovn-eips/status + - ovn-fips/status + - 
ovn-snat-rules/status + - ovn-dnat-rules + - ovn-dnat-rules/status + - switch-lb-rules + - switch-lb-rules/status + - vpc-dnses + - vpc-dnses/status + - qos-policies + - qos-policies/status + verbs: + - "*" + - apiGroups: + - "" + resources: + - pods + - namespaces + verbs: + - get + - list + - patch + - watch + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - patch + - update + - watch + - apiGroups: + - "" + resources: + - pods/exec + verbs: + - create + - apiGroups: + - "k8s.cni.cncf.io" + resources: + - network-attachment-definitions + verbs: + - get + - apiGroups: + - "" + - networking.k8s.io + resources: + - networkpolicies + - configmaps + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - daemonsets + verbs: + - get + - apiGroups: + - "" + resources: + - services + - services/status + verbs: + - get + - list + - update + - create + - delete + - watch + - apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - update + - get + - list + - watch + - apiGroups: + - apps + resources: + - statefulsets + - deployments + - deployments/scale + verbs: + - get + - list + - create + - delete + - update + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - "*" + - apiGroups: + - "kubevirt.io" + resources: + - virtualmachines + - virtualmachineinstances + verbs: + - get + - list + - apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create + - apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: ovn +roleRef: + name: system:ovn + kind: ClusterRole + apiGroup: rbac.authorization.k8s.io +subjects: + - kind: ServiceAccount + name: ovn + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: ovn + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: + - kind: ServiceAccount + name: ovn + namespace: kube-system +--- +kind: Service +apiVersion: v1 +metadata: + name: ovn-nb + namespace: kube-system +spec: + ports: + - name: ovn-nb + protocol: TCP + port: 6641 + targetPort: 6641 + type: ClusterIP +{% if ipv6_stack %} + ipFamilyPolicy: PreferDualStack +{% endif %} + selector: + app: ovn-central + ovn-nb-leader: "true" + sessionAffinity: None +--- +kind: Service +apiVersion: v1 +metadata: + name: ovn-sb + namespace: kube-system +spec: + ports: + - name: ovn-sb + protocol: TCP + port: 6642 + targetPort: 6642 + type: ClusterIP +{% if ipv6_stack %} + ipFamilyPolicy: PreferDualStack +{% endif %} + selector: + app: ovn-central + ovn-sb-leader: "true" + sessionAffinity: None +--- +kind: Service +apiVersion: v1 +metadata: + name: ovn-northd + namespace: kube-system +spec: + ports: + - name: ovn-northd + protocol: TCP + port: 6643 + targetPort: 6643 + type: ClusterIP +{% if ipv6_stack %} + ipFamilyPolicy: PreferDualStack +{% endif %} + selector: + app: ovn-central + ovn-northd-leader: "true" + sessionAffinity: None +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: ovn-central + namespace: kube-system + annotations: + kubernetes.io/description: | + OVN components: northd, nb and sb. 
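Because the ovn service account above is granted full access to the kubeovn.io CRDs registered earlier in this patch, a quick post-install sanity check is to create an object against one of those schemas. A minimal Subnet sketch that the subnets.kubeovn.io schema would admit (name and addresses are placeholders, not defaults):

apiVersion: kubeovn.io/v1
kind: Subnet
metadata:
  name: demo-subnet        # schema pattern ^[^0-9]: the name must not start with a digit
spec:
  protocol: IPv4           # enum: IPv4, IPv6, Dual
  cidrBlock: 10.66.0.0/16
  gateway: 10.66.0.1
  natOutgoing: true
  namespaces:
    - demo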
+spec: + replicas: {{ kube_ovn_central_replics }} + strategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + type: RollingUpdate + selector: + matchLabels: + app: ovn-central + template: + metadata: + labels: + app: ovn-central + component: network + type: infra + spec: + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + - key: CriticalAddonsOnly + operator: Exists + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app: ovn-central + topologyKey: kubernetes.io/hostname + priorityClassName: system-cluster-critical + serviceAccountName: ovn-ovs + hostNetwork: true + containers: + - name: ovn-central + image: {{ kube_ovn_container_image_repo }}:{{ kube_ovn_container_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + command: ["/kube-ovn/start-db.sh"] + securityContext: + capabilities: + add: + - NET_BIND_SERVICE + - SYS_NICE + env: + - name: ENABLE_SSL + value: "{{ kube_ovn_enable_ssl | lower }}" + - name: NODE_IPS + value: "{{ kube_ovn_central_ips }}" + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_IPS + valueFrom: + fieldRef: + fieldPath: status.podIPs + - name: ENABLE_BIND_LOCAL_IP + value: "{{ kube_ovn_bind_local_ip_enabled }}" + - name: PROBE_INTERVAL + value: "180000" + - name: OVN_NORTHD_PROBE_INTERVAL + value: "5000" + - name: OVN_LEADER_PROBE_INTERVAL + value: "5" + resources: + requests: + cpu: {{ kube_ovn_db_cpu_request }} + memory: {{ kube_ovn_db_memory_request }} + limits: + cpu: {{ kube_ovn_db_cpu_limit }} + memory: {{ kube_ovn_db_memory_limit }} + volumeMounts: + - mountPath: /var/run/openvswitch + name: host-run-ovs + - mountPath: /var/run/ovn + name: host-run-ovn + - mountPath: /sys + name: host-sys + readOnly: true + - mountPath: /etc/openvswitch + name: host-config-openvswitch + - mountPath: /etc/ovn + name: host-config-ovn + - mountPath: /var/log/openvswitch + name: host-log-ovs + - mountPath: /var/log/ovn + name: host-log-ovn + - mountPath: /etc/localtime + name: localtime + - mountPath: /var/run/tls + name: kube-ovn-tls + readinessProbe: + exec: + command: + - bash + - /kube-ovn/ovn-healthcheck.sh + periodSeconds: 15 + timeoutSeconds: 45 + livenessProbe: + exec: + command: + - bash + - /kube-ovn/ovn-healthcheck.sh + initialDelaySeconds: 30 + periodSeconds: 15 + failureThreshold: 5 + timeoutSeconds: 45 + nodeSelector: + kubernetes.io/os: "linux" + kube-ovn/role: "master" + volumes: + - name: host-run-ovs + hostPath: + path: /run/openvswitch + - name: host-run-ovn + hostPath: + path: /run/ovn + - name: host-sys + hostPath: + path: /sys + - name: host-config-openvswitch + hostPath: + path: /etc/origin/openvswitch + - name: host-config-ovn + hostPath: + path: /etc/origin/ovn + - name: host-log-ovs + hostPath: + path: /var/log/openvswitch + - name: host-log-ovn + hostPath: + path: /var/log/ovn + - name: localtime + hostPath: + path: /etc/localtime + - name: kube-ovn-tls + secret: + optional: true + secretName: kube-ovn-tls +--- +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: ovs-ovn + namespace: kube-system + annotations: + kubernetes.io/description: | + This daemon set launches the openvswitch daemon. 
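The ovs-ovn daemon set defined here branches on kube_ovn_dpdk_enabled: the image, entrypoint, health-check scripts, mounts, and resource requests all switch to their DPDK variants. A hedged group_vars sketch of the toggle (br-phy is an illustrative interface name; hugepages must already be configured on the nodes, since the DPDK path pins hugepages-1Gi: 1Gi in the pod limits):

kube_ovn_dpdk_enabled: true
kube_ovn_dpdk_tunnel_iface: br-phy   # consumed by the cni-server --dpdk-tunnel-iface flag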
+spec: + selector: + matchLabels: + app: ovs + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + template: + metadata: + labels: + app: ovs + component: network + type: infra + spec: + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + - key: CriticalAddonsOnly + operator: Exists + priorityClassName: system-node-critical + serviceAccountName: ovn-ovs + hostNetwork: true + hostPID: true + containers: + - name: openvswitch + image: {% if kube_ovn_dpdk_enabled %}{{ kube_ovn_dpdk_container_image_repo }}:{{ kube_ovn_dpdk_container_image_tag }}{% else %}{{ kube_ovn_container_image_repo }}:{{ kube_ovn_container_image_tag }}{% endif %} + + imagePullPolicy: {{ k8s_image_pull_policy }} + command: [{% if kube_ovn_dpdk_enabled %}"/kube-ovn/start-ovs-dpdk.sh"{% else %}"/kube-ovn/start-ovs.sh"{% endif %}] + securityContext: + runAsUser: 0 + privileged: false + capabilities: + add: + - NET_ADMIN + - NET_BIND_SERVICE + - SYS_MODULE + - SYS_NICE + env: + - name: ENABLE_SSL + value: "{{ kube_ovn_enable_ssl | lower }}" + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace +{% if not kube_ovn_dpdk_enabled %} + - name: HW_OFFLOAD + value: "{{ kube_ovn_hw_offload | string | lower }}" + - name: TUNNEL_TYPE + value: "{{ kube_ovn_tunnel_type }}" +{% endif %} + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: OVN_DB_IPS + value: "{{ kube_ovn_central_ips }}" + volumeMounts: + - mountPath: /var/run/netns + name: host-ns + mountPropagation: HostToContainer + - mountPath: /lib/modules + name: host-modules + readOnly: true + - mountPath: /var/run/openvswitch + name: host-run-ovs + - mountPath: /var/run/ovn + name: host-run-ovn + - mountPath: /sys + name: host-sys + readOnly: true + - mountPath: /etc/cni/net.d + name: cni-conf + - mountPath: /etc/openvswitch + name: host-config-openvswitch + - mountPath: /etc/ovn + name: host-config-ovn + - mountPath: /var/log/openvswitch + name: host-log-ovs + - mountPath: /var/log/ovn + name: host-log-ovn +{% if kube_ovn_dpdk_enabled %} + - mountPath: /opt/ovs-config + name: host-config-ovs + - mountPath: /dev/hugepages + name: hugepage +{% endif %} + - mountPath: /etc/localtime + name: localtime + - mountPath: /var/run/tls + name: kube-ovn-tls + - mountPath: /var/run/containerd + name: cruntime + readOnly: true + readinessProbe: + exec: + command: + - bash +{% if kube_ovn_dpdk_enabled %} + - /kube-ovn/ovs-dpdk-healthcheck.sh +{% else %} + - /kube-ovn/ovs-healthcheck.sh +{% endif %} + periodSeconds: 5 + timeoutSeconds: 45 + livenessProbe: + exec: + command: + - bash +{% if kube_ovn_dpdk_enabled %} + - /kube-ovn/ovs-dpdk-healthcheck.sh +{% else %} + - /kube-ovn/ovs-healthcheck.sh +{% endif %} + initialDelaySeconds: 60 + periodSeconds: 5 + failureThreshold: 5 + timeoutSeconds: 45 + resources: +{% if kube_ovn_dpdk_enabled %} + requests: + cpu: {{ kube_ovn_dpdk_node_cpu_request }} + memory: {{ kube_ovn_dpdk_node_memory_request }} + limits: + cpu: {{ kube_ovn_dpdk_node_cpu_limit }} + memory: {{ kube_ovn_dpdk_node_memory_limit }} + hugepages-1Gi: 1Gi +{% else %} + requests: + cpu: {{ kube_ovn_node_cpu_request }} + memory: {{ kube_ovn_node_memory_request }} + limits: + cpu: {{ kube_ovn_node_cpu_limit }} + memory: {{ kube_ovn_node_memory_limit }} +{% endif %} + nodeSelector: + kubernetes.io/os: "linux" + 
volumes: + - name: host-modules + hostPath: + path: /lib/modules + - name: host-run-ovs + hostPath: + path: /run/openvswitch + - name: host-run-ovn + hostPath: + path: /run/ovn + - name: host-sys + hostPath: + path: /sys + - name: host-ns + hostPath: + path: /var/run/netns + - name: cni-conf + hostPath: + path: /etc/cni/net.d + - name: host-config-openvswitch + hostPath: + path: /etc/origin/openvswitch + - name: host-config-ovn + hostPath: + path: /etc/origin/ovn + - name: host-log-ovs + hostPath: + path: /var/log/openvswitch + - name: host-log-ovn + hostPath: + path: /var/log/ovn +{% if kube_ovn_dpdk_enabled %} + - name: host-config-ovs + hostPath: + path: /opt/ovs-config + type: DirectoryOrCreate + - name: hugepage + emptyDir: + medium: HugePages +{% endif %} + - name: localtime + hostPath: + path: /etc/localtime + - name: cruntime + hostPath: + path: /var/run/containerd + - name: kube-ovn-tls + secret: + optional: true + secretName: kube-ovn-tls diff --git a/roles/network_plugin/kube-router/defaults/main.yml b/roles/network_plugin/kube-router/defaults/main.yml new file mode 100644 index 00000000000..c01a3532bd8 --- /dev/null +++ b/roles/network_plugin/kube-router/defaults/main.yml @@ -0,0 +1,69 @@ +--- +# Enables Pod Networking -- Advertises and learns the routes to Pods via iBGP +kube_router_run_router: true + +# Enables Network Policy -- sets up iptables to provide ingress firewall for pods +kube_router_run_firewall: true + +# Enables Service Proxy -- sets up IPVS for Kubernetes Services +# see docs/kube-router.md "Caveats" section +kube_router_run_service_proxy: false + +# Add Cluster IP of the service to the RIB so that it gets advertised to the BGP peers. +kube_router_advertise_cluster_ip: false + +# Add External IP of service to the RIB so that it gets advertised to the BGP peers. +kube_router_advertise_external_ip: false + +# Add LoadBalancer IP of service status as set by the LB provider to the RIB so that it gets advertised to the BGP peers. +kube_router_advertise_loadbalancer_ip: false + +# Enables BGP graceful restarts +kube_router_bgp_graceful_restart: true + +# Adjust the kube-router daemonset manifest with the changes needed for DSR +kube_router_enable_dsr: false + +# Array of arbitrary extra arguments to kube-router, see +# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md +kube_router_extra_args: [] + +# ASN number of the cluster, used when communicating with external BGP routers +kube_router_cluster_asn: ~ + +# ASN numbers of the BGP peers to which cluster nodes will advertise the cluster IP and each node's pod CIDR. +kube_router_peer_router_asns: ~ + +# The IP addresses of the external routers to which all nodes will peer and advertise the cluster IP and pod CIDRs. +kube_router_peer_router_ips: ~ + +# The remote port of the external BGP peers. If not set, the default BGP port (179) will be used. +kube_router_peer_router_ports: ~ + +# Sets up node CNI to allow hairpin mode, requires node reboots, see +# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md#hairpin-mode +kube_router_support_hairpin_mode: false + +# Select the DNS policy: ClusterFirstWithHostNet, ClusterFirst, etc. +kube_router_dns_policy: ClusterFirstWithHostNet + +# Adds annotations to kubernetes nodes for advanced configuration of BGP peers.
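The annotation arrays referenced below are plain key=value strings that the annotate tasks later in this patch apply verbatim via kubectl annotate --overwrite. An illustrative sketch using kube-router's documented per-node BGP peer annotations (192.0.2.1 and 64512 are placeholder values, not defaults):

kube_router_annotations_master:
  - "kube-router.io/peer.ips=192.0.2.1"
  - "kube-router.io/peer.asns=64512"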
+# https://github.com/cloudnativelabs/kube-router/blob/master/docs/bgp.md + +# Array of annotations for master +kube_router_annotations_master: [] + +# Array of annotations for every node +kube_router_annotations_node: [] + +# Array of common annotations for every node +kube_router_annotations_all: [] + +# Enables scraping kube-router metrics with Prometheus +kube_router_enable_metrics: false + +# Path to serve Prometheus metrics on +kube_router_metrics_path: /metrics + +# Prometheus metrics port to use +kube_router_metrics_port: 9255 diff --git a/roles/network_plugin/kube-router/handlers/main.yml b/roles/network_plugin/kube-router/handlers/main.yml new file mode 100644 index 00000000000..ad5eb21401d --- /dev/null +++ b/roles/network_plugin/kube-router/handlers/main.yml @@ -0,0 +1,20 @@ +--- +- name: Kube-router | delete kube-router docker containers + shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -af name=k8s_POD_kube-router* -q | xargs --no-run-if-empty docker rm -f" + args: + executable: /bin/bash + register: docker_kube_router_remove + until: docker_kube_router_remove is succeeded + retries: 5 + when: container_manager in ["docker"] + listen: Reset_kube_router + +- name: Kube-router | delete kube-router crio/containerd containers + shell: 'set -o pipefail && {{ bin_dir }}/crictl pods --name kube-router* -q | xargs -I% --no-run-if-empty bash -c "{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %"' + args: + executable: /bin/bash + register: crictl_kube_router_remove + until: crictl_kube_router_remove is succeeded + retries: 5 + when: container_manager in ["crio", "containerd"] + listen: Reset_kube_router diff --git a/roles/network_plugin/kube-router/meta/main.yml b/roles/network_plugin/kube-router/meta/main.yml new file mode 100644 index 00000000000..9b7065f1854 --- /dev/null +++ b/roles/network_plugin/kube-router/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - role: network_plugin/cni diff --git a/roles/network_plugin/kube-router/tasks/annotate.yml b/roles/network_plugin/kube-router/tasks/annotate.yml new file mode 100644 index 00000000000..9cb7f6e7c43 --- /dev/null +++ b/roles/network_plugin/kube-router/tasks/annotate.yml @@ -0,0 +1,21 @@ +--- +- name: Kube-router | Add annotations on kube_control_plane + command: "{{ kubectl }} annotate --overwrite node {{ ansible_hostname }} {{ item }}" + with_items: + - "{{ kube_router_annotations_master }}" + delegate_to: "{{ groups['kube_control_plane'][0] }}" + when: kube_router_annotations_master is defined and 'kube_control_plane' in group_names + +- name: Kube-router | Add annotations on kube_node + command: "{{ kubectl }} annotate --overwrite node {{ ansible_hostname }} {{ item }}" + with_items: + - "{{ kube_router_annotations_node }}" + delegate_to: "{{ groups['kube_control_plane'][0] }}" + when: kube_router_annotations_node is defined and 'kube_node' in group_names + +- name: Kube-router | Add common annotations on all servers + command: "{{ kubectl }} annotate --overwrite node {{ ansible_hostname }} {{ item }}" + with_items: + - "{{ kube_router_annotations_all }}" + delegate_to: "{{ groups['kube_control_plane'][0] }}" + when: kube_router_annotations_all is defined and 'k8s_cluster' in group_names diff --git a/roles/network_plugin/kube-router/tasks/main.yml b/roles/network_plugin/kube-router/tasks/main.yml new file mode 100644 index 00000000000..d47a0d1e2a6 --- /dev/null +++ b/roles/network_plugin/kube-router/tasks/main.yml @@ -0,0 +1,62 @@ +--- +- name: Kube-router | Create annotations + import_tasks: 
annotate.yml + tags: annotate + +- name: Kube-router | Create config directory + file: + path: /var/lib/kube-router + state: directory + owner: "{{ kube_owner }}" + recurse: true + mode: "0755" + +- name: Kube-router | Create kubeconfig + template: + src: kubeconfig.yml.j2 + dest: /var/lib/kube-router/kubeconfig + mode: "0644" + owner: "{{ kube_owner }}" + notify: + - Reset_kube_router + +- name: Kube-router | Slurp cni config + slurp: + src: /etc/cni/net.d/10-kuberouter.conflist + register: cni_config_slurp + ignore_errors: true # noqa ignore-errors + +- name: Kube-router | Set cni_config variable + set_fact: + cni_config: "{{ cni_config_slurp.content | b64decode | from_json }}" + when: + - not cni_config_slurp.failed + +- name: Kube-router | Set host_subnet variable + when: + - cni_config is defined + - cni_config | json_query('plugins[?bridge==`kube-bridge`].ipam.subnet') | length > 0 + set_fact: + host_subnet: "{{ cni_config | json_query('plugins[?bridge==`kube-bridge`].ipam.subnet') | first }}" + +- name: Kube-router | Create cni config + template: + src: cni-conf.json.j2 + dest: /etc/cni/net.d/10-kuberouter.conflist + mode: "0644" + owner: "{{ kube_owner }}" + notify: + - Reset_kube_router + +- name: Kube-router | Delete old configuration + file: + path: /etc/cni/net.d/10-kuberouter.conf + state: absent + +- name: Kube-router | Create manifest + template: + src: kube-router.yml.j2 + dest: "{{ kube_config_dir }}/kube-router.yml" + mode: "0644" + delegate_to: "{{ groups['kube_control_plane'] | first }}" + run_once: true diff --git a/roles/network_plugin/kube-router/tasks/reset.yml b/roles/network_plugin/kube-router/tasks/reset.yml new file mode 100644 index 00000000000..32f707591e4 --- /dev/null +++ b/roles/network_plugin/kube-router/tasks/reset.yml @@ -0,0 +1,28 @@ +--- +- name: Reset | check kube-dummy-if network device + stat: + path: /sys/class/net/kube-dummy-if + get_attributes: false + get_checksum: false + get_mime: false + register: kube_dummy_if + +- name: Reset | remove the network device created by kube-router + command: ip link del kube-dummy-if + when: kube_dummy_if.stat.exists + +- name: Reset | check kube-bridge exists + stat: + path: /sys/class/net/kube-bridge + get_attributes: false + get_checksum: false + get_mime: false + register: kube_bridge_if + +- name: Reset | down the network bridge created by kube-router + command: ip link set kube-bridge down + when: kube_bridge_if.stat.exists + +- name: Reset | remove the network bridge created by kube-router + command: ip link del kube-bridge + when: kube_bridge_if.stat.exists diff --git a/roles/network_plugin/kube-router/templates/cni-conf.json.j2 b/roles/network_plugin/kube-router/templates/cni-conf.json.j2 new file mode 100644 index 00000000000..91fafacc4fc --- /dev/null +++ b/roles/network_plugin/kube-router/templates/cni-conf.json.j2 @@ -0,0 +1,27 @@ +{ + "cniVersion":"0.3.0", + "name":"kubernetes", + "plugins":[ + { + "name":"kubernetes", + "type":"bridge", + "bridge":"kube-bridge", + "isDefaultGateway":true, +{% if kube_router_support_hairpin_mode %} + "hairpinMode":true, +{% endif %} + "ipam":{ +{% if host_subnet is defined %} + "subnet": "{{ host_subnet }}", +{% endif %} + "type":"host-local" + } + }, + { + "type":"portmap", + "capabilities":{ + "portMappings":true + } + } + ] +} diff --git a/roles/network_plugin/kube-router/templates/kube-router.yml.j2 b/roles/network_plugin/kube-router/templates/kube-router.yml.j2 new file mode 100644 index 00000000000..d868287d481 --- /dev/null +++ 
b/roles/network_plugin/kube-router/templates/kube-router.yml.j2 @@ -0,0 +1,227 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + k8s-app: kube-router + tier: node + name: kube-router + namespace: kube-system +spec: + minReadySeconds: 3 + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + selector: + matchLabels: + k8s-app: kube-router + tier: node + template: + metadata: + labels: + k8s-app: kube-router + tier: node + annotations: +{% if kube_router_enable_metrics %} + prometheus.io/path: {{ kube_router_metrics_path }} + prometheus.io/port: "{{ kube_router_metrics_port }}" + prometheus.io/scrape: "true" +{% endif %} + spec: + priorityClassName: system-node-critical + serviceAccountName: kube-router + containers: + - name: kube-router + image: {{ kube_router_image_repo }}:{{ kube_router_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + args: + - --run-router={{ kube_router_run_router | bool }} + - --run-firewall={{ kube_router_run_firewall | bool }} + - --run-service-proxy={{ kube_router_run_service_proxy | bool }} + - --kubeconfig=/var/lib/kube-router/kubeconfig + - --bgp-graceful-restart={{ kube_router_bgp_graceful_restart }} +{% if kube_router_advertise_cluster_ip %} + - --advertise-cluster-ip +{% endif %} +{% if kube_router_advertise_external_ip %} + - --advertise-external-ip +{% endif %} +{% if kube_router_advertise_loadbalancer_ip %} + - --advertise-loadbalancer-ip +{% endif %} +{% if kube_router_cluster_asn %} + - --cluster-asn={{ kube_router_cluster_asn }} +{% endif %} +{% if kube_router_peer_router_asns %} + - --peer-router-asns={{ kube_router_peer_router_asns }} +{% endif %} +{% if kube_router_peer_router_ips %} + - --peer-router-ips={{ kube_router_peer_router_ips }} +{% endif %} +{% if kube_router_peer_router_ports %} + - --peer-router-ports={{ kube_router_peer_router_ports }} +{% endif %} +{% if kube_router_enable_metrics %} + - --metrics-path={{ kube_router_metrics_path }} + - --metrics-port={{ kube_router_metrics_port }} +{% endif %} +{% if kube_router_enable_dsr %} +{% if container_manager == "docker" %} + - --runtime-endpoint=unix:///var/run/docker.sock +{% endif %} +{% if container_manager == "containerd" %} + - --runtime-endpoint=unix:///run/containerd/containerd.sock +{% endif %} +{% endif %} +{% for arg in kube_router_extra_args %} + - "{{ arg }}" +{% endfor %} + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: KUBE_ROUTER_CNI_CONF_FILE + value: /etc/cni/net.d/10-kuberouter.conflist + livenessProbe: + httpGet: + path: /healthz + port: 20244 + initialDelaySeconds: 10 + periodSeconds: 3 + resources: + requests: + cpu: 250m + memory: 250Mi + securityContext: + privileged: true + volumeMounts: +{% if kube_router_enable_dsr %} +{% if container_manager == "docker" %} + - name: docker-socket + mountPath: /var/run/docker.sock + readOnly: true +{% endif %} +{% if container_manager == "containerd" %} + - name: containerd-socket + mountPath: /run/containerd/containerd.sock + readOnly: true +{% endif %} +{% endif %} + - name: lib-modules + mountPath: /lib/modules + readOnly: true + - name: cni-conf-dir + mountPath: /etc/cni/net.d + - name: kubeconfig + mountPath: /var/lib/kube-router + readOnly: true + - name: xtables-lock + mountPath: /run/xtables.lock + readOnly: false +{% if kube_router_enable_metrics %} + ports: + - containerPort: {{ kube_router_metrics_port }} + hostPort: {{ kube_router_metrics_port }} + name: metrics + protocol: TCP +{% endif %} + hostNetwork: true + dnsPolicy: {{ 
kube_router_dns_policy }} +{% if kube_router_enable_dsr %} + hostIPC: true + hostPID: true +{% endif %} + tolerations: + - operator: Exists + volumes: +{% if kube_router_enable_dsr %} +{% if container_manager == "docker" %} + - name: docker-socket + hostPath: + path: /var/run/docker.sock + type: Socket +{% endif %} +{% if container_manager == "containerd" %} + - name: containerd-socket + hostPath: + path: /run/containerd/containerd.sock + type: Socket +{% endif %} +{% endif %} + - name: lib-modules + hostPath: + path: /lib/modules + - name: cni-conf-dir + hostPath: + path: /etc/cni/net.d + - name: kubeconfig + hostPath: + path: /var/lib/kube-router + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate + +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-router + namespace: kube-system + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kube-router +rules: + - apiGroups: + - "" + resources: + - namespaces + - pods + - services + - nodes + - endpoints + verbs: + - list + - get + - watch + - apiGroups: + - "networking.k8s.io" + resources: + - networkpolicies + verbs: + - list + - get + - watch + - apiGroups: + - extensions + resources: + - networkpolicies + verbs: + - get + - list + - watch + - apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - list + - watch +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kube-router +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kube-router +subjects: +- kind: ServiceAccount + name: kube-router + namespace: kube-system diff --git a/roles/network_plugin/kube-router/templates/kubeconfig.yml.j2 b/roles/network_plugin/kube-router/templates/kubeconfig.yml.j2 new file mode 100644 index 00000000000..470885111d9 --- /dev/null +++ b/roles/network_plugin/kube-router/templates/kubeconfig.yml.j2 @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Config +clusterCIDR: {{ kube_pods_subnets }} +clusters: +- name: cluster + cluster: + certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + server: {{ kube_apiserver_endpoint }} +users: +- name: kube-router + user: + tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token +contexts: +- context: + cluster: cluster + user: kube-router + name: kube-router-context +current-context: kube-router-context diff --git a/roles/network_plugin/macvlan/defaults/main.yml b/roles/network_plugin/macvlan/defaults/main.yml new file mode 100644 index 00000000000..70a8dd02826 --- /dev/null +++ b/roles/network_plugin/macvlan/defaults/main.yml @@ -0,0 +1,6 @@ +--- +macvlan_interface: eth0 +enable_nat_default_gateway: true + +# Path of the sysctl conf file to write settings to +sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf" diff --git a/roles/network_plugin/macvlan/files/ifdown-local b/roles/network_plugin/macvlan/files/ifdown-local new file mode 100644 index 00000000000..003b8a1b41f --- /dev/null +++ b/roles/network_plugin/macvlan/files/ifdown-local @@ -0,0 +1,6 @@ +#!/bin/bash + +POSTDOWNNAME="/etc/sysconfig/network-scripts/post-down-$1" +if [ -x "$POSTDOWNNAME" ]; then + exec "$POSTDOWNNAME" +fi diff --git a/roles/network_plugin/macvlan/files/ifdown-macvlan b/roles/network_plugin/macvlan/files/ifdown-macvlan new file mode 100755 index 00000000000..b79b9c11ec8 --- /dev/null +++ b/roles/network_plugin/macvlan/files/ifdown-macvlan @@ -0,0 +1,40 @@ +#!/bin/bash +# +# initscripts-macvlan +# Copyright (C) 2014 Lars Kellogg-Stedman +# +# 
This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + +. /etc/init.d/functions + +cd /etc/sysconfig/network-scripts +. ./network-functions + +[ -f ../network ] && . ../network + +CONFIG=${1} + +need_config ${CONFIG} + +source_config + +OTHERSCRIPT="/etc/sysconfig/network-scripts/ifdown-${REAL_DEVICETYPE}" + +if [ ! -x ${OTHERSCRIPT} ]; then + OTHERSCRIPT="/etc/sysconfig/network-scripts/ifdown-eth" +fi + +${OTHERSCRIPT} ${CONFIG} + +ip link del ${DEVICE} type ${TYPE:-macvlan} diff --git a/roles/network_plugin/macvlan/files/ifup-local b/roles/network_plugin/macvlan/files/ifup-local new file mode 100755 index 00000000000..3b6891eb996 --- /dev/null +++ b/roles/network_plugin/macvlan/files/ifup-local @@ -0,0 +1,6 @@ +#!/bin/bash + +POSTUPNAME="/etc/sysconfig/network-scripts/post-up-$1" +if [ -x "$POSTUPNAME" ]; then + exec "$POSTUPNAME" +fi diff --git a/roles/network_plugin/macvlan/files/ifup-macvlan b/roles/network_plugin/macvlan/files/ifup-macvlan new file mode 100755 index 00000000000..97daec0c4d3 --- /dev/null +++ b/roles/network_plugin/macvlan/files/ifup-macvlan @@ -0,0 +1,43 @@ +#!/bin/bash +# +# initscripts-macvlan +# Copyright (C) 2014 Lars Kellogg-Stedman +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + +. /etc/init.d/functions + +cd /etc/sysconfig/network-scripts +. ./network-functions + +[ -f ../network ] && . ../network + +CONFIG=${1} + +need_config ${CONFIG} + +source_config + +OTHERSCRIPT="/etc/sysconfig/network-scripts/ifup-${REAL_DEVICETYPE}" + +if [ ! 
-x ${OTHERSCRIPT} ]; then + OTHERSCRIPT="/etc/sysconfig/network-scripts/ifup-eth" +fi + +ip link add \ + link ${MACVLAN_PARENT} \ + name ${DEVICE} \ + type ${TYPE:-macvlan} mode ${MACVLAN_MODE:-private} + +${OTHERSCRIPT} ${CONFIG} diff --git a/roles/network_plugin/macvlan/handlers/main.yml b/roles/network_plugin/macvlan/handlers/main.yml new file mode 100644 index 00000000000..e4844c22174 --- /dev/null +++ b/roles/network_plugin/macvlan/handlers/main.yml @@ -0,0 +1,15 @@ +--- +- name: Macvlan | reload network + service: + # noqa: jinja[spacing] + name: >- + {% if ansible_os_family == "RedHat" -%} + network + {%- elif ansible_distribution == "Ubuntu" and ansible_distribution_release == "bionic" -%} + systemd-networkd + {%- elif ansible_os_family == "Debian" -%} + networking + {%- endif %} + state: restarted + when: not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] and kube_network_plugin not in ['calico'] + listen: Macvlan | restart network diff --git a/roles/network_plugin/macvlan/meta/main.yml b/roles/network_plugin/macvlan/meta/main.yml new file mode 100644 index 00000000000..9b7065f1854 --- /dev/null +++ b/roles/network_plugin/macvlan/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - role: network_plugin/cni diff --git a/roles/network_plugin/macvlan/tasks/main.yml b/roles/network_plugin/macvlan/tasks/main.yml new file mode 100644 index 00000000000..6ffe3348cd0 --- /dev/null +++ b/roles/network_plugin/macvlan/tasks/main.yml @@ -0,0 +1,110 @@ +--- +- name: Macvlan | Retrieve Pod Cidr + command: "{{ kubectl }} get nodes {{ kube_override_hostname | default(inventory_hostname) }} -o jsonpath='{.spec.podCIDR}'" + changed_when: false + register: node_pod_cidr_cmd + delegate_to: "{{ groups['kube_control_plane'][0] }}" + +- name: Macvlan | set node_pod_cidr + set_fact: + node_pod_cidr: "{{ node_pod_cidr_cmd.stdout }}" + +- name: Macvlan | Retrieve default gateway network interface + become: false + raw: ip -4 route list 0/0 | sed 's/.*dev \([[:alnum:]]*\).*/\1/' + changed_when: false + register: node_default_gateway_interface_cmd + +- name: Macvlan | set node_default_gateway_interface + set_fact: + node_default_gateway_interface: "{{ node_default_gateway_interface_cmd.stdout | trim }}" + +- name: Macvlan | Install network gateway interface on debian + template: + src: debian-network-macvlan.cfg.j2 + dest: /etc/network/interfaces.d/60-mac0.cfg + mode: "0644" + notify: Macvlan | restart network + when: ansible_os_family in ["Debian"] + +- name: Install macvlan config on RH distros + when: ansible_os_family == "RedHat" + block: + - name: Macvlan | Install macvlan script on centos + copy: + src: "{{ item }}" + dest: /etc/sysconfig/network-scripts/ + owner: root + group: root + mode: "0755" + with_fileglob: + - files/* + + - name: Macvlan | Install post-up script on centos + copy: + src: "files/ifup-local" + dest: /sbin/ + owner: root + group: root + mode: "0755" + when: enable_nat_default_gateway + + - name: Macvlan | Install network gateway interface on centos + template: + src: "{{ item.src }}.j2" + dest: "/etc/sysconfig/network-scripts/{{ item.dst }}" + mode: "0644" + with_items: + - {src: centos-network-macvlan.cfg, dst: ifcfg-mac0 } + - {src: centos-routes-macvlan.cfg, dst: route-mac0 } + - {src: centos-postup-macvlan.cfg, dst: post-up-mac0 } + notify: Macvlan | restart network + +- name: Install macvlan config on Flatcar + when: ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] + block: + - name: Macvlan | Install service nat via gateway on 
Flatcar Container Linux + template: + src: coreos-service-nat_outside.j2 + dest: /etc/systemd/system/enable_nat_outside.service + mode: "0644" + when: enable_nat_default_gateway + + - name: Macvlan | Enable service nat via gateway on Flatcar Container Linux + command: "{{ item }}" + with_items: + - systemctl daemon-reload + - systemctl enable enable_nat_outside.service + when: enable_nat_default_gateway + + - name: Macvlan | Install network gateway interface on Flatcar Container Linux + template: + src: "{{ item.src }}.j2" + dest: "/etc/systemd/network/{{ item.dst }}" + mode: "0644" + with_items: + - {src: coreos-device-macvlan.cfg, dst: macvlan.netdev } + - {src: coreos-interface-macvlan.cfg, dst: output.network } + - {src: coreos-network-macvlan.cfg, dst: macvlan.network } + notify: Macvlan | restart network + +- name: Macvlan | Install cni definition for Macvlan + template: + src: 10-macvlan.conf.j2 + dest: /etc/cni/net.d/10-macvlan.conf + mode: "0644" + +- name: Macvlan | Install loopback definition for Macvlan + template: + src: 99-loopback.conf.j2 + dest: /etc/cni/net.d/99-loopback.conf + mode: "0644" + +- name: Enable net.ipv4.conf.all.arp_notify in sysctl + ansible.posix.sysctl: + name: net.ipv4.conf.all.arp_notify + value: 1 + sysctl_set: true + sysctl_file: "{{ sysctl_file_path }}" + state: present + reload: true diff --git a/roles/network_plugin/macvlan/templates/10-macvlan.conf.j2 b/roles/network_plugin/macvlan/templates/10-macvlan.conf.j2 new file mode 100644 index 00000000000..8924547600b --- /dev/null +++ b/roles/network_plugin/macvlan/templates/10-macvlan.conf.j2 @@ -0,0 +1,15 @@ +{ + "cniVersion": "0.4.0", + "name": "mynet", + "type": "macvlan", + "master": "{{ macvlan_interface }}", + "hairpinMode": true, + "ipam": { + "type": "host-local", + "subnet": "{{ node_pod_cidr }}", + "routes": [ + { "dst": "0.0.0.0/0" } + ], + "gateway": "{{ node_pod_cidr|ansible.utils.ipaddr('net')|ansible.utils.ipaddr(1)|ansible.utils.ipaddr('address') }}" + } +} diff --git a/roles/network_plugin/macvlan/templates/99-loopback.conf.j2 b/roles/network_plugin/macvlan/templates/99-loopback.conf.j2 new file mode 100644 index 00000000000..b41ab65841e --- /dev/null +++ b/roles/network_plugin/macvlan/templates/99-loopback.conf.j2 @@ -0,0 +1,5 @@ +{ + "cniVersion": "0.2.0", + "name": "lo", + "type": "loopback" +} diff --git a/roles/network_plugin/macvlan/templates/centos-network-macvlan.cfg.j2 b/roles/network_plugin/macvlan/templates/centos-network-macvlan.cfg.j2 new file mode 100644 index 00000000000..1e6c0aab6a3 --- /dev/null +++ b/roles/network_plugin/macvlan/templates/centos-network-macvlan.cfg.j2 @@ -0,0 +1,13 @@ +DEVICE=mac0 +DEVICETYPE=macvlan +TYPE=macvlan +BOOTPROTO=none +ONBOOT=yes +NM_CONTROLLED=no + +MACVLAN_PARENT={{ macvlan_interface }} +MACVLAN_MODE=bridge + +IPADDR={{ node_pod_cidr|ansible.utils.ipaddr('net')|ansible.utils.ipaddr(1)|ansible.utils.ipaddr('address') }} +NETMASK={{ node_pod_cidr|ansible.utils.ipaddr('netmask') }} +NETWORK={{ node_pod_cidr|ansible.utils.ipaddr('network') }} diff --git a/roles/network_plugin/macvlan/templates/centos-postdown-macvlan.cfg.j2 b/roles/network_plugin/macvlan/templates/centos-postdown-macvlan.cfg.j2 new file mode 100644 index 00000000000..87f1f56a39f --- /dev/null +++ b/roles/network_plugin/macvlan/templates/centos-postdown-macvlan.cfg.j2 @@ -0,0 +1,3 @@ +{% if enable_nat_default_gateway %} +iptables -t nat -D POSTROUTING -s {{ node_pod_cidr|ansible.utils.ipaddr('net') }} -o {{ node_default_gateway_interface }} -j MASQUERADE +{% endif %} diff --git 
a/roles/network_plugin/macvlan/templates/centos-postup-macvlan.cfg.j2 b/roles/network_plugin/macvlan/templates/centos-postup-macvlan.cfg.j2 new file mode 100644 index 00000000000..254827e4358 --- /dev/null +++ b/roles/network_plugin/macvlan/templates/centos-postup-macvlan.cfg.j2 @@ -0,0 +1,3 @@ +{% if enable_nat_default_gateway %} +iptables -t nat -I POSTROUTING -s {{ node_pod_cidr|ansible.utils.ipaddr('net') }} -o {{ node_default_gateway_interface }} -j MASQUERADE +{% endif %} diff --git a/roles/network_plugin/macvlan/templates/centos-routes-macvlan.cfg.j2 b/roles/network_plugin/macvlan/templates/centos-routes-macvlan.cfg.j2 new file mode 100644 index 00000000000..60400dd4917 --- /dev/null +++ b/roles/network_plugin/macvlan/templates/centos-routes-macvlan.cfg.j2 @@ -0,0 +1,7 @@ +{% for host in groups['kube_node'] %} +{% if hostvars[host]['access_ip'] is defined %} +{% if hostvars[host]['node_pod_cidr'] != node_pod_cidr %} +{{ hostvars[host]['node_pod_cidr'] }} via {{ hostvars[host]['access_ip'] }} +{% endif %} +{% endif %} +{% endfor %} diff --git a/roles/network_plugin/macvlan/templates/coreos-device-macvlan.cfg.j2 b/roles/network_plugin/macvlan/templates/coreos-device-macvlan.cfg.j2 new file mode 100644 index 00000000000..2418dacfebb --- /dev/null +++ b/roles/network_plugin/macvlan/templates/coreos-device-macvlan.cfg.j2 @@ -0,0 +1,6 @@ +[NetDev] +Name=mac0 +Kind=macvlan + +[MACVLAN] +Mode=bridge diff --git a/roles/network_plugin/macvlan/templates/coreos-interface-macvlan.cfg.j2 b/roles/network_plugin/macvlan/templates/coreos-interface-macvlan.cfg.j2 new file mode 100644 index 00000000000..342f68081fb --- /dev/null +++ b/roles/network_plugin/macvlan/templates/coreos-interface-macvlan.cfg.j2 @@ -0,0 +1,6 @@ +[Match] +Name={{ macvlan_interface }} + +[Network] +MACVLAN=mac0 +DHCP=yes diff --git a/roles/network_plugin/macvlan/templates/coreos-network-macvlan.cfg.j2 b/roles/network_plugin/macvlan/templates/coreos-network-macvlan.cfg.j2 new file mode 100644 index 00000000000..0c4c33b0a67 --- /dev/null +++ b/roles/network_plugin/macvlan/templates/coreos-network-macvlan.cfg.j2 @@ -0,0 +1,17 @@ +[Match] +Name=mac0 + +[Network] +Address={{ node_pod_cidr|ansible.utils.ipaddr('net')|ansible.utils.ipaddr(1)|ansible.utils.ipaddr('address') }}/{{ node_pod_cidr|ansible.utils.ipaddr('prefix') }} + +{% for host in groups['kube_node'] %} +{% if hostvars[host]['access_ip'] is defined %} +{% if hostvars[host]['node_pod_cidr'] != node_pod_cidr %} +[Route] +Gateway={{ hostvars[host]['access_ip'] }} +Destination={{ hostvars[host]['node_pod_cidr'] }} +GatewayOnlink=yes + +{% endif %} +{% endif %} +{% endfor %} diff --git a/roles/network_plugin/macvlan/templates/coreos-service-nat_outside.j2 b/roles/network_plugin/macvlan/templates/coreos-service-nat_outside.j2 new file mode 100644 index 00000000000..1d8df03191d --- /dev/null +++ b/roles/network_plugin/macvlan/templates/coreos-service-nat_outside.j2 @@ -0,0 +1,6 @@ +[Service] +Type=oneshot +ExecStart=/bin/bash -c "iptables -t nat -I POSTROUTING -s {{ node_pod_cidr|ansible.utils.ipaddr('net') }} -o {{ node_default_gateway_interface }} -j MASQUERADE" + +[Install] +WantedBy=sys-subsystem-net-devices-mac0.device diff --git a/roles/network_plugin/macvlan/templates/debian-network-macvlan.cfg.j2 b/roles/network_plugin/macvlan/templates/debian-network-macvlan.cfg.j2 new file mode 100644 index 00000000000..cbd4325c9da --- /dev/null +++ b/roles/network_plugin/macvlan/templates/debian-network-macvlan.cfg.j2 @@ -0,0 +1,28 @@ +auto mac0 +iface mac0 inet static + address {{ 
node_pod_cidr|ansible.utils.ipaddr('net')|ansible.utils.ipaddr(1)|ansible.utils.ipaddr('address') }} + network {{ node_pod_cidr|ansible.utils.ipaddr('network') }} + netmask {{ node_pod_cidr|ansible.utils.ipaddr('netmask') }} + broadcast {{ node_pod_cidr|ansible.utils.ipaddr('broadcast') }} + pre-up ip link add link {{ macvlan_interface }} mac0 type macvlan mode bridge +{% for host in groups['kube_node'] %} +{% if hostvars[host]['access_ip'] is defined %} +{% if hostvars[host]['node_pod_cidr'] != node_pod_cidr %} + post-up ip route add {{ hostvars[host]['node_pod_cidr'] }} via {{ hostvars[host]['access_ip'] }} +{% endif %} +{% endif %} +{% endfor %} +{% if enable_nat_default_gateway %} + post-up iptables -t nat -I POSTROUTING -s {{ node_pod_cidr|ansible.utils.ipaddr('net') }} -o {{ node_default_gateway_interface }} -j MASQUERADE +{% endif %} +{% for host in groups['kube_node'] %} +{% if hostvars[host]['access_ip'] is defined %} +{% if hostvars[host]['node_pod_cidr'] != node_pod_cidr %} + post-down ip route del {{ hostvars[host]['node_pod_cidr'] }} via {{ hostvars[host]['access_ip'] }} +{% endif %} +{% endif %} +{% endfor %} +{% if enable_nat_default_gateway %} + post-down iptables -t nat -D POSTROUTING -s {{ node_pod_cidr|ansible.utils.ipaddr('net') }} -o {{ node_default_gateway_interface }} -j MASQUERADE +{% endif %} + post-down ip link delete mac0 diff --git a/roles/network_plugin/meta/main.yml b/roles/network_plugin/meta/main.yml index 27f37df014c..1f2f99df483 100644 --- a/roles/network_plugin/meta/main.yml +++ b/roles/network_plugin/meta/main.yml @@ -1,6 +1,49 @@ --- dependencies: + - role: network_plugin/cni + when: kube_network_plugin != 'none' + - role: network_plugin/cilium - when: kube_network_plugin == 'cilium' or cilium_deploy_additionally + when: kube_network_plugin == 'cilium' or cilium_deploy_additionally | default(false) | bool tags: - cilium + + - role: network_plugin/calico + when: kube_network_plugin == 'calico' + tags: + - calico + + - role: network_plugin/flannel + when: kube_network_plugin == 'flannel' + tags: + - flannel + + - role: network_plugin/weave + when: kube_network_plugin == 'weave' + tags: + - weave + + - role: network_plugin/macvlan + when: kube_network_plugin == 'macvlan' + tags: + - macvlan + + - role: network_plugin/kube-ovn + when: kube_network_plugin == 'kube-ovn' + tags: + - kube-ovn + + - role: network_plugin/kube-router + when: kube_network_plugin == 'kube-router' + tags: + - kube-router + + - role: network_plugin/custom_cni + when: kube_network_plugin == 'custom_cni' + tags: + - custom_cni + + - role: network_plugin/multus + when: kube_network_plugin_multus + tags: + - multus diff --git a/roles/network_plugin/multus/defaults/main.yml b/roles/network_plugin/multus/defaults/main.yml new file mode 100644 index 00000000000..a982ba6ba31 --- /dev/null +++ b/roles/network_plugin/multus/defaults/main.yml @@ -0,0 +1,10 @@ +--- +multus_conf_file: "auto" +multus_cni_conf_dir_host: "/etc/cni/net.d" +multus_cni_bin_dir_host: "/opt/cni/bin" +multus_cni_run_dir_host: "/run" +multus_cni_conf_dir: "{{ ('/host', multus_cni_conf_dir_host) | join }}" +multus_cni_bin_dir: "{{ ('/host', multus_cni_bin_dir_host) | join }}" +multus_cni_run_dir: "{{ ('/host', multus_cni_run_dir_host) | join }}" +multus_kubeconfig_file_host: "{{ (multus_cni_conf_dir_host, '/multus.d/multus.kubeconfig') | join }}" +multus_namespace_isolation: false diff --git a/roles/network_plugin/multus/files/multus-clusterrole.yml b/roles/network_plugin/multus/files/multus-clusterrole.yml new file mode 100644 index 00000000000..b574069cd9b --- /dev/null
+++ b/roles/network_plugin/multus/files/multus-clusterrole.yml @@ -0,0 +1,28 @@ +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: multus +rules: + - apiGroups: ["k8s.cni.cncf.io"] + resources: + - '*' + verbs: + - '*' + - apiGroups: + - "" + resources: + - pods + - pods/status + verbs: + - get + - update + - apiGroups: + - "" + - events.k8s.io + resources: + - events + verbs: + - create + - patch + - update diff --git a/roles/network_plugin/multus/files/multus-clusterrolebinding.yml b/roles/network_plugin/multus/files/multus-clusterrolebinding.yml new file mode 100644 index 00000000000..2d1e1a4f41a --- /dev/null +++ b/roles/network_plugin/multus/files/multus-clusterrolebinding.yml @@ -0,0 +1,13 @@ +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: multus +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: multus +subjects: +- kind: ServiceAccount + name: multus + namespace: kube-system diff --git a/roles/network_plugin/multus/files/multus-crd.yml b/roles/network_plugin/multus/files/multus-crd.yml new file mode 100644 index 00000000000..24b2c58fca4 --- /dev/null +++ b/roles/network_plugin/multus/files/multus-crd.yml @@ -0,0 +1,45 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: network-attachment-definitions.k8s.cni.cncf.io +spec: + group: k8s.cni.cncf.io + scope: Namespaced + names: + plural: network-attachment-definitions + singular: network-attachment-definition + kind: NetworkAttachmentDefinition + shortNames: + - net-attach-def + versions: + - name: v1 + served: true + storage: true + schema: + openAPIV3Schema: + description: 'NetworkAttachmentDefinition is a CRD schema specified by the Network Plumbing + Working Group to express the intent for attaching pods to one or more logical or physical + networks. More information available at: https://github.com/k8snetworkplumbingwg/multi-net-spec' + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this + representation of an object. Servers should convert recognized schemas to the + latest internal value, and may reject unrecognized values. More info: + https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: 'NetworkAttachmentDefinition spec defines the desired state of a network attachment' + type: object + properties: + config: + description: 'NetworkAttachmentDefinition config is a JSON-formatted CNI configuration' + type: string diff --git a/roles/network_plugin/multus/files/multus-serviceaccount.yml b/roles/network_plugin/multus/files/multus-serviceaccount.yml new file mode 100644 index 00000000000..62423082ca0 --- /dev/null +++ b/roles/network_plugin/multus/files/multus-serviceaccount.yml @@ -0,0 +1,6 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: multus + namespace: kube-system diff --git a/roles/network_plugin/multus/meta/main.yml b/roles/network_plugin/multus/meta/main.yml new file mode 100644 index 00000000000..9b7065f1854 --- /dev/null +++ b/roles/network_plugin/multus/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - role: network_plugin/cni diff --git a/roles/network_plugin/multus/tasks/main.yml b/roles/network_plugin/multus/tasks/main.yml new file mode 100644 index 00000000000..0869da7b54e --- /dev/null +++ b/roles/network_plugin/multus/tasks/main.yml @@ -0,0 +1,36 @@ +--- +- name: Multus | Copy manifest files + copy: + src: "{{ item.file }}" + dest: "{{ kube_config_dir }}" + mode: "0644" + with_items: + - {name: multus-crd, file: multus-crd.yml, type: customresourcedefinition} + - {name: multus-serviceaccount, file: multus-serviceaccount.yml, type: serviceaccount} + - {name: multus-clusterrole, file: multus-clusterrole.yml, type: clusterrole} + - {name: multus-clusterrolebinding, file: multus-clusterrolebinding.yml, type: clusterrolebinding} + register: multus_manifest_1 + when: inventory_hostname == groups['kube_control_plane'][0] + +- name: Multus | Check container engine type + set_fact: + container_manager_types: "{{ ansible_play_hosts_all | map('extract', hostvars, ['container_manager']) | list | unique }}" + +- name: Multus | Copy manifest templates + template: + src: multus-daemonset.yml.j2 + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: "0644" + with_items: + - {name: multus-daemonset-containerd, file: multus-daemonset-containerd.yml, type: daemonset, engine: containerd } + - {name: multus-daemonset-docker, file: multus-daemonset-docker.yml, type: daemonset, engine: docker } + - {name: multus-daemonset-crio, file: multus-daemonset-crio.yml, type: daemonset, engine: crio } + register: multus_manifest_2 + vars: + query: "*|[?container_manager=='{{ container_manager }}']|[0].inventory_hostname" + vars_from_node: "{{ hostvars | json_query(query) }}" + delegate_to: "{{ groups['kube_control_plane'][0] }}" + when: + - item.engine in container_manager_types + - hostvars[inventory_hostname].container_manager == item.engine + - inventory_hostname == vars_from_node diff --git a/roles/network_plugin/multus/templates/multus-daemonset.yml.j2 b/roles/network_plugin/multus/templates/multus-daemonset.yml.j2 new file mode 100644 index 00000000000..43d1193a92f --- /dev/null +++ b/roles/network_plugin/multus/templates/multus-daemonset.yml.j2 @@ -0,0 +1,100 @@ +--- +kind: DaemonSet +apiVersion: apps/v1 +metadata: +{% if container_manager_types | length >= 2 %} + name: kube-multus-{{ container_manager }}-{{ image_arch }} +{% else %} + name: kube-multus-ds-{{ image_arch }} +{% endif %} + namespace: kube-system + labels: + tier: node + app: multus +spec: + selector: + matchLabels: + tier: node + 
app: multus + template: + metadata: + labels: + tier: node + app: multus + spec: + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + priorityClassName: system-node-critical + nodeSelector: + kubernetes.io/arch: {{ image_arch }} +{% if container_manager_types | length >= 2 %} + kubespray.io/container_manager: {{ container_manager }} +{% endif %} + tolerations: + - operator: Exists + serviceAccountName: multus + initContainers: + - name: install-multus-binary + image: {{ multus_image_repo }}:{{ multus_image_tag }} + command: ["/install_multus"] + args: + - "--type" + - "thin" + resources: + requests: + cpu: "10m" + memory: "15Mi" + securityContext: + privileged: true + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - name: cnibin + mountPath: {{ multus_cni_bin_dir }} + mountPropagation: Bidirectional + containers: + - name: kube-multus + image: {{ multus_image_repo }}:{{ multus_image_tag }} + command: ["/thin_entrypoint"] + args: + - "--cni-conf-dir={{ multus_cni_conf_dir }}" + - "--multus-autoconfig-dir={{ multus_cni_conf_dir }}" + - "--cni-bin-dir={{ multus_cni_bin_dir }}" + - "--multus-conf-file={{ multus_conf_file }}" + - "--multus-kubeconfig-file-host={{ multus_kubeconfig_file_host }}" + - "--namespace-isolation={{ multus_namespace_isolation | string | lower }}" + resources: + requests: + cpu: "100m" + memory: "90Mi" + limits: + cpu: "100m" + memory: "90Mi" + securityContext: + privileged: true +{% if container_manager == 'crio' %} + capabilities: + add: ["SYS_ADMIN"] +{% endif %} + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: +{% if container_manager == 'crio' %} + - name: run + mountPath: {{ multus_cni_run_dir }} + mountPropagation: HostToContainer +{% endif %} + - name: cni + mountPath: {{ multus_cni_conf_dir }} + - name: cnibin + mountPath: {{ multus_cni_bin_dir }} + volumes: +{% if container_manager == 'crio' %} + - name: run + hostPath: + path: {{ multus_cni_run_dir_host }} +{% endif %} + - name: cni + hostPath: + path: {{ multus_cni_conf_dir_host }} + - name: cnibin + hostPath: + path: {{ multus_cni_bin_dir_host }} diff --git a/roles/network_plugin/ovn4nfv/tasks/main.yml b/roles/network_plugin/ovn4nfv/tasks/main.yml new file mode 100644 index 00000000000..a16f3ec6f00 --- /dev/null +++ b/roles/network_plugin/ovn4nfv/tasks/main.yml @@ -0,0 +1,16 @@ +--- +- name: Ovn4nfv | Label control-plane node + command: >- + {{ kubectl }} label --overwrite node {{ groups['kube_control_plane'] | first }} ovn4nfv-k8s-plugin=ovn-control-plane + when: + - inventory_hostname == groups['kube_control_plane'][0] + +- name: Ovn4nfv | Create ovn4nfv-k8s manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: "0644" + with_items: + - {name: ovn-daemonset, file: ovn-daemonset.yml} + - {name: ovn4nfv-k8s-plugin, file: ovn4nfv-k8s-plugin.yml} + register: ovn4nfv_node_manifests diff --git a/roles/network_plugin/weave/defaults/main.yml b/roles/network_plugin/weave/defaults/main.yml new file mode 100644 index 00000000000..337d8e79982 --- /dev/null +++ b/roles/network_plugin/weave/defaults/main.yml @@ -0,0 +1,65 @@ +--- + +# Weave's network password for encryption; if null, network encryption is disabled. +weave_password: ~ + +# Set to true to disable checking for new Weave Net versions (default is false, +# i.e. the check is enabled) +weave_checkpoint_disable: false + +# Soft limit on the number of connections between peers. Defaults to 100. 
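+# e.g. on clusters with many peers you might raise this soft limit: weave_conn_limit: 200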
+weave_conn_limit: 100 + +# Weave Net defaults to enabling hairpin on the bridge side of the veth pair +# for containers attached. If you need to disable hairpin, e.g. your kernel is +# one of those that can panic if hairpin is enabled, then you can disable it by +# setting `HAIRPIN_MODE=false`. +weave_hairpin_mode: true + +# The range of IP addresses used by Weave Net and the subnet they are placed in +# (CIDR format; default 10.32.0.0/12) +weave_ipalloc_range: "{{ kube_pods_subnets }}" + +# Set to false to disable the Network Policy Controller (enabled by default) +weave_expect_npc: "{{ enable_network_policy }}" + +# List of addresses of peers in the Kubernetes cluster (default is to fetch the +# list from the api-server) +weave_kube_peers: ~ + +# Set the initialization mode of the IP Address Manager (defaults to consensus +# amongst the KUBE_PEERS) +weave_ipalloc_init: ~ + +# Set the IP address used as a gateway from the Weave network to the host +# network - this is useful if you are configuring the addon as a static pod. +weave_expose_ip: ~ + +# Address and port that the Weave Net daemon will serve Prometheus-style +# metrics on (defaults to 0.0.0.0:6782) +weave_metrics_addr: ~ + +# Address and port that the Weave Net daemon will serve status requests on +# (defaults to disabled) +weave_status_addr: ~ + +# Weave Net defaults to 1376 bytes, but you can set a smaller size if your +# underlying network has a tighter limit, or set a larger size for better +# performance if your network supports jumbo frames (e.g. 8916) +weave_mtu: 1376 + +# Set to true to preserve the client source IP address when accessing Services +# with `spec.externalTrafficPolicy` set to `Local`. The feature works +# only with Weave IPAM (the default). +weave_no_masq_local: true + +# Set to nft to use the nftables backend for iptables (default is iptables) +weave_iptables_backend: ~ + +# Extra variables passed to launch.sh, useful for enabling seed mode; see +# https://www.weave.works/docs/net/latest/tasks/ipam/ipam/ +weave_extra_args: ~ + +# Extra variables for weave-npc passed to launch.sh, useful for changing the log level, e.g. --log-level=error +weave_npc_extra_args: ~ diff --git a/roles/network_plugin/weave/meta/main.yml b/roles/network_plugin/weave/meta/main.yml new file mode 100644 index 00000000000..9b7065f1854 --- /dev/null +++ b/roles/network_plugin/weave/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - role: network_plugin/cni diff --git a/roles/network_plugin/weave/tasks/main.yml b/roles/network_plugin/weave/tasks/main.yml new file mode 100644 index 00000000000..ccb43135219 --- /dev/null +++ b/roles/network_plugin/weave/tasks/main.yml @@ -0,0 +1,12 @@ +--- +- name: Weave | Create manifest + template: + src: weave-net.yml.j2 + dest: "{{ kube_config_dir }}/weave-net.yml" + mode: "0644" + +- name: Weave | Fix nodePort for Weave + template: + src: 10-weave.conflist.j2 + dest: /etc/cni/net.d/10-weave.conflist + mode: "0644" diff --git a/roles/network_plugin/weave/templates/10-weave.conflist.j2 b/roles/network_plugin/weave/templates/10-weave.conflist.j2 new file mode 100644 index 00000000000..9aab7e98c07 --- /dev/null +++ b/roles/network_plugin/weave/templates/10-weave.conflist.j2 @@ -0,0 +1,16 @@ +{ + "cniVersion": "0.3.0", + "name": "weave", + "plugins": [ + { + "name": "weave", + "type": "weave-net", + "hairpinMode": {{ weave_hairpin_mode | bool | lower }} + }, + { + "type": "portmap", + "capabilities": {"portMappings": true}, + "snat": true + } + ] +} diff --git 
a/roles/network_plugin/weave/templates/weave-net.yml.j2 b/roles/network_plugin/weave/templates/weave-net.yml.j2 new file mode 100644 index 00000000000..3a3886510ac --- /dev/null +++ b/roles/network_plugin/weave/templates/weave-net.yml.j2 @@ -0,0 +1,297 @@ +--- +apiVersion: v1 +kind: List +items: + - apiVersion: v1 + kind: ServiceAccount + metadata: + name: weave-net + labels: + name: weave-net + namespace: kube-system + - apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: weave-net + labels: + name: weave-net + rules: + - apiGroups: + - '' + resources: + - pods + - namespaces + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - extensions + resources: + - networkpolicies + verbs: + - get + - list + - watch + - apiGroups: + - 'networking.k8s.io' + resources: + - networkpolicies + verbs: + - get + - list + - watch + - apiGroups: + - '' + resources: + - nodes/status + verbs: + - patch + - update + - apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: weave-net + labels: + name: weave-net + roleRef: + kind: ClusterRole + name: weave-net + apiGroup: rbac.authorization.k8s.io + subjects: + - kind: ServiceAccount + name: weave-net + namespace: kube-system + - apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: weave-net + namespace: kube-system + labels: + name: weave-net + rules: + - apiGroups: + - '' + resources: + - configmaps + resourceNames: + - weave-net + verbs: + - get + - update + - apiGroups: + - '' + resources: + - configmaps + verbs: + - create + - apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: weave-net + namespace: kube-system + labels: + name: weave-net + roleRef: + kind: Role + name: weave-net + apiGroup: rbac.authorization.k8s.io + subjects: + - kind: ServiceAccount + name: weave-net + namespace: kube-system + - apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: weave-net + labels: + name: weave-net + namespace: kube-system + spec: + # Wait 5 seconds to let pod connect before rolling next pod + selector: + matchLabels: + name: weave-net + minReadySeconds: 5 + template: + metadata: + labels: + name: weave-net + spec: + initContainers: + - name: weave-init + image: {{ weave_kube_image_repo }}:{{ weave_kube_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + command: + - /home/weave/init.sh + env: + securityContext: + privileged: true + volumeMounts: + - name: cni-bin + mountPath: /host/opt + - name: cni-bin2 + mountPath: /host/home + - name: cni-conf + mountPath: /host/etc + - name: lib-modules + mountPath: /lib/modules + - name: xtables-lock + mountPath: /run/xtables.lock + readOnly: false + containers: + - name: weave + command: + - /home/weave/launch.sh + env: + - name: INIT_CONTAINER + value: "true" + - name: HOSTNAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: WEAVE_PASSWORD + valueFrom: + secretKeyRef: + name: weave-net + key: WEAVE_PASSWORD + - name: CHECKPOINT_DISABLE + value: "{{ weave_checkpoint_disable | bool | int }}" + - name: CONN_LIMIT + value: "{{ weave_conn_limit | int }}" + - name: HAIRPIN_MODE + value: "{{ weave_hairpin_mode | bool | lower }}" + - name: IPALLOC_RANGE + value: "{{ weave_ipalloc_range }}" + - name: EXPECT_NPC + value: "{{ weave_expect_npc | bool | int }}" +{% if weave_kube_peers %} + - name: KUBE_PEERS + value: "{{ weave_kube_peers }}" +{% endif %} +{% if weave_ipalloc_init %} + - name: IPALLOC_INIT + value: "{{ weave_ipalloc_init }}" +{% endif %} +{% if 
weave_expose_ip %} + - name: WEAVE_EXPOSE_IP + value: "{{ weave_expose_ip }}" +{% endif %} +{% if weave_metrics_addr %} + - name: WEAVE_METRICS_ADDR + value: "{{ weave_metrics_addr }}" +{% endif %} +{% if weave_status_addr %} + - name: WEAVE_STATUS_ADDR + value: "{{ weave_status_addr }}" +{% endif %} +{% if weave_iptables_backend %} + - name: IPTABLES_BACKEND + value: "{{ weave_iptables_backend }}" +{% endif %} + - name: WEAVE_MTU + value: "{{ weave_mtu | int }}" + - name: NO_MASQ_LOCAL + value: "{{ weave_no_masq_local | bool | int }}" +{% if weave_extra_args %} + - name: EXTRA_ARGS + value: "{{ weave_extra_args }}" +{% endif %} + image: {{ weave_kube_image_repo }}:{{ weave_kube_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + readinessProbe: + httpGet: + host: 127.0.0.1 + path: /status + port: 6784 + resources: + requests: + cpu: 50m + securityContext: + privileged: true + volumeMounts: + - name: weavedb + mountPath: /weavedb + - name: dbus + mountPath: /host/var/lib/dbus + readOnly: true + - mountPath: /host/etc/machine-id + name: cni-machine-id + readOnly: true + - name: xtables-lock + mountPath: /run/xtables.lock + readOnly: false + - name: weave-npc + env: + - name: HOSTNAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName +{% if weave_npc_extra_args %} + - name: EXTRA_ARGS + value: "{{ weave_npc_extra_args }}" +{% endif %} + image: {{ weave_npc_image_repo }}:{{ weave_npc_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + resources: + requests: + cpu: 50m + securityContext: + privileged: true + volumeMounts: + - name: xtables-lock + mountPath: /run/xtables.lock + readOnly: false + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + hostPID: false + restartPolicy: Always + securityContext: + seLinuxOptions: {} + serviceAccountName: weave-net + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + volumes: + - name: weavedb + hostPath: + path: /var/lib/weave + - name: cni-bin + hostPath: + path: /opt + - name: cni-bin2 + hostPath: + path: /home + - name: cni-conf + hostPath: + path: /etc + - name: cni-machine-id + hostPath: + path: /etc/machine-id + - name: dbus + hostPath: + path: /var/lib/dbus + - name: lib-modules + hostPath: + path: /lib/modules + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate + priorityClassName: system-node-critical + updateStrategy: + rollingUpdate: + maxUnavailable: {{ serial | default('20%') }} + type: RollingUpdate + - apiVersion: v1 + kind: Secret + metadata: + name: weave-net + namespace: kube-system + data: + WEAVE_PASSWORD: "{{ weave_password | default("") | b64encode }}" From 4d83a9cc8844a349522c05fe56992e0b0b09d7aa Mon Sep 17 00:00:00 2001 From: Brian Vo Date: Sun, 19 Oct 2025 16:34:25 +0700 Subject: [PATCH 10/10] add crio registry --- inventory/2SpeedLab/group_vars/all/cri-o.yml | 16 ++++++++++++++++ .../group_vars/k8s_cluster/k8s-cluster.yml | 2 +- .../group_vars/k8s_cluster/k8s-net-cilium.yml | 4 ++++ 3 files changed, 21 insertions(+), 1 deletion(-) diff --git a/inventory/2SpeedLab/group_vars/all/cri-o.yml b/inventory/2SpeedLab/group_vars/all/cri-o.yml index 757dab84c93..0bf5d6d1d54 100644 --- a/inventory/2SpeedLab/group_vars/all/cri-o.yml +++ b/inventory/2SpeedLab/group_vars/all/cri-o.yml @@ -7,3 +7,19 @@ # - registry: 10.0.0.2:5000 # username: user # password: pass +crio_registries: + - prefix: docker.io + insecure: false + blocked: false + location: docker.io + unqualified: true + - prefix: quay.io + insecure: false + 
blocked: false + location: quay.io + unqualified: true + +crio_unqualified_search_registries: + - docker.io + - quay.io + - gcr.io diff --git a/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-cluster.yml b/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-cluster.yml index 89f047d8368..84663ee1369 100644 --- a/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-cluster.yml +++ b/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-cluster.yml @@ -286,7 +286,7 @@ default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir" ## Supplementary addresses that can be added in kubernetes ssl keys. ## That can be useful for example to setup a keepalived virtual IP -supplementary_addresses_in_ssl_keys: [10.10.24.105] +supplementary_addresses_in_ssl_keys: [10.10.24.105, 10.10.24.109, 10.10.25.27, 10.10.25.74, rancher1.tabbycatlab.dev] ## Running on top of openstack vms with cinder enabled may lead to unschedulable pods due to NoVolumeZoneConflict restriction in kube-scheduler. ## See https://github.com/kubernetes-sigs/kubespray/issues/2141 diff --git a/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-cilium.yml b/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-cilium.yml index df864eba826..f1487287230 100644 --- a/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-cilium.yml +++ b/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-cilium.yml @@ -42,6 +42,10 @@ cilium_cpu_requests: 100m # Overlay Network Mode cilium_tunnel_mode: vxlan +cilium_ingress_enabled: true +cilium_ingress_default: true +cilium_ingress_load_balancer_mode: "dedicated" + # LoadBalancer Mode (snat/dsr/hybrid) Ref: https://docs.cilium.io/en/stable/network/kubernetes/kubeproxy-free/#dsr-mode # cilium_loadbalancer_mode: snat
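+ +# Per the Cilium docs, dsr requires the native-routing datapath and is not +# compatible with the vxlan tunnel mode set above, while hybrid applies DSR to +# TCP and SNAT to UDP; with vxlan tunneling, snat is the compatible choice: +# cilium_loadbalancer_mode: snat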