diff --git a/.github/ISSUE_TEMPLATE/bug-report.yaml b/.github/ISSUE_TEMPLATE/bug-report.yaml deleted file mode 100644 index 39411ce99b2..00000000000 --- a/.github/ISSUE_TEMPLATE/bug-report.yaml +++ /dev/null @@ -1,147 +0,0 @@ ---- -name: Bug Report -description: Report a bug encountered while using Kubespray -labels: kind/bug -body: - - type: markdown - attributes: - value: | - Please, be ready for followup questions, and please respond in a timely - manner. If we can't reproduce a bug or think a feature already exists, we - might close your issue. If we're wrong, PLEASE feel free to reopen it and - explain why. - - type: textarea - id: problem - attributes: - label: What happened? - description: | - Please provide as much info as possible. Not doing so may result in your bug not being addressed in a timely manner. - validations: - required: true - - type: textarea - id: expected - attributes: - label: What did you expect to happen? - validations: - required: true - - - type: textarea - id: repro - attributes: - label: How can we reproduce it (as minimally and precisely as possible)? - validations: - required: true - - - type: markdown - attributes: - value: '### Environment' - - - type: dropdown - id: os - attributes: - label: OS - options: - - 'RHEL 9' - - 'RHEL 8' - - 'Fedora 40' - - 'Ubuntu 24' - - 'Ubuntu 22' - - 'Ubuntu 20' - - 'Debian 12' - - 'Debian 11' - - 'Flatcar Container Linux' - - 'openSUSE Leap' - - 'openSUSE Tumbleweed' - - 'Oracle Linux 9' - - 'Oracle Linux 8' - - 'AlmaLinux 9' - - 'AlmaLinux 8' - - 'Rocky Linux 9' - - 'Rocky Linux 8' - - 'Amazon Linux 2' - - 'Kylin Linux Advanced Server V10' - - 'UOS Linux 20' - - 'openEuler 24' - - 'openEuler 22' - - 'openEuler 20' - - 'Other|Unsupported' - validations: - required: true - - - type: textarea - id: ansible_version - attributes: - label: Version of Ansible - placeholder: 'ansible --version' - validations: - required: true - - - type: input - id: python_version - attributes: - label: Version of Python - placeholder: 'python --version' - validations: - required: true - - - type: input - id: kubespray_version - attributes: - label: Version of Kubespray (commit) - placeholder: 'git rev-parse --short HEAD' - validations: - required: true - - - type: dropdown - id: network_plugin - attributes: - label: Network plugin used - options: - - calico - - cilium - - cni - - custom_cni - - flannel - - kube-ovn - - kube-router - - macvlan - - meta - - multus - - ovn4nfv - validations: - required: true - - - type: textarea - id: inventory - attributes: - label: Full inventory with variables - placeholder: 'ansible -i inventory/sample/inventory.ini all -m debug -a "var=hostvars[inventory_hostname]"' - description: We recommend using snippets services like https://gist.github.com/ etc. - validations: - required: true - - - type: input - id: ansible_command - attributes: - label: Command used to invoke ansible - validations: - required: true - - - type: textarea - id: ansible_output - attributes: - label: Output of ansible run - description: We recommend using snippets services like https://gist.github.com/ etc. - validations: - required: true - - - type: textarea - id: anything_else - attributes: - label: Anything else we need to know - description: | - By running scripts/collect-info.yaml you can get a lot of useful informations. 
- Script can be started by: - ansible-playbook -i -u -e ansible_ssh_user= -b --become-user=root -e dir=`pwd` scripts/collect-info.yaml - (If you using CoreOS remember to add '-e ansible_python_interpreter=/opt/bin/python'). - After running this command you can find logs in `pwd`/logs.tar.gz. You can even upload somewhere entire file and paste link here diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml deleted file mode 100644 index 2ef2e3760da..00000000000 --- a/.github/ISSUE_TEMPLATE/config.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -blank_issues_enabled: false -contact_links: - - name: Support Request - url: https://kubernetes.slack.com/channels/kubespray - about: Support request or question relating to Kubernetes diff --git a/.github/ISSUE_TEMPLATE/enhancement.yaml b/.github/ISSUE_TEMPLATE/enhancement.yaml deleted file mode 100644 index c0232069e9b..00000000000 --- a/.github/ISSUE_TEMPLATE/enhancement.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- -name: Enhancement Request -description: Suggest an enhancement to the Kubespray project -labels: kind/feature -body: - - type: markdown - attributes: - value: Please only use this template for submitting enhancement requests - - type: textarea - id: what - attributes: - label: What would you like to be added - validations: - required: true - - type: textarea - id: why - attributes: - label: Why is this needed - validations: - required: true diff --git a/.github/ISSUE_TEMPLATE/failing-test.yaml b/.github/ISSUE_TEMPLATE/failing-test.yaml deleted file mode 100644 index 94eb1bb784e..00000000000 --- a/.github/ISSUE_TEMPLATE/failing-test.yaml +++ /dev/null @@ -1,41 +0,0 @@ ---- -name: Failing Test -description: Report test failures in Kubespray CI jobs -labels: kind/failing-test -body: - - type: markdown - attributes: - value: Please only use this template for submitting reports about failing tests in Kubespray CI jobs - - type: textarea - id: failing_jobs - attributes: - label: Which jobs are failing ? - validations: - required: true - - - type: textarea - id: failing_tests - attributes: - label: Which tests are failing ? - validations: - required: true - - - type: input - id: since_when - attributes: - label: Since when has it been failing ? 
- validations: - required: true - - - type: textarea - id: failure_reason - attributes: - label: Reason for failure - description: If you don't know and have no guess, just put "Unknown" - validations: - required: true - - - type: textarea - id: anything_else - attributes: - label: Anything else we need to know diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md deleted file mode 100644 index 2a4d3c865af..00000000000 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ /dev/null @@ -1,44 +0,0 @@ - - -**What type of PR is this?** -> Uncomment only one ` /kind <>` line, hit enter to put that in a new line, and remove leading whitespaces from that line: -> -> /kind api-change -> /kind bug -> /kind cleanup -> /kind design -> /kind documentation -> /kind failing-test -> /kind feature -> /kind flake - -**What this PR does / why we need it**: - -**Which issue(s) this PR fixes**: - -Fixes # - -**Special notes for your reviewer**: - -**Does this PR introduce a user-facing change?**: - -```release-note - -``` diff --git a/.github/dependabot.yml b/.github/dependabot.yml deleted file mode 100644 index 74b909c05b9..00000000000 --- a/.github/dependabot.yml +++ /dev/null @@ -1,21 +0,0 @@ -version: 2 -updates: - - package-ecosystem: "pip" - directory: "/" - schedule: - interval: "weekly" - labels: - - dependencies - - release-note-none - groups: - molecule: - patterns: - - molecule - - molecule-plugins* - - package-ecosystem: "github-actions" - directory: "/" - labels: - - release-note-none - - ci-short - schedule: - interval: "weekly" diff --git a/.github/workflows/auto-label-os.yml b/.github/workflows/auto-label-os.yml deleted file mode 100644 index f9ebb3ed5c2..00000000000 --- a/.github/workflows/auto-label-os.yml +++ /dev/null @@ -1,32 +0,0 @@ -name: Issue labeler -on: - issues: - types: [opened] - -permissions: - contents: read - -jobs: - label-component: - runs-on: ubuntu-latest - permissions: - issues: write - - steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 - - - name: Parse issue form - uses: stefanbuck/github-issue-parser@2ea9b35a8c584529ed00891a8f7e41dc46d0441e - id: issue-parser - with: - template-path: .github/ISSUE_TEMPLATE/bug-report.yaml - - - name: Set labels based on OS field - uses: redhat-plumbers-in-action/advanced-issue-labeler@0db433d412193574252480b4fc22f2e4319a4ea3 - with: - issue-form: ${{ steps.issue-parser.outputs.jsonString }} - section: os - block-list: | - None - Other - token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/upgrade-patch-versions-schedule.yml b/.github/workflows/upgrade-patch-versions-schedule.yml deleted file mode 100644 index 470deb58151..00000000000 --- a/.github/workflows/upgrade-patch-versions-schedule.yml +++ /dev/null @@ -1,55 +0,0 @@ -name: Upgrade Kubespray components with new patches versions - all branches - -on: - schedule: - - cron: '22 2 * * *' # every day, 02:22 UTC - workflow_dispatch: - -permissions: {} -jobs: - get-releases-branches: - if: github.repository == 'kubernetes-sigs/kubespray' - runs-on: ubuntu-latest - outputs: - branches: ${{ steps.get-branches.outputs.data }} - steps: - - uses: octokit/graphql-action@8ad880e4d437783ea2ab17010324de1075228110 - id: get-branches - with: - query: | - query get_release_branches($owner:String!, $name:String!) 
{ - repository(owner:$owner, name:$name) { - refs(refPrefix: "refs/heads/", - first: 1, # TODO increment once we have release branch with the new checksums format - query: "release-", - orderBy: { - field: ALPHABETICAL, - direction: DESC - }) { - nodes { - name - } - } - } - } - variables: | - owner: ${{ github.repository_owner }} - name: ${{ github.event.repository.name }} - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - update-versions: - needs: get-releases-branches - strategy: - fail-fast: false - matrix: - branch: - - name: ${{ github.event.repository.default_branch }} - - ${{ fromJSON(needs.get-releases-branches.outputs.branches).repository.refs.nodes }} - uses: ./.github/workflows/upgrade-patch-versions.yml - permissions: - contents: write - pull-requests: write - name: Update patch updates on ${{ matrix.branch.name }} - with: - branch: ${{ matrix.branch.name }} diff --git a/.github/workflows/upgrade-patch-versions.yml b/.github/workflows/upgrade-patch-versions.yml deleted file mode 100644 index abb41cd4cb0..00000000000 --- a/.github/workflows/upgrade-patch-versions.yml +++ /dev/null @@ -1,44 +0,0 @@ -on: - workflow_call: - inputs: - branch: - description: Which branch to update with new patch versions - default: master - required: true - type: string - -jobs: - update-patch-versions: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 - with: - ref: ${{ inputs.branch }} - - uses: actions/setup-python@v5 - with: - python-version: '3.13' - cache: 'pip' - - run: pip install scripts/component_hash_update pre-commit - - run: update-hashes - env: - API_KEY: ${{ secrets.GITHUB_TOKEN }} - - uses: actions/cache@v4 - with: - key: pre-commit-hook-propagate - path: | - ~/.cache/pre-commit - - run: pre-commit run --all-files propagate-ansible-variables - continue-on-error: true - - uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e - with: - commit-message: Patch versions updates - title: Patch versions updates - ${{ inputs.branch }} - labels: bot - branch: component_hash_update/${{ inputs.branch }} - sign-commits: true - body: | - /kind feature - - ```release-note - NONE - ``` diff --git a/.gitignore b/.gitignore index fa68d5606e9..43cc0d7c330 100644 --- a/.gitignore +++ b/.gitignore @@ -10,7 +10,7 @@ contrib/offline/offline-files.tar.gz .idea .vscode .tox -.cache +.cacheheme *.bak *.tfstate *.tfstate*backup @@ -25,10 +25,10 @@ vagrant/ plugins/mitogen # Ansible inventory -inventory/* !inventory/local !inventory/sample inventory/*/artifacts/ +!inventory/2SpeedLab/inventory.ini # Byte-compiled / optimized / DLL files __pycache__/ @@ -120,3 +120,4 @@ tmp.md # Ansible collection files kubernetes_sigs-kubespray*tar.gz ansible_collections +inventory/2SpeedLab/inventory.ini diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml deleted file mode 100644 index 24e3876985d..00000000000 --- a/.gitlab-ci.yml +++ /dev/null @@ -1,66 +0,0 @@ ---- -stages: - - build - - test - - deploy-part1 - - deploy-extended - -variables: - FAILFASTCI_NAMESPACE: 'kargo-ci' - GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray' - GIT_CONFIG_COUNT: 2 - GIT_CONFIG_KEY_0: user.email - GIT_CONFIG_VALUE_0: "ci@kubespray.io" - GIT_CONFIG_KEY_1: user.name - GIT_CONFIG_VALUE_1: "Kubespray CI" - ANSIBLE_FORCE_COLOR: "true" - MAGIC: "ci check this" - GS_ACCESS_KEY_ID: $GS_KEY - GS_SECRET_ACCESS_KEY: $GS_SECRET - CONTAINER_ENGINE: docker - GCE_PREEMPTIBLE: "false" - ANSIBLE_KEEP_REMOTE_FILES: "1" - ANSIBLE_CONFIG: ./tests/ansible.cfg - ANSIBLE_REMOTE_USER: 
kubespray - ANSIBLE_PRIVATE_KEY_FILE: /tmp/id_rsa - ANSIBLE_INVENTORY: /tmp/inventory - ANSIBLE_STDOUT_CALLBACK: "debug" - RESET_CHECK: "false" - REMOVE_NODE_CHECK: "false" - UPGRADE_TEST: "false" - MITOGEN_ENABLE: "false" - ANSIBLE_VERBOSITY: 2 - RECOVER_CONTROL_PLANE_TEST: "false" - RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:]:kube_control_plane[1:]" - OPENTOFU_VERSION: v1.9.1 - PIPELINE_IMAGE: "$CI_REGISTRY_IMAGE/pipeline:${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}" - -before_script: - - ./tests/scripts/rebase.sh - - mkdir -p cluster-dump $ANSIBLE_INVENTORY - -.job: &job - tags: - - ffci - image: $PIPELINE_IMAGE - artifacts: - when: always - paths: - - cluster-dump/ - needs: - - pipeline-image - -.job-moderated: - extends: .job - needs: - - pipeline-image - - pre-commit # lint - - vagrant-validate # lint - -include: - - .gitlab-ci/build.yml - - .gitlab-ci/lint.yml - - .gitlab-ci/terraform.yml - - .gitlab-ci/kubevirt.yml - - .gitlab-ci/vagrant.yml - - .gitlab-ci/molecule.yml diff --git a/.gitlab-ci/build.yml b/.gitlab-ci/build.yml deleted file mode 100644 index 92304a2e388..00000000000 --- a/.gitlab-ci/build.yml +++ /dev/null @@ -1,30 +0,0 @@ ---- -pipeline-image: - cache: - key: $CI_COMMIT_REF_SLUG - paths: - - image-cache - tags: - - ffci - stage: build - image: moby/buildkit:rootless - variables: - BUILDKITD_FLAGS: --oci-worker-no-process-sandbox - CACHE_IMAGE: $CI_REGISTRY_IMAGE/pipeline:cache - # TODO: remove the override - # currently rebase.sh depends on bash (not available in the kaniko image) - # once we have a simpler rebase (which should be easy if the target branch ref is available as variable - # we'll be able to rebase here as well hopefully - before_script: - - mkdir -p ~/.docker - - echo "{\"auths\":{\"$CI_REGISTRY\":{\"auth\":\"$(echo -n ${CI_REGISTRY_USER}:${CI_REGISTRY_PASSWORD} | base64)\"}}}" > ~/.docker/config.json - script: - - | - buildctl-daemonless.sh build \ - --frontend dockerfile.v0 \ - --local context=$CI_PROJECT_DIR \ - --local dockerfile=$CI_PROJECT_DIR \ - --opt filename=pipeline.Dockerfile \ - --export-cache type=registry,ref=$CACHE_IMAGE \ - --import-cache type=registry,ref=$CACHE_IMAGE \ - --output type=image,name=$PIPELINE_IMAGE,push=true diff --git a/.gitlab-ci/kubevirt.yml b/.gitlab-ci/kubevirt.yml deleted file mode 100644 index c5ac51acc4e..00000000000 --- a/.gitlab-ci/kubevirt.yml +++ /dev/null @@ -1,153 +0,0 @@ ---- -.kubevirt: - extends: .job-moderated - interruptible: true - script: - - ansible-playbook tests/cloud_playbooks/create-kubevirt.yml - -c local -e @"tests/files/${TESTCASE}.yml" - - ./tests/scripts/testcases_run.sh - variables: - ANSIBLE_TIMEOUT: "120" - tags: - - ffci - needs: - - pipeline-image - -# TODO: generate testcases matrixes from the files in tests/files/ -# this is needed to avoid the need for PR rebasing when a job was added or removed in the target branch -# (currently, a removed job in the target branch breaks the tests, because the -# pipeline definition is parsed by gitlab before the rebase.sh script) -# CI template for PRs -pr: - stage: deploy-part1 - rules: - - if: $PR_LABELS =~ /.*ci-short.*/ - when: manual - allow_failure: true - - if: $CI_COMMIT_BRANCH =~ /^pr-.*$/ - when: on_success - - if: $CI_PIPELINE_SOURCE == "schedule" && $CI_PIPELINE_SCHEDULE_DESCRIPTION == "daily-ci" - when: on_success - - when: manual - allow_failure: true - extends: .kubevirt - parallel: - matrix: - - TESTCASE: - - almalinux9-crio - - almalinux9-kube-ovn - - debian11-calico-collection - - debian11-macvlan - - debian12-cilium - - 
debian13-cilium - - fedora39-kube-router - - openeuler24-calico - - rockylinux9-cilium - - ubuntu22-calico-all-in-one - - ubuntu22-calico-all-in-one-upgrade - - ubuntu24-calico-etcd-datastore - - ubuntu24-calico-all-in-one-hardening - - ubuntu24-cilium-sep - - ubuntu24-flannel-collection - - ubuntu24-kube-router-sep - - ubuntu24-kube-router-svc-proxy - - ubuntu24-ha-separate-etcd - - flatcar4081-calico - - fedora40-flannel-crio-collection-scale - -# The ubuntu24-calico-all-in-one jobs are meant as early stages to prevent running the full CI if something is horribly broken -ubuntu24-calico-all-in-one: - stage: deploy-part1 - extends: .kubevirt - variables: - TESTCASE: ubuntu24-calico-all-in-one - rules: - - if: $CI_COMMIT_BRANCH =~ /^pr-.*$/ - when: on_success - - if: $CI_PIPELINE_SOURCE == "schedule" && $CI_PIPELINE_SCHEDULE_DESCRIPTION == "daily-ci" - when: on_success - - when: manual - allow_failure: true - -pr_full: - extends: .kubevirt - stage: deploy-extended - rules: - - if: $PR_LABELS =~ /.*ci-full.*/ - when: on_success - - if: $CI_PIPELINE_SOURCE == "schedule" && $CI_PIPELINE_SCHEDULE_DESCRIPTION == "daily-ci" - when: on_success - # Else run as manual - - when: manual - allow_failure: true - parallel: - matrix: - - TESTCASE: - - almalinux9-calico-ha-ebpf - - almalinux9-calico-nodelocaldns-secondary - - debian11-custom-cni - - debian11-kubelet-csr-approver - - debian12-custom-cni-helm - - fedora39-calico-swap-selinux - - fedora39-crio - - ubuntu24-calico-ha-wireguard - - ubuntu24-flannel-ha - - ubuntu24-flannel-ha-once - -# Need an update of the container image to use schema v2 -# update: quay.io/kubespray/vm-amazon-linux-2:latest -manual: - extends: pr_full - parallel: - matrix: - - TESTCASE: - - amazon-linux-2-all-in-one - rules: - - when: manual - allow_failure: true - -pr_extended: - extends: .kubevirt - stage: deploy-extended - rules: - - if: $PR_LABELS =~ /.*(ci-extended|ci-full).*/ - when: on_success - - if: $CI_PIPELINE_SOURCE == "schedule" && $CI_PIPELINE_SCHEDULE_DESCRIPTION == "daily-ci" - when: on_success - - when: manual - allow_failure: true - parallel: - matrix: - - TESTCASE: - - almalinux9-calico - - almalinux9-calico-remove-node - - almalinux9-docker - - debian11-docker - - debian12-calico - - debian12-docker - - debian13-calico - - rockylinux9-calico - - ubuntu22-all-in-one-docker - - ubuntu24-all-in-one-docker - - ubuntu24-calico-all-in-one - - ubuntu24-calico-etcd-kubeadm - - ubuntu24-flannel - -# TODO: migrate to pr-full, fix the broken ones -periodic: - allow_failure: true - extends: .kubevirt - rules: - - if: $CI_PIPELINE_SOURCE == "schedule" && $CI_PIPELINE_SCHEDULE_DESCRIPTION == "daily-ci" - when: on_success - parallel: - matrix: - - TESTCASE: - - debian11-calico-upgrade - - debian11-calico-upgrade-once - - debian12-cilium-svc-proxy - - fedora39-calico-selinux - - fedora40-docker-calico - - ubuntu24-calico-etcd-kubeadm-upgrade-ha - - ubuntu24-calico-ha-recover - - ubuntu24-calico-ha-recover-noquorum diff --git a/.gitlab-ci/lint.yml b/.gitlab-ci/lint.yml deleted file mode 100644 index 809ad09ab9e..00000000000 --- a/.gitlab-ci/lint.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -pre-commit: - stage: test - tags: - - ffci - image: 'ghcr.io/pre-commit-ci/runner-image@sha256:fe01a6ec51b298412990b88627c3973b1146c7304f930f469bafa29ba60bcde9' - variables: - PRE_COMMIT_HOME: ${CI_PROJECT_DIR}/.cache/pre-commit - ANSIBLE_STDOUT_CALLBACK: default - script: - - pre-commit run --all-files --show-diff-on-failure - cache: - key: pre-commit-2 - paths: - - ${PRE_COMMIT_HOME} - 
when: 'always' - needs: [] - -vagrant-validate: - extends: .job - stage: test - tags: [ffci] - variables: - VAGRANT_VERSION: 2.3.7 - script: - - ./tests/scripts/vagrant-validate.sh diff --git a/.gitlab-ci/molecule.yml b/.gitlab-ci/molecule.yml deleted file mode 100644 index 23431566bf9..00000000000 --- a/.gitlab-ci/molecule.yml +++ /dev/null @@ -1,55 +0,0 @@ ---- -.molecule: - tags: [ffci] - rules: # run on ci-short as well - - if: $CI_COMMIT_BRANCH =~ /^pr-.*$/ - when: on_success - - if: $CI_PIPELINE_SOURCE == "schedule" && $CI_PIPELINE_SCHEDULE_DESCRIPTION == "daily-ci" - when: on_success - - when: manual - allow_failure: true - stage: deploy-part1 - image: $PIPELINE_IMAGE - needs: - - pipeline-image - script: - - ./tests/scripts/molecule_run.sh - after_script: - - rm -fr molecule_logs - - mkdir -p molecule_logs - - find ~/.cache/molecule/ \( -name '*.out' -o -name '*.err' \) -type f | xargs tar -uf molecule_logs/molecule.tar - - gzip molecule_logs/molecule.tar - artifacts: - when: always - paths: - - molecule_logs/ - -molecule: - extends: .molecule - script: - - ./tests/scripts/molecule_run.sh -i $ROLE - parallel: - matrix: - - ROLE: - - container-engine/cri-dockerd - - container-engine/containerd - - container-engine/cri-o - - container-engine/gvisor - - container-engine/youki - - adduser - - bastion-ssh-config - - bootstrap_os - -molecule_full: - allow_failure: true - rules: - - if: $CI_PIPELINE_SOURCE == "schedule" && $CI_PIPELINE_SCHEDULE_DESCRIPTION == "daily-ci" - when: on_success - - when: manual - allow_failure: true - extends: molecule - parallel: - matrix: - - ROLE: - # FIXME : tests below are perma-failing - - container-engine/kata-containers diff --git a/.gitlab-ci/terraform.yml b/.gitlab-ci/terraform.yml deleted file mode 100644 index b5d19946425..00000000000 --- a/.gitlab-ci/terraform.yml +++ /dev/null @@ -1,120 +0,0 @@ ---- -# Tests for contrib/terraform/ -.terraform_install: - extends: .job - needs: - - pipeline-image - variables: - TF_VAR_public_key_path: "${ANSIBLE_PRIVATE_KEY_FILE}.pub" - TF_VAR_ssh_private_key_path: $ANSIBLE_PRIVATE_KEY_FILE - CLUSTER: $CI_COMMIT_REF_NAME - TERRAFORM_STATE_ROOT: $CI_PROJECT_DIR - stage: deploy-part1 - before_script: - - ./tests/scripts/rebase.sh - - mkdir -p cluster-dump $ANSIBLE_INVENTORY - - ./tests/scripts/opentofu_install.sh - - cp contrib/terraform/$PROVIDER/sample-inventory/cluster.tfvars . 
- - ln -rs -t $ANSIBLE_INVENTORY contrib/terraform/$PROVIDER/hosts - - tofu -chdir="contrib/terraform/$PROVIDER" init - -terraform_validate: - extends: .terraform_install - tags: [ffci] - only: ['master', /^pr-.*$/] - script: - - tofu -chdir="contrib/terraform/$PROVIDER" validate - - tofu -chdir="contrib/terraform/$PROVIDER" fmt -check -diff - stage: test - needs: - - pipeline-image - parallel: - matrix: - - PROVIDER: - - openstack - - aws - - exoscale - - hetzner - - vsphere - - upcloud - - nifcloud - -.terraform_apply: - extends: .terraform_install - tags: [ffci] - stage: deploy-extended - when: manual - only: [/^pr-.*$/] - variables: - ANSIBLE_INVENTORY_UNPARSED_FAILED: "true" - ANSIBLE_REMOTE_USER: ubuntu # the openstack terraform module does not handle custom user correctly - ANSIBLE_SSH_RETRIES: 15 - TF_VAR_ssh_user: $ANSIBLE_REMOTE_USER - TF_VAR_cluster_name: $CI_JOB_ID - script: - # Set Ansible config - - cp ansible.cfg ~/.ansible.cfg - - ssh-keygen -N '' -f $ANSIBLE_PRIVATE_KEY_FILE -t rsa - - mkdir -p contrib/terraform/$PROVIDER/group_vars - # Random subnet to avoid routing conflicts - - export TF_VAR_subnet_cidr="10.$(( $RANDOM % 256 )).$(( $RANDOM % 256 )).0/24" - - tofu -chdir="contrib/terraform/$PROVIDER" apply -auto-approve -parallelism=1 - - tests/scripts/testcases_run.sh - after_script: - # Cleanup regardless of exit code - - tofu -chdir="contrib/terraform/$PROVIDER" destroy -auto-approve - -# Elastx is generously donating resources for Kubespray on Openstack CI -# Contacts: @gix @bl0m1 -.elastx_variables: &elastx_variables - OS_AUTH_URL: https://ops.elastx.cloud:5000 - OS_PROJECT_ID: 564c6b461c6b44b1bb19cdb9c2d928e4 - OS_PROJECT_NAME: kubespray_ci - OS_USER_DOMAIN_NAME: Default - OS_PROJECT_DOMAIN_ID: default - OS_USERNAME: kubespray@root314.com - OS_REGION_NAME: se-sto - OS_INTERFACE: public - OS_IDENTITY_API_VERSION: "3" - TF_VAR_router_id: "ab95917c-41fb-4881-b507-3a6dfe9403df" - -tf-elastx_cleanup: - tags: [ffci] - image: python - variables: - <<: *elastx_variables - before_script: - - pip install -r scripts/openstack-cleanup/requirements.txt - script: - - ./scripts/openstack-cleanup/main.py - allow_failure: true - -tf-elastx_ubuntu20-calico: - extends: .terraform_apply - stage: deploy-part1 - when: on_success - allow_failure: true - variables: - <<: *elastx_variables - PROVIDER: openstack - ANSIBLE_TIMEOUT: "60" - TF_VAR_number_of_k8s_masters: "1" - TF_VAR_number_of_k8s_masters_no_floating_ip: "0" - TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0" - TF_VAR_number_of_etcd: "0" - TF_VAR_number_of_k8s_nodes: "1" - TF_VAR_number_of_k8s_nodes_no_floating_ip: "0" - TF_VAR_number_of_gfs_nodes_no_floating_ip: "0" - TF_VAR_number_of_bastions: "0" - TF_VAR_number_of_k8s_masters_no_etcd: "0" - TF_VAR_floatingip_pool: "elx-public1" - TF_VAR_dns_nameservers: '["1.1.1.1", "8.8.8.8", "8.8.4.4"]' - TF_VAR_use_access_ip: "0" - TF_VAR_external_net: "600b8501-78cb-4155-9c9f-23dfcba88828" - TF_VAR_network_name: "ci-$CI_JOB_ID" - TF_VAR_az_list: '["sto1"]' - TF_VAR_az_list_node: '["sto1"]' - TF_VAR_flavor_k8s_master: 3f73fc93-ec61-4808-88df-2580d94c1a9b # v1-standard-2 - TF_VAR_flavor_k8s_node: 3f73fc93-ec61-4808-88df-2580d94c1a9b # v1-standard-2 - TF_VAR_image: ubuntu-20.04-server-latest - TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]' diff --git a/.gitlab-ci/vagrant.yml b/.gitlab-ci/vagrant.yml deleted file mode 100644 index cc29a98e658..00000000000 --- a/.gitlab-ci/vagrant.yml +++ /dev/null @@ -1,49 +0,0 @@ ---- -vagrant: - extends: .job-moderated - variables: - CI_PLATFORM: 
"vagrant" - SSH_USER: "vagrant" - VAGRANT_DEFAULT_PROVIDER: "libvirt" - KUBESPRAY_VAGRANT_CONFIG: tests/files/${TESTCASE}.rb - DOCKER_NAME: vagrant - VAGRANT_ANSIBLE_TAGS: facts - VAGRANT_HOME: "$CI_PROJECT_DIR/.vagrant.d" - PIP_CACHE_DIR: "$CI_PROJECT_DIR/.cache/pip" - tags: [ffci-vm-large] - image: quay.io/kubespray/vm-kubespray-ci:v13 - services: [] - before_script: - - echo $USER - - python3 -m venv citest - - source citest/bin/activate - - vagrant plugin expunge --reinstall --force --no-tty - - vagrant plugin install vagrant-libvirt - - pip install --no-compile --no-cache-dir pip -U - - pip install --no-compile --no-cache-dir -r $CI_PROJECT_DIR/requirements.txt - - pip install --no-compile --no-cache-dir -r $CI_PROJECT_DIR/tests/requirements.txt - - ./tests/scripts/vagrant_clean.sh - script: - - vagrant up - - ./tests/scripts/testcases_run.sh - after_script: - - vagrant destroy -f - cache: - key: $CI_JOB_NAME_SLUG - paths: - - .vagrant.d/boxes - - .cache/pip - policy: pull-push # TODO: change to "pull" when not on main - stage: deploy-extended - rules: - - if: $PR_LABELS =~ /.*(ci-extended|ci-full).*/ - when: on_success - - if: $CI_PIPELINE_SOURCE == "schedule" && $CI_PIPELINE_SCHEDULE_DESCRIPTION == "daily-ci" - when: on_success - - when: manual - allow_failure: true - parallel: - matrix: - - TESTCASE: - - ubuntu24-calico-dual-stack - - ubuntu24-calico-ipv6only-stack diff --git a/.mdlrc b/.mdlrc deleted file mode 100644 index 8ca55a8cee5..00000000000 --- a/.mdlrc +++ /dev/null @@ -1 +0,0 @@ -style "#{File.dirname(__FILE__)}/.md_style.rb" diff --git a/.nojekyll b/.nojekyll deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/CHANGELOG.md b/CHANGELOG.md deleted file mode 100644 index 13a4f6171db..00000000000 --- a/CHANGELOG.md +++ /dev/null @@ -1 +0,0 @@ -# See our release notes on [GitHub](https://github.com/kubernetes-sigs/kubespray/releases) diff --git a/CNAME b/CNAME deleted file mode 100644 index e5bd1ffa1bb..00000000000 --- a/CNAME +++ /dev/null @@ -1 +0,0 @@ -kubespray.io diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md deleted file mode 100644 index 08f2f947589..00000000000 --- a/CONTRIBUTING.md +++ /dev/null @@ -1,47 +0,0 @@ -# Contributing guidelines - -## How to become a contributor and submit your own code - -### Environment setup - -It is recommended to use filter to manage the GitHub email notification, see [examples for setting filters to Kubernetes Github notifications](https://github.com/kubernetes/community/blob/master/communication/best-practices.md#examples-for-setting-filters-to-kubernetes-github-notifications) - -To install development dependencies you can set up a python virtual env with the necessary dependencies: - -```ShellSession -virtualenv venv -source venv/bin/activate -pip install -r tests/requirements.txt -ansible-galaxy install -r tests/requirements.yml -``` - -#### Linting - -Kubespray uses [pre-commit](https://pre-commit.com) hook configuration to run several linters, please install this tool and use it to run validation tests before submitting a PR. - -```ShellSession -pre-commit install -pre-commit run -a # To run pre-commit hook on all files in the repository, even if they were not modified -``` - -#### Molecule - -[molecule](https://github.com/ansible-community/molecule) is designed to help the development and testing of Ansible roles. 
In Kubespray you can run it all for all roles with `./tests/scripts/molecule_run.sh` or for a specific role (that you are working with) with `molecule test` from the role directory (`cd roles/my-role`). - -When developing or debugging a role it can be useful to run `molecule create` and `molecule converge` separately. Then you can use `molecule login` to SSH into the test environment. - -#### Vagrant - -Vagrant with VirtualBox or libvirt driver helps you to quickly spin test clusters to test things end to end. See [README.md#vagrant](README.md) - -### Contributing A Patch - -1. Submit an issue describing your proposed change to the repo in question. -2. The [repo owners](OWNERS) will respond to your issue promptly. -3. Fork the desired repo, develop and test your code changes. -4. Install [pre-commit](https://pre-commit.com) and install it in your development repo. -5. Addess any pre-commit validation failures. -6. Sign the CNCF CLA () -7. Submit a pull request. -8. Work with the reviewers on their suggestions. -9. Ensure to rebase to the HEAD of your target branch and squash un-necessary commits () before final merger of your contribution. diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 0b540e19a34..00000000000 --- a/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2016 Kubespray - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/OWNERS b/OWNERS deleted file mode 100644 index e4ac17f4556..00000000000 --- a/OWNERS +++ /dev/null @@ -1,8 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -approvers: - - kubespray-approvers -reviewers: - - kubespray-reviewers -emeritus_approvers: - - kubespray-emeritus_approvers diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES deleted file mode 100644 index ff747044e1a..00000000000 --- a/OWNERS_ALIASES +++ /dev/null @@ -1,27 +0,0 @@ -aliases: - kubespray-approvers: - - ant31 - - mzaian - - tico88612 - - vannten - - yankay - kubespray-reviewers: - - cyclinder - - erikjiang - - mrfreezeex - - mzaian - - tico88612 - - vannten - - yankay - kubespray-emeritus_approvers: - - atoms - - chadswen - - cristicalin - - floryut - - liupeng0518 - - luckysb - - mattymo - - miouge1 - - oomichi - - riverzhang - - woopstar diff --git a/README.md b/README.md index cb8ed2a876a..0fe3c982e85 100644 --- a/README.md +++ b/README.md @@ -1,15 +1,9 @@ # Deploy a Production Ready Kubernetes Cluster -![Kubernetes Logo](https://raw.githubusercontent.com/kubernetes-sigs/kubespray/master/docs/img/kubernetes-logo.png) - -If you have questions, check the documentation at [kubespray.io](https://kubespray.io) and join us on the [kubernetes slack](https://kubernetes.slack.com), channel **\#kubespray**. -You can get your invite [here](http://slack.k8s.io/) - -- Can be deployed on **[AWS](docs/cloud_providers/aws.md), GCE, [Azure](docs/cloud_providers/azure.md), [OpenStack](docs/cloud_controllers/openstack.md), [vSphere](docs/cloud_controllers/vsphere.md), [Equinix Metal](docs/cloud_providers/equinix-metal.md) (bare metal), Oracle Cloud Infrastructure (Experimental), or Baremetal** +## This is the Kubernetes (k8s) setup for 2SpeedLab - **Highly available** cluster - **Composable** (Choice of the network plugin for instance) - Supports most popular **Linux distributions** -- **Continuous integration tests** ## Quick Start @@ -27,202 +21,16 @@ docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inve ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml ``` -### Ansible - -#### Usage - -See [Getting started](/docs/getting_started/getting-started.md) - -#### Collection - -See [here](docs/ansible/ansible_collection.md) if you wish to use this repository as an Ansible collection - -### Vagrant - -For Vagrant we need to install Python dependencies for provisioning tasks. -Check that ``Python`` and ``pip`` are installed: - -```ShellSession -python -V && pip -V -``` - -If this returns the version of the software, you're good to go.
If not, download and install Python from here - -Install Ansible according to [Ansible installation guide](/docs/ansible/ansible.md#installing-ansible) -then run the following step: - -```ShellSession -vagrant up -``` - -## Documents - -- [Requirements](#requirements) -- [Kubespray vs ...](docs/getting_started/comparisons.md) -- [Getting started](docs/getting_started/getting-started.md) -- [Setting up your first cluster](docs/getting_started/setting-up-your-first-cluster.md) -- [Ansible inventory and tags](docs/ansible/ansible.md) -- [Integration with existing ansible repo](docs/operations/integration.md) -- [Deployment data variables](docs/ansible/vars.md) -- [DNS stack](docs/advanced/dns-stack.md) -- [HA mode](docs/operations/ha-mode.md) -- [Network plugins](#network-plugins) -- [Vagrant install](docs/developers/vagrant.md) -- [Flatcar Container Linux bootstrap](docs/operating_systems/flatcar.md) -- [Fedora CoreOS bootstrap](docs/operating_systems/fcos.md) -- [openSUSE setup](docs/operating_systems/opensuse.md) -- [Downloaded artifacts](docs/advanced/downloads.md) -- [Equinix Metal](docs/cloud_providers/equinix-metal.md) -- [OpenStack](docs/cloud_controllers/openstack.md) -- [vSphere](docs/cloud_controllers/vsphere.md) -- [Large deployments](docs/operations/large-deployments.md) -- [Adding/replacing a node](docs/operations/nodes.md) -- [Upgrades basics](docs/operations/upgrades.md) -- [Air-Gap installation](docs/operations/offline-environment.md) -- [NTP](docs/advanced/ntp.md) -- [Hardening](docs/operations/hardening.md) -- [Mirror](docs/operations/mirror.md) -- [Roadmap](docs/roadmap/roadmap.md) - -## Supported Linux Distributions - -- **Flatcar Container Linux by Kinvolk** -- **Debian** Bookworm, Bullseye, Trixie -- **Ubuntu** 22.04, 24.04 -- **CentOS/RHEL** [8, 9](docs/operating_systems/rhel.md#rhel-8) -- **Fedora** 39, 40 -- **Fedora CoreOS** (see [fcos Note](docs/operating_systems/fcos.md)) -- **openSUSE** Leap 15.x/Tumbleweed -- **Oracle Linux** [8, 9](docs/operating_systems/rhel.md#rhel-8) -- **Alma Linux** [8, 9](docs/operating_systems/rhel.md#rhel-8) -- **Rocky Linux** [8, 9](docs/operating_systems/rhel.md#rhel-8) -- **Kylin Linux Advanced Server V10** (experimental: see [kylin linux notes](docs/operating_systems/kylinlinux.md)) -- **Amazon Linux 2** (experimental: see [amazon linux notes](docs/operating_systems/amazonlinux.md)) -- **UOS Linux** (experimental: see [uos linux notes](docs/operating_systems/uoslinux.md)) -- **openEuler** (experimental: see [openEuler notes](docs/operating_systems/openeuler.md)) - -Note: - -- Upstart/SysV init based OS types are not supported. -- [Kernel requirements](docs/operations/kernel-requirements.md) (please read if the OS kernel version is < 4.19). - -## Supported Components - - - -- Core - - [kubernetes](https://github.com/kubernetes/kubernetes) 1.33.4 - - [etcd](https://github.com/etcd-io/etcd) 3.5.22 - - [docker](https://www.docker.com/) 28.3 - - [containerd](https://containerd.io/) 2.1.4 - - [cri-o](http://cri-o.io/) 1.33.3 (experimental: see [CRI-O Note](docs/CRI/cri-o.md). 
Only on fedora, ubuntu and centos based OS) -- Network Plugin - - [cni-plugins](https://github.com/containernetworking/plugins) 1.4.1 - - [calico](https://github.com/projectcalico/calico) 3.29.5 - - [cilium](https://github.com/cilium/cilium) 1.17.7 - - [flannel](https://github.com/flannel-io/flannel) 0.26.7 - - [kube-ovn](https://github.com/alauda/kube-ovn) 1.12.21 - - [kube-router](https://github.com/cloudnativelabs/kube-router) 2.1.1 - - [multus](https://github.com/k8snetworkplumbingwg/multus-cni) 4.2.2 - - [kube-vip](https://github.com/kube-vip/kube-vip) 0.8.0 -- Application - - [cert-manager](https://github.com/jetstack/cert-manager) 1.15.3 - - [coredns](https://github.com/coredns/coredns) 1.12.0 - - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) 1.12.1 - - [argocd](https://argoproj.github.io/) 2.14.5 - - [helm](https://helm.sh/) 3.18.4 - - [metallb](https://metallb.universe.tf/) 0.13.9 - - [registry](https://github.com/distribution/distribution) 2.8.1 -- Storage Plugin - - [aws-ebs-csi-plugin](https://github.com/kubernetes-sigs/aws-ebs-csi-driver) 0.5.0 - - [azure-csi-plugin](https://github.com/kubernetes-sigs/azuredisk-csi-driver) 1.10.0 - - [cinder-csi-plugin](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md) 1.30.0 - - [gcp-pd-csi-plugin](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver) 1.9.2 - - [local-path-provisioner](https://github.com/rancher/local-path-provisioner) 0.0.24 - - [local-volume-provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner) 2.5.0 - - [node-feature-discovery](https://github.com/kubernetes-sigs/node-feature-discovery) 0.16.4 - - - -## Container Runtime Notes - -- The cri-o version should be aligned with the respective kubernetes version (i.e. kube_version=1.20.x, crio_version=1.20) - -## Requirements - -- **Minimum required version of Kubernetes is v1.30** -- **Ansible v2.14+, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands** -- The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](docs/operations/offline-environment.md)) -- The target servers are configured to allow **IPv4 forwarding**. -- If using IPv6 for pods and services, the target servers are configured to allow **IPv6 forwarding**. -- The **firewalls are not managed**, you'll need to implement your own rules the way you used to. - in order to avoid any issue during deployment you should disable your firewall. -- If kubespray is run from non-root user account, correct privilege escalation method - should be configured in the target servers. Then the `ansible_become` flag - or command parameters `--become or -b` should be specified. - -Hardware: -These limits are safeguarded by Kubespray. Actual requirements for your workload can differ. For a sizing guide go to the [Building Large Clusters](https://kubernetes.io/docs/setup/cluster-large/#size-of-master-and-master-components) guide. - -- Control Plane - - Memory: 2 GB -- Worker Node - - Memory: 1 GB - -## Network Plugins - -You can choose among ten network plugins. (default: `calico`, except Vagrant uses `flannel`) - -- [flannel](docs/CNI/flannel.md): gre/vxlan (layer 2) networking. - -- [Calico](https://docs.tigera.io/calico/latest/about/) is a networking and network policy provider. 
Calico supports a flexible set of networking options - designed to give you the most efficient networking across a range of situations, including non-overlay - and overlay networks, with or without BGP. Calico uses the same engine to enforce network policy for hosts, - pods, and (if using Istio and Envoy) applications at the service mesh layer. - -- [cilium](http://docs.cilium.io/en/latest/): layer 3/4 networking (as well as layer 7 to protect and secure application protocols), supports dynamic insertion of BPF bytecode into the Linux kernel to implement security services, networking and visibility logic. - -- [kube-ovn](docs/CNI/kube-ovn.md): Kube-OVN integrates the OVN-based Network Virtualization with Kubernetes. It offers an advanced Container Network Fabric for Enterprises. - -- [kube-router](docs/CNI/kube-router.md): Kube-router is a L3 CNI for Kubernetes networking aiming to provide operational - simplicity and high performance: it uses IPVS to provide Kube Services Proxy (if setup to replace kube-proxy), - iptables for network policies, and BGP for ods L3 networking (with optionally BGP peering with out-of-cluster BGP peers). - It can also optionally advertise routes to Kubernetes cluster Pods CIDRs, ClusterIPs, ExternalIPs and LoadBalancerIPs. - -- [macvlan](docs/CNI/macvlan.md): Macvlan is a Linux network driver. Pods have their own unique Mac and Ip address, connected directly the physical (layer 2) network. - -- [multus](docs/CNI/multus.md): Multus is a meta CNI plugin that provides multiple network interface support to pods. For each interface Multus delegates CNI calls to secondary CNI plugins such as Calico, macvlan, etc. - -- [custom_cni](roles/network-plugin/custom_cni/) : You can specify some manifests that will be applied to the clusters to bring you own CNI and use non-supported ones by Kubespray. - See `tests/files/custom_cni/README.md` and `tests/files/custom_cni/values.yaml`for an example with a CNI provided by a Helm Chart. - -The network plugin to use is defined by the variable `kube_network_plugin`. There is also an -option to leverage built-in cloud provider networking instead. -See also [Network checker](docs/advanced/netcheck.md). - -## Ingress Plugins - -- [nginx](https://kubernetes.github.io/ingress-nginx): the NGINX Ingress Controller. - -- [metallb](docs/ingress/metallb.md): the MetalLB bare-metal service LoadBalancer provider. 
- -## Community docs and resources - -- [kubernetes.io/docs/setup/production-environment/tools/kubespray/](https://kubernetes.io/docs/setup/production-environment/tools/kubespray/) -- [kubespray, monitoring and logging](https://github.com/gregbkr/kubernetes-kargo-logging-monitoring) by @gregbkr -- [Deploy Kubernetes w/ Ansible & Terraform](https://rsmitty.github.io/Terraform-Ansible-Kubernetes/) by @rsmitty -- [Deploy a Kubernetes Cluster with Kubespray (video)](https://www.youtube.com/watch?v=CJ5G4GpqDy0) - -## Tools and projects on top of Kubespray - -- [Digital Rebar Provision](https://github.com/digitalrebar/provision/blob/v4/doc/integrations/ansible.rst) - [Terraform Contrib](https://github.com/kubernetes-sigs/kubespray/tree/master/contrib/terraform) -- [Kubean](https://github.com/kubean-io/kubean) - -## CI Tests - -[![Build graphs](https://gitlab.com/kargo-ci/kubernetes-sigs-kubespray/badges/master/pipeline.svg)](https://gitlab.com/kargo-ci/kubernetes-sigs-kubespray/-/pipelines) - -CI/end-to-end tests sponsored by: [CNCF](https://cncf.io), [Equinix Metal](https://metal.equinix.com/), [OVHcloud](https://www.ovhcloud.com/), [ELASTX](https://elastx.se/). - -See the [test matrix](docs/developers/test_cases.md) for details. +## The 2SpeedLab K8s cluster includes: +- [kubernetes](https://github.com/kubernetes/kubernetes) 1.33.4 +- [etcd](https://github.com/etcd-io/etcd) 3.5.22 +- [containerd](https://containerd.io/) 2.1.4 +- [cilium](https://github.com/cilium/cilium) 1.17.7 +- [cert-manager](https://github.com/jetstack/cert-manager) 1.15.3 +- [coredns](https://github.com/coredns/coredns) 1.12.0 +- [ingress-nginx](https://github.com/kubernetes/ingress-nginx) 1.12.1 +- [helm](https://helm.sh/) 3.18.4 +- [local-path-provisioner](https://github.com/rancher/local-path-provisioner) 0.0.24 +- [local-volume-provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner) 2.5.0 +- [node-feature-discovery](https://github.com/kubernetes-sigs/node-feature-discovery) 0.16.4 +- [kube-vip](https://github.com/kube-vip/kube-vip) 0.8.0 \ No newline at end of file diff --git a/RELEASE.md b/RELEASE.md deleted file mode 100644 index 13282ef4d02..00000000000 --- a/RELEASE.md +++ /dev/null @@ -1,85 +0,0 @@ -# Release Process - -The Kubespray Project is released on an as-needed basis. The process is as follows: - -1. An issue is proposing a new release with a changelog since the last release. Please see [a good sample issue](https://github.com/kubernetes-sigs/kubespray/issues/8325) -1. At least one of the [approvers](OWNERS_ALIASES) must approve this release -1. (Only for major releases) The `kube_version_min_required` variable is set to `n-1` -1. (Only for major releases) Remove hashes for [EOL versions](https://github.com/kubernetes/website/blob/main/content/en/releases/patch-releases.md) of kubernetes from `*_checksums` variables. -1. Create the release note with [Kubernetes Release Notes Generator](https://github.com/kubernetes/release/blob/master/cmd/release-notes/README.md). See the following `Release note creation` section for the details. -1. An approver creates [new release in GitHub](https://github.com/kubernetes-sigs/kubespray/releases/new) using a version and tag name like `vX.Y.Z` and attaching the release notes -1. (Only for major releases) An approver creates a release branch in the form `release-X.Y` -1. (For major releases) On the `master` branch: bump the version in `galaxy.yml` to the next expected major release (X.y.0 with y = Y + 1), make a Pull Request. -1. (For minor releases) On the `release-X.Y` branch: bump the version in `galaxy.yml` to the next expected minor release (X.Y.z with z = Z + 1), make a Pull Request. -1.
The corresponding version of [quay.io/kubespray/kubespray:vX.Y.Z](https://quay.io/repository/kubespray/kubespray) and [quay.io/kubespray/vagrant:vX.Y.Z](https://quay.io/repository/kubespray/vagrant) container images are built and tagged. See the following `Container image creation` section for the details. -1. The release issue is closed -1. An announcement email is sent to `dev@kubernetes.io` with the subject `[ANNOUNCE] Kubespray $VERSION is released` -1. The topic of the #kubespray channel is updated with `vX.Y.Z is released! | ...` -1. Create/Update Issue for upgradeing kubernetes and [k8s-conformance](https://github.com/cncf/k8s-conformance) - -## Major/minor releases and milestones - -* For major releases (vX.Y) Kubespray maintains one branch (`release-X.Y`). Minor releases (vX.Y.Z) are available only as tags. - -* Security patches and bugs might be backported. - -* Fixes for major releases (vX.Y) and minor releases (vX.Y.Z) are delivered - via maintenance releases (vX.Y.Z) and assigned to the corresponding open - [GitHub milestone](https://github.com/kubernetes-sigs/kubespray/milestones). - That milestone remains open for the major/minor releases support lifetime, - which ends once the milestone is closed. Then only a next major or minor release - can be done. - -* Kubespray major and minor releases are bound to the given `kube_version` major/minor - version numbers and other components' arbitrary versions, like etcd or network plugins. - Older or newer component versions are not supported and not tested for the given - release (even if included in the checksum variables, like `kubeadm_checksums`). - -* There is no unstable releases and no APIs, thus Kubespray doesn't follow - [semver](https://semver.org/). Every version describes only a stable release. - Breaking changes, if any introduced by changed defaults or non-contrib ansible roles' - playbooks, shall be described in the release notes. Other breaking changes, if any in - the contributed addons or bound versions of Kubernetes and other components, are - considered out of Kubespray scope and are up to the components' teams to deal with and - document. - -* Minor releases can change components' versions, but not the major `kube_version`. - Greater `kube_version` requires a new major or minor release. For example, if Kubespray v2.0.0 - is bound to `kube_version: 1.4.x`, `calico_version: 0.22.0`, `etcd_version: 3.0.6`, - then Kubespray v2.1.0 may be bound to only minor changes to `kube_version`, like v1.5.1 - and *any* changes to other components, like etcd v4, or calico 1.2.3. - And Kubespray v3.x.x shall be bound to `kube_version: 2.x.x` respectively. - -## Release note creation - -You can create a release note with: - -```shell -export GITHUB_TOKEN= -export ORG=kubernetes-sigs -export REPO=kubespray -release-notes --start-sha --end-sha --dependencies=false --output=/tmp/kubespray-release-note --required-author="" -``` - -If the release note file(/tmp/kubespray-release-note) contains "### Uncategorized" pull requests, those pull requests don't have a valid kind label(`kind/feature`, etc.). -It is necessary to put a valid label on each pull request and run the above release-notes command again to get a better release note - -## Container image creation - -The container image `quay.io/kubespray/kubespray:vX.Y.Z` can be created from Dockerfile of the kubespray root directory: - -```shell -cd kubespray/ -nerdctl build -t quay.io/kubespray/kubespray:vX.Y.Z . 
-nerdctl push quay.io/kubespray/kubespray:vX.Y.Z -``` - -The container image `quay.io/kubespray/vagrant:vX.Y.Z` can be created from build.sh of test-infra/vagrant-docker/: - -```shell -cd kubespray/test-infra/vagrant-docker/ -./build vX.Y.Z -``` - -Please note that the above operation requires the permission to push container images into quay.io/kubespray/. -If you don't have the permission, please ask it on the #kubespray-dev channel. diff --git a/SECURITY_CONTACTS b/SECURITY_CONTACTS deleted file mode 100644 index 5b743285438..00000000000 --- a/SECURITY_CONTACTS +++ /dev/null @@ -1,15 +0,0 @@ -# Defined below are the security contacts for this repo. -# -# They are the contact point for the Product Security Committee to reach out -# to for triaging and handling of incoming issues. -# -# The below names agree to abide by the -# [Embargo Policy](https://git.k8s.io/security/private-distributors-list.md#embargo-policy) -# and will be removed and replaced if they violate that agreement. -# -# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE -# INSTRUCTIONS AT https://kubernetes.io/security/ -floryut -ant31 -VannTen -yankay diff --git a/Vagrantfile b/Vagrantfile deleted file mode 100644 index 833ca95ee17..00000000000 --- a/Vagrantfile +++ /dev/null @@ -1,349 +0,0 @@ -# -*- mode: ruby -*- -# # vi: set ft=ruby : - -# For help on using kubespray with vagrant, check out docs/developers/vagrant.md - -require 'fileutils' -require 'ipaddr' -require 'socket' - -Vagrant.require_version ">= 2.0.0" - -CONFIG = File.join(File.dirname(__FILE__), ENV['KUBESPRAY_VAGRANT_CONFIG'] || 'vagrant/config.rb') - -FLATCAR_URL_TEMPLATE = "https://%s.release.flatcar-linux.net/amd64-usr/current/flatcar_production_vagrant.json" - -# Uniq disk UUID for libvirt -DISK_UUID = Time.now.utc.to_i - -SUPPORTED_OS = { - "flatcar-stable" => {box: "flatcar-stable", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["stable"]}, - "flatcar-beta" => {box: "flatcar-beta", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["beta"]}, - "flatcar-alpha" => {box: "flatcar-alpha", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["alpha"]}, - "flatcar-edge" => {box: "flatcar-edge", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["edge"]}, - "ubuntu2004" => {box: "generic/ubuntu2004", user: "vagrant"}, - "ubuntu2204" => {box: "generic/ubuntu2204", user: "vagrant"}, - "ubuntu2404" => {box: "bento/ubuntu-24.04", user: "vagrant"}, - "centos8" => {box: "centos/8", user: "vagrant"}, - "centos8-bento" => {box: "bento/centos-8", user: "vagrant"}, - "almalinux8" => {box: "almalinux/8", user: "vagrant"}, - "almalinux8-bento" => {box: "bento/almalinux-8", user: "vagrant"}, - "almalinux9" => {box: "almalinux/9", user: "vagrant"}, - "rockylinux8" => {box: "rockylinux/8", user: "vagrant"}, - "rockylinux9" => {box: "rockylinux/9", user: "vagrant"}, - "fedora39" => {box: "fedora/39-cloud-base", user: "vagrant"}, - "fedora40" => {box: "fedora/40-cloud-base", user: "vagrant"}, - "fedora39-arm64" => {box: "bento/fedora-39-arm64", user: "vagrant"}, - "fedora40-arm64" => {box: "bento/fedora-40", user: "vagrant"}, - "opensuse" => {box: "opensuse/Leap-15.6.x86_64", user: "vagrant"}, - "opensuse-tumbleweed" => {box: "opensuse/Tumbleweed.x86_64", user: "vagrant"}, - "oraclelinux" => {box: "generic/oracle7", user: "vagrant"}, - "oraclelinux8" => {box: "generic/oracle8", user: "vagrant"}, - "rhel8" => {box: "generic/rhel8", user: "vagrant"}, - "debian11" => {box: "debian/bullseye64", user: "vagrant"}, - "debian12" => {box: "debian/bookworm64", user: 
"vagrant"}, -} - -if File.exist?(CONFIG) - require CONFIG -end - -# Defaults for config options defined in CONFIG -$num_instances ||= 3 -$instance_name_prefix ||= "k8s" -$vm_gui ||= false -$vm_memory ||= 2048 -$vm_cpus ||= 2 -$shared_folders ||= {} -$forwarded_ports ||= {} -$subnet ||= "172.18.8" -$subnet_ipv6 ||= "fd3c:b398:0698:0756" -$os ||= "ubuntu2004" -$network_plugin ||= "flannel" -$inventories ||= [] -# Setting multi_networking to true will install Multus: https://github.com/k8snetworkplumbingwg/multus-cni -$multi_networking ||= "False" -$download_run_once ||= "True" -$download_force_cache ||= "False" -# Modify those to have separate groups (for instance, to test separate etcd:) -# first_control_plane = 1 -# first_etcd = 4 -# control_plane_instances = 3 -# etcd_instances = 3 -$first_node ||= 1 -$first_control_plane ||= 1 -$first_etcd ||= 1 - -# The first three nodes are etcd servers -$etcd_instances ||= [$num_instances, 3].min -# The first two nodes are kube masters -$control_plane_instances ||= [$num_instances, 2].min -# All nodes are kube nodes -$kube_node_instances ||= $num_instances - $first_node + 1 - -# The following only works when using the libvirt provider -$kube_node_instances_with_disks ||= false -$kube_node_instances_with_disks_size ||= "20G" -$kube_node_instances_with_disks_number ||= 2 -$override_disk_size ||= false -$disk_size ||= "20GB" -$local_path_provisioner_enabled ||= "False" -$local_path_provisioner_claim_root ||= "/opt/local-path-provisioner/" -$libvirt_nested ||= false -# boolean or string (e.g. "-vvv") -$ansible_verbosity ||= false -$ansible_tags ||= ENV['VAGRANT_ANSIBLE_TAGS'] || "" - -$vagrant_dir ||= File.join(File.dirname(__FILE__), ".vagrant") - -$playbook ||= "cluster.yml" -$extra_vars ||= {} - -host_vars = {} - -def collect_networks(subnet, subnet_ipv6) - Socket.getifaddrs.filter_map do |iface| - next unless iface&.netmask&.ip_address && iface.addr - - is_ipv6 = iface.addr.ipv6? - ip = IPAddr.new(iface.addr.ip_address.split('%').first) - ip_test = is_ipv6 ? IPAddr.new("#{subnet_ipv6}::0") : IPAddr.new("#{subnet}.0") - - prefix = IPAddr.new(iface.netmask.ip_address).to_i.to_s(2).count('1') - network = ip.mask(prefix) - - [IPAddr.new("#{network}/#{prefix}"), ip_test] - end -end - -def subnet_in_use?(network_ips) - network_ips.any? { |net, test_ip| net.include?(test_ip) && test_ip != net } -end - -network_ips = collect_networks($subnet, $subnet_ipv6) - -if subnet_in_use?(network_ips) - puts "Invalid subnet provided, subnet is already in use: #{$subnet}.0" - puts "Subnets in use: #{network_ips.inspect}" - exit 1 -end - -# throw error if os is not supported -if ! SUPPORTED_OS.key?($os) - puts "Unsupported OS: #{$os}" - puts "Supported OS are: #{SUPPORTED_OS.keys.join(', ')}" - exit 1 -end - -$box = SUPPORTED_OS[$os][:box] - -if Vagrant.has_plugin?("vagrant-proxyconf") - $no_proxy = ENV['NO_PROXY'] || ENV['no_proxy'] || "127.0.0.1,localhost" - (1..$num_instances).each do |i| - $no_proxy += ",#{$subnet}.#{i+100}" - end -end - -Vagrant.configure("2") do |config| - - config.vm.box = $box - if SUPPORTED_OS[$os].has_key? 
:box_url - config.vm.box_url = SUPPORTED_OS[$os][:box_url] - end - config.ssh.username = SUPPORTED_OS[$os][:user] - - # plugin conflict - if Vagrant.has_plugin?("vagrant-vbguest") then - config.vbguest.auto_update = false - end - - # always use Vagrants insecure key - config.ssh.insert_key = false - - if ($override_disk_size) - unless Vagrant.has_plugin?("vagrant-disksize") - system "vagrant plugin install vagrant-disksize" - end - config.disksize.size = $disk_size - end - - (1..$num_instances).each do |i| - config.vm.define vm_name = "%s-%01d" % [$instance_name_prefix, i] do |node| - - node.vm.hostname = vm_name - - if Vagrant.has_plugin?("vagrant-proxyconf") - node.proxy.http = ENV['HTTP_PROXY'] || ENV['http_proxy'] || "" - node.proxy.https = ENV['HTTPS_PROXY'] || ENV['https_proxy'] || "" - node.proxy.no_proxy = $no_proxy - end - - ["vmware_fusion", "vmware_workstation"].each do |vmware| - node.vm.provider vmware do |v| - v.vmx['memsize'] = $vm_memory - v.vmx['numvcpus'] = $vm_cpus - end - end - - node.vm.provider :virtualbox do |vb| - vb.memory = $vm_memory - vb.cpus = $vm_cpus - vb.gui = $vm_gui - vb.linked_clone = true - vb.customize ["modifyvm", :id, "--vram", "8"] # ubuntu defaults to 256 MB which is a waste of precious RAM - vb.customize ["modifyvm", :id, "--audio", "none"] - end - - node.vm.provider :libvirt do |lv| - lv.nested = $libvirt_nested - lv.cpu_mode = "host-model" - lv.memory = $vm_memory - lv.cpus = $vm_cpus - lv.default_prefix = 'kubespray' - # Fix kernel panic on fedora 28 - if $os == "fedora" - lv.cpu_mode = "host-passthrough" - end - end - - if $kube_node_instances_with_disks - # Libvirt - driverletters = ('a'..'z').to_a - node.vm.provider :libvirt do |lv| - # always make /dev/sd{a/b/c} so that CI can ensure that - # virtualbox and libvirt will have the same devices to use for OSDs - (1..$kube_node_instances_with_disks_number).each do |d| - lv.storage :file, :device => "hd#{driverletters[d]}", :path => "disk-#{i}-#{d}-#{DISK_UUID}.disk", :size => $kube_node_instances_with_disks_size, :bus => "scsi" - end - end - node.vm.provider :virtualbox do |vb| - # always make /dev/sd{a/b/c} so that CI can ensure that - # virtualbox and libvirt will have the same devices to use for OSDs - (1..$kube_node_instances_with_disks_number).each do |d| - vb.customize ['createhd', '--filename', "disk-#{i}-#{driverletters[d]}-#{DISK_UUID}.disk", '--size', $kube_node_instances_with_disks_size] # 10GB disk - vb.customize ['storageattach', :id, '--storagectl', 'SATA Controller', '--port', d, '--device', 0, '--type', 'hdd', '--medium', "disk-#{i}-#{driverletters[d]}-#{DISK_UUID}.disk", '--nonrotational', 'on', '--mtype', 'normal'] - end - end - end - - if $expose_docker_tcp - node.vm.network "forwarded_port", guest: 2375, host: ($expose_docker_tcp + i - 1), auto_correct: true - end - - $forwarded_ports.each do |guest, host| - node.vm.network "forwarded_port", guest: guest, host: host, auto_correct: true - end - - if ["rhel8"].include? 
$os - # Vagrant synced_folder rsync options cannot be used for RHEL boxes as Rsync package cannot - # be installed until the host is registered with a valid Red Hat support subscription - node.vm.synced_folder ".", "/vagrant", disabled: false - $shared_folders.each do |src, dst| - node.vm.synced_folder src, dst - end - else - node.vm.synced_folder ".", "/vagrant", disabled: false, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z'] , rsync__exclude: ['.git','venv'] - $shared_folders.each do |src, dst| - node.vm.synced_folder src, dst, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z'] - end - end - - ip = "#{$subnet}.#{i+100}" - ip6 = "#{$subnet_ipv6}::#{i+100}" - node.vm.network :private_network, - :ip => ip, - :libvirt__guest_ipv6 => 'yes', - :libvirt__ipv6_address => ip6, - :libvirt__ipv6_prefix => "64", - :libvirt__forward_mode => "none", - :libvirt__dhcp_enabled => false - - # libvirt__ipv6_address does not work as intended, the address is obtained with the desired prefix, but auto-generated(like fd3c:b398:698:756:5054:ff:fe48:c61e/64) - # add default route for detect ansible_default_ipv6 - # TODO: fix libvirt__ipv6 or use $subnet in shell - config.vm.provision "shell", inline: "ip -6 r a fd3c:b398:698:756::/64 dev eth1;ip -6 r add default via fd3c:b398:0698:0756::1 dev eth1 || true" - - # Disable swap for each vm - node.vm.provision "shell", inline: "swapoff -a" - - # ubuntu2004 and ubuntu2204 have IPv6 explicitly disabled. This undoes that. - if ["ubuntu2004", "ubuntu2204"].include? $os - node.vm.provision "shell", inline: "rm -f /etc/modprobe.d/local.conf" - node.vm.provision "shell", inline: "sed -i '/net.ipv6.conf.all.disable_ipv6/d' /etc/sysctl.d/99-sysctl.conf /etc/sysctl.conf" - end - # Hack for fedora39/40 to get the IP address of the second interface - if ["fedora39", "fedora40", "fedora39-arm64", "fedora40-arm64"].include? $os - config.vm.provision "shell", inline: <<-SHELL - nmcli conn modify 'Wired connection 2' ipv4.addresses $(cat /etc/sysconfig/network-scripts/ifcfg-eth1 | grep IPADDR | cut -d "=" -f2)/24 - nmcli conn modify 'Wired connection 2' ipv4.method manual - service NetworkManager restart - SHELL - end - - - # Rockylinux boxes needs UEFI - if ["rockylinux8", "rockylinux9"].include? $os - config.vm.provider "libvirt" do |domain| - domain.loader = "/usr/share/OVMF/x64/OVMF_CODE.fd" - end - end - - # Disable firewalld on oraclelinux/redhat vms - if ["oraclelinux","oraclelinux8", "rhel8","rockylinux8"].include? 
$os - node.vm.provision "shell", inline: "systemctl stop firewalld; systemctl disable firewalld" - end - - host_vars[vm_name] = { - "ip": ip, - "flannel_interface": "eth1", - "kube_network_plugin": $network_plugin, - "kube_network_plugin_multus": $multi_networking, - "download_run_once": $download_run_once, - "download_localhost": "False", - "download_cache_dir": ENV['HOME'] + "/kubespray_cache", - # Make kubespray cache even when download_run_once is false - "download_force_cache": $download_force_cache, - # Keeping the cache on the nodes can improve provisioning speed while debugging kubespray - "download_keep_remote_cache": "False", - "docker_rpm_keepcache": "1", - # These two settings will put kubectl and admin.config in $inventory/artifacts - "kubeconfig_localhost": "True", - "kubectl_localhost": "True", - "local_path_provisioner_enabled": "#{$local_path_provisioner_enabled}", - "local_path_provisioner_claim_root": "#{$local_path_provisioner_claim_root}", - "ansible_ssh_user": SUPPORTED_OS[$os][:user], - "ansible_ssh_private_key_file": File.join(Dir.home, ".vagrant.d", "insecure_private_key"), - "unsafe_show_logs": "True" - } - - # Only execute the Ansible provisioner once, when all the machines are up and ready. - # And limit the action to gathering facts, the full playbook is going to be ran by testcases_run.sh - if i == $num_instances - node.vm.provision "ansible" do |ansible| - ansible.playbook = $playbook - ansible.compatibility_mode = "2.0" - ansible.verbose = $ansible_verbosity - ansible.become = true - ansible.limit = "all,localhost" - ansible.host_key_checking = false - ansible.raw_arguments = ["--forks=#{$num_instances}", - "--flush-cache", - "-e ansible_become_pass=vagrant"] + - $inventories.map {|inv| ["-i", inv]}.flatten - ansible.host_vars = host_vars - ansible.extra_vars = $extra_vars - if $ansible_tags != "" - ansible.tags = [$ansible_tags] - end - ansible.groups = { - "etcd" => ["#{$instance_name_prefix}-[#{$first_etcd}:#{$etcd_instances + $first_etcd - 1}]"], - "kube_control_plane" => ["#{$instance_name_prefix}-[#{$first_control_plane}:#{$control_plane_instances + $first_control_plane - 1}]"], - "kube_node" => ["#{$instance_name_prefix}-[#{$first_node}:#{$kube_node_instances + $first_node - 1}]"], - "k8s_cluster:children" => ["kube_control_plane", "kube_node"], - } - end - end - - end - end -end diff --git a/code-of-conduct.md b/code-of-conduct.md deleted file mode 100644 index 0d15c00cf32..00000000000 --- a/code-of-conduct.md +++ /dev/null @@ -1,3 +0,0 @@ -# Kubernetes Community Code of Conduct - -Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) diff --git a/contrib/aws_iam/kubernetes-master-policy.json b/contrib/aws_iam/kubernetes-master-policy.json deleted file mode 100644 index e5cbaea8039..00000000000 --- a/contrib/aws_iam/kubernetes-master-policy.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": ["ec2:*"], - "Resource": ["*"] - }, - { - "Effect": "Allow", - "Action": ["elasticloadbalancing:*"], - "Resource": ["*"] - }, - { - "Effect": "Allow", - "Action": ["route53:*"], - "Resource": ["*"] - }, - { - "Effect": "Allow", - "Action": "s3:*", - "Resource": [ - "arn:aws:s3:::kubernetes-*" - ] - } - ] -} diff --git a/contrib/aws_iam/kubernetes-master-role.json b/contrib/aws_iam/kubernetes-master-role.json deleted file mode 100644 index 66d5de1d5ae..00000000000 --- a/contrib/aws_iam/kubernetes-master-role.json +++ /dev/null @@ -1,10 +0,0 @@ 
-{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { "Service": "ec2.amazonaws.com"}, - "Action": "sts:AssumeRole" - } - ] -} diff --git a/contrib/aws_iam/kubernetes-minion-policy.json b/contrib/aws_iam/kubernetes-minion-policy.json deleted file mode 100644 index af81e98c824..00000000000 --- a/contrib/aws_iam/kubernetes-minion-policy.json +++ /dev/null @@ -1,45 +0,0 @@ -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": "s3:*", - "Resource": [ - "arn:aws:s3:::kubernetes-*" - ] - }, - { - "Effect": "Allow", - "Action": "ec2:Describe*", - "Resource": "*" - }, - { - "Effect": "Allow", - "Action": "ec2:AttachVolume", - "Resource": "*" - }, - { - "Effect": "Allow", - "Action": "ec2:DetachVolume", - "Resource": "*" - }, - { - "Effect": "Allow", - "Action": ["route53:*"], - "Resource": ["*"] - }, - { - "Effect": "Allow", - "Action": [ - "ecr:GetAuthorizationToken", - "ecr:BatchCheckLayerAvailability", - "ecr:GetDownloadUrlForLayer", - "ecr:GetRepositoryPolicy", - "ecr:DescribeRepositories", - "ecr:ListImages", - "ecr:BatchGetImage" - ], - "Resource": "*" - } - ] -} diff --git a/contrib/aws_iam/kubernetes-minion-role.json b/contrib/aws_iam/kubernetes-minion-role.json deleted file mode 100644 index 66d5de1d5ae..00000000000 --- a/contrib/aws_iam/kubernetes-minion-role.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { "Service": "ec2.amazonaws.com"}, - "Action": "sts:AssumeRole" - } - ] -} diff --git a/contrib/aws_inventory/kubespray-aws-inventory.py b/contrib/aws_inventory/kubespray-aws-inventory.py deleted file mode 100755 index 7527c683855..00000000000 --- a/contrib/aws_inventory/kubespray-aws-inventory.py +++ /dev/null @@ -1,81 +0,0 @@ -#!/usr/bin/env python - -from __future__ import print_function -import boto3 -import os -import argparse -import json - -class SearchEC2Tags(object): - - def __init__(self): - self.parse_args() - if self.args.list: - self.search_tags() - if self.args.host: - data = {} - print(json.dumps(data, indent=2)) - - def parse_args(self): - - ##Check if VPC_VISIBILITY is set, if not default to private - if "VPC_VISIBILITY" in os.environ: - self.vpc_visibility = os.environ['VPC_VISIBILITY'] - else: - self.vpc_visibility = "private" - - ##Support --list and --host flags. We largely ignore the host one. - parser = argparse.ArgumentParser() - parser.add_argument('--list', action='store_true', default=False, help='List instances') - parser.add_argument('--host', action='store_true', help='Get all the variables about a specific instance') - self.args = parser.parse_args() - - def search_tags(self): - hosts = {} - hosts['_meta'] = { 'hostvars': {} } - - ##Search ec2 three times to find nodes of each group type. Relies on kubespray-role key/value. 
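-    ##For example, an instance whose kubespray-role tag value is "kube_control_plane,etcd" (illustrative value)
-    ##matches the wildcard filter below for both the kube_control_plane and etcd groups.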
- for group in ["kube_control_plane", "kube_node", "etcd"]: - hosts[group] = [] - tag_key = "kubespray-role" - tag_value = ["*"+group+"*"] - region = os.environ['AWS_REGION'] - - ec2 = boto3.resource('ec2', region) - filters = [{'Name': 'tag:'+tag_key, 'Values': tag_value}, {'Name': 'instance-state-name', 'Values': ['running']}] - cluster_name = os.getenv('CLUSTER_NAME') - if cluster_name: - filters.append({'Name': 'tag-key', 'Values': ['kubernetes.io/cluster/'+cluster_name]}) - instances = ec2.instances.filter(Filters=filters) - for instance in instances: - - ##Suppose default vpc_visibility is private - dns_name = instance.private_dns_name - ansible_host = { - 'ansible_ssh_host': instance.private_ip_address - } - - ##Override when vpc_visibility actually is public - if self.vpc_visibility == "public": - dns_name = instance.public_dns_name - ansible_host = { - 'ansible_ssh_host': instance.public_ip_address - } - - ##Set when instance actually has node_labels - node_labels_tag = list(filter(lambda t: t['Key'] == 'kubespray-node-labels', instance.tags)) - if node_labels_tag: - ansible_host['node_labels'] = dict([ label.strip().split('=') for label in node_labels_tag[0]['Value'].split(',') ]) - - ##Set when instance actually has node_taints - node_taints_tag = list(filter(lambda t: t['Key'] == 'kubespray-node-taints', instance.tags)) - if node_taints_tag: - ansible_host['node_taints'] = list([ taint.strip() for taint in node_taints_tag[0]['Value'].split(',') ]) - - hosts[group].append(dns_name) - hosts['_meta']['hostvars'][dns_name] = ansible_host - - hosts['k8s_cluster'] = {'children':['kube_control_plane', 'kube_node']} - print(json.dumps(hosts, sort_keys=True, indent=2)) - -SearchEC2Tags() diff --git a/contrib/aws_inventory/requirements.txt b/contrib/aws_inventory/requirements.txt deleted file mode 100644 index 179d5de54c6..00000000000 --- a/contrib/aws_inventory/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -boto3 # Apache-2.0 diff --git a/contrib/azurerm/.gitignore b/contrib/azurerm/.gitignore deleted file mode 100644 index 3ef07f87460..00000000000 --- a/contrib/azurerm/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -.generated -/inventory diff --git a/contrib/azurerm/README.md b/contrib/azurerm/README.md deleted file mode 100644 index 8869ec09114..00000000000 --- a/contrib/azurerm/README.md +++ /dev/null @@ -1,67 +0,0 @@ -# Kubernetes on Azure with Azure Resource Group Templates - -Provision the base infrastructure for a Kubernetes cluster by using [Azure Resource Group Templates](https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-authoring-templates) - -## Status - -This will provision the base infrastructure (vnet, vms, nics, ips, ...) needed for Kubernetes in Azure into the specified -Resource Group. It will not install Kubernetes itself, this has to be done in a later step by yourself (using kubespray of course). - -## Requirements - -- [Install azure-cli](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest) -- [Login with azure-cli](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli?view=azure-cli-latest) -- Dedicated Resource Group created in the Azure Portal or through azure-cli - -## Configuration through group_vars/all - -You have to modify at least two variables in group_vars/all. The one is the **cluster_name** variable, it must be globally -unique due to some restrictions in Azure. The other one is the **ssh_public_keys** variable, it must be your ssh public -key to access your azure virtual machines. 
Most other variables should be self explanatory if you have some basic Kubernetes -experience. - -## Bastion host - -You can enable the use of a Bastion Host by changing **use_bastion** in group_vars/all to **true**. The generated -templates will then include an additional bastion VM which can then be used to connect to the masters and nodes. The option -also removes all public IPs from all other VMs. - -## Generating and applying - -To generate and apply the templates, call: - -```shell -./apply-rg.sh -``` - -If you change something in the configuration (e.g. number of nodes) later, you can call this again and Azure will -take care about creating/modifying whatever is needed. - -## Clearing a resource group - -If you need to delete all resources from a resource group, simply call: - -```shell -./clear-rg.sh -``` - -**WARNING** this really deletes everything from your resource group, including everything that was later created by you! - -## Installing Ansible and the dependencies - -Install Ansible according to [Ansible installation guide](/docs/ansible/ansible.md#installing-ansible) - -## Generating an inventory for kubespray - -After you have applied the templates, you can generate an inventory with this call: - -```shell -./generate-inventory.sh -``` - -It will create the file ./inventory which can then be used with kubespray, e.g.: - -```shell -cd kubespray-root-dir -ansible-playbook -i contrib/azurerm/inventory -u devops --become -e "@inventory/sample/group_vars/all/all.yml" cluster.yml -``` diff --git a/contrib/azurerm/apply-rg.sh b/contrib/azurerm/apply-rg.sh deleted file mode 100755 index 2348169d4ef..00000000000 --- a/contrib/azurerm/apply-rg.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env bash - -set -e - -AZURE_RESOURCE_GROUP="$1" - -if [ "$AZURE_RESOURCE_GROUP" == "" ]; then - echo "AZURE_RESOURCE_GROUP is missing" - exit 1 -fi - -ansible-playbook generate-templates.yml - -az deployment group create --template-file ./.generated/network.json -g $AZURE_RESOURCE_GROUP -az deployment group create --template-file ./.generated/storage.json -g $AZURE_RESOURCE_GROUP -az deployment group create --template-file ./.generated/availability-sets.json -g $AZURE_RESOURCE_GROUP -az deployment group create --template-file ./.generated/bastion.json -g $AZURE_RESOURCE_GROUP -az deployment group create --template-file ./.generated/masters.json -g $AZURE_RESOURCE_GROUP -az deployment group create --template-file ./.generated/minions.json -g $AZURE_RESOURCE_GROUP diff --git a/contrib/azurerm/clear-rg.sh b/contrib/azurerm/clear-rg.sh deleted file mode 100755 index a2004553799..00000000000 --- a/contrib/azurerm/clear-rg.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env bash - -set -e - -AZURE_RESOURCE_GROUP="$1" - -if [ "$AZURE_RESOURCE_GROUP" == "" ]; then - echo "AZURE_RESOURCE_GROUP is missing" - exit 1 -fi - -ansible-playbook generate-templates.yml - -az group deployment create -g "$AZURE_RESOURCE_GROUP" --template-file ./.generated/clear-rg.json --mode Complete diff --git a/contrib/azurerm/generate-inventory.sh b/contrib/azurerm/generate-inventory.sh deleted file mode 100755 index b3eb9c0fe64..00000000000 --- a/contrib/azurerm/generate-inventory.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env bash - -set -e - -AZURE_RESOURCE_GROUP="$1" - -if [ "$AZURE_RESOURCE_GROUP" == "" ]; then - echo "AZURE_RESOURCE_GROUP is missing" - exit 1 -fi -# check if azure cli 2.0 exists else use azure cli 1.0 -if az &>/dev/null; then - ansible-playbook generate-inventory_2.yml -e 
azure_resource_group="$AZURE_RESOURCE_GROUP" -elif azure &>/dev/null; then - ansible-playbook generate-inventory.yml -e azure_resource_group="$AZURE_RESOURCE_GROUP" -else - echo "Azure cli not found" -fi diff --git a/contrib/azurerm/generate-inventory.yml b/contrib/azurerm/generate-inventory.yml deleted file mode 100644 index 59e1e90b6a4..00000000000 --- a/contrib/azurerm/generate-inventory.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -- name: Generate Azure inventory - hosts: localhost - gather_facts: false - roles: - - generate-inventory diff --git a/contrib/azurerm/generate-inventory_2.yml b/contrib/azurerm/generate-inventory_2.yml deleted file mode 100644 index 8c2cbff86b5..00000000000 --- a/contrib/azurerm/generate-inventory_2.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -- name: Generate Azure inventory - hosts: localhost - gather_facts: false - roles: - - generate-inventory_2 diff --git a/contrib/azurerm/generate-templates.yml b/contrib/azurerm/generate-templates.yml deleted file mode 100644 index f2cf231bc4d..00000000000 --- a/contrib/azurerm/generate-templates.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -- name: Generate Azure templates - hosts: localhost - gather_facts: false - roles: - - generate-templates diff --git a/contrib/azurerm/group_vars/all b/contrib/azurerm/group_vars/all deleted file mode 100644 index 44dc1e384ee..00000000000 --- a/contrib/azurerm/group_vars/all +++ /dev/null @@ -1,51 +0,0 @@ - -# Due to some Azure limitations (ex:- Storage Account's name must be unique), -# this name must be globally unique - it will be used as a prefix for azure components -cluster_name: example - -# Set this to true if you do not want to have public IPs for your masters and minions. This will provision a bastion -# node that can be used to access the masters and minions -use_bastion: false - -# Set this to a preferred name that will be used as the first part of the dns name for your bastotion host. For example: k8s-bastion..cloudapp.azure.com. -# This is convenient when exceptions have to be configured on a firewall to allow ssh to the given bastion host. -# bastion_domain_prefix: k8s-bastion - -number_of_k8s_masters: 3 -number_of_k8s_nodes: 3 - -masters_vm_size: Standard_A2 -masters_os_disk_size: 1000 - -minions_vm_size: Standard_A2 -minions_os_disk_size: 1000 - -admin_username: devops -admin_password: changeme - -# MAKE SURE TO CHANGE THIS TO YOUR PUBLIC KEY to access your azure machines -ssh_public_keys: - - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDLRzcxbsFDdEibiyXCSdIFh7bKbXso1NqlKjEyPTptf3aBXHEhVil0lJRjGpTlpfTy7PHvXFbXIOCdv9tOmeH1uxWDDeZawgPFV6VSZ1QneCL+8bxzhjiCn8133wBSPZkN8rbFKd9eEUUBfx8ipCblYblF9FcidylwtMt5TeEmXk8yRVkPiCuEYuDplhc2H0f4PsK3pFb5aDVdaDT3VeIypnOQZZoUxHWqm6ThyHrzLJd3SrZf+RROFWW1uInIDf/SZlXojczUYoffxgT1lERfOJCHJXsqbZWugbxQBwqsVsX59+KPxFFo6nV88h3UQr63wbFx52/MXkX4WrCkAHzN ablock-vwfs@dell-lappy" - -# Disable using ssh using password. 
Change it to false to allow to connect to ssh by password -disablePasswordAuthentication: true - -# Azure CIDRs -azure_vnet_cidr: 10.0.0.0/8 -azure_admin_cidr: 10.241.2.0/24 -azure_masters_cidr: 10.0.4.0/24 -azure_minions_cidr: 10.240.0.0/16 - -# Azure loadbalancer port to use to access your cluster -kube_apiserver_port: 6443 - -# Azure Netwoking and storage naming to use with inventory/all.yml -#azure_virtual_network_name: KubeVNET -#azure_subnet_admin_name: ad-subnet -#azure_subnet_masters_name: master-subnet -#azure_subnet_minions_name: minion-subnet -#azure_route_table_name: routetable -#azure_security_group_name: secgroup - -# Storage types available are: "Standard_LRS","Premium_LRS" -#azure_storage_account_type: Standard_LRS diff --git a/contrib/azurerm/roles/generate-inventory/tasks/main.yml b/contrib/azurerm/roles/generate-inventory/tasks/main.yml deleted file mode 100644 index f93f1b6b281..00000000000 --- a/contrib/azurerm/roles/generate-inventory/tasks/main.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- - -- name: Query Azure VMs - command: azure vm list-ip-address --json {{ azure_resource_group }} - register: vm_list_cmd - -- name: Set vm_list - set_fact: - vm_list: "{{ vm_list_cmd.stdout }}" - -- name: Generate inventory - template: - src: inventory.j2 - dest: "{{ playbook_dir }}/inventory" - mode: "0644" diff --git a/contrib/azurerm/roles/generate-inventory/templates/inventory.j2 b/contrib/azurerm/roles/generate-inventory/templates/inventory.j2 deleted file mode 100644 index 6c5feb2cd4c..00000000000 --- a/contrib/azurerm/roles/generate-inventory/templates/inventory.j2 +++ /dev/null @@ -1,33 +0,0 @@ - -{% for vm in vm_list %} -{% if not use_bastion or vm.name == 'bastion' %} -{{ vm.name }} ansible_ssh_host={{ vm.networkProfile.networkInterfaces[0].expanded.ipConfigurations[0].publicIPAddress.expanded.ipAddress }} ip={{ vm.networkProfile.networkInterfaces[0].expanded.ipConfigurations[0].privateIPAddress }} -{% else %} -{{ vm.name }} ansible_ssh_host={{ vm.networkProfile.networkInterfaces[0].expanded.ipConfigurations[0].privateIPAddress }} -{% endif %} -{% endfor %} - -[kube_control_plane] -{% for vm in vm_list %} -{% if 'kube_control_plane' in vm.tags.roles %} -{{ vm.name }} -{% endif %} -{% endfor %} - -[etcd] -{% for vm in vm_list %} -{% if 'etcd' in vm.tags.roles %} -{{ vm.name }} -{% endif %} -{% endfor %} - -[kube_node] -{% for vm in vm_list %} -{% if 'kube_node' in vm.tags.roles %} -{{ vm.name }} -{% endif %} -{% endfor %} - -[k8s_cluster:children] -kube_node -kube_control_plane diff --git a/contrib/azurerm/roles/generate-inventory_2/tasks/main.yml b/contrib/azurerm/roles/generate-inventory_2/tasks/main.yml deleted file mode 100644 index 267755b1285..00000000000 --- a/contrib/azurerm/roles/generate-inventory_2/tasks/main.yml +++ /dev/null @@ -1,31 +0,0 @@ ---- - -- name: Query Azure VMs IPs - command: az vm list-ip-addresses -o json --resource-group {{ azure_resource_group }} - register: vm_ip_list_cmd - -- name: Query Azure VMs Roles - command: az vm list -o json --resource-group {{ azure_resource_group }} - register: vm_list_cmd - -- name: Query Azure Load Balancer Public IP - command: az network public-ip show -o json -g {{ azure_resource_group }} -n kubernetes-api-pubip - register: lb_pubip_cmd - -- name: Set VM IP, roles lists and load balancer public IP - set_fact: - vm_ip_list: "{{ vm_ip_list_cmd.stdout }}" - vm_roles_list: "{{ vm_list_cmd.stdout }}" - lb_pubip: "{{ lb_pubip_cmd.stdout }}" - -- name: Generate inventory - template: - src: inventory.j2 - dest: "{{ 
playbook_dir }}/inventory" - mode: "0644" - -- name: Generate Load Balancer variables - template: - src: loadbalancer_vars.j2 - dest: "{{ playbook_dir }}/loadbalancer_vars.yml" - mode: "0644" diff --git a/contrib/azurerm/roles/generate-inventory_2/templates/inventory.j2 b/contrib/azurerm/roles/generate-inventory_2/templates/inventory.j2 deleted file mode 100644 index 2f6ac5c4315..00000000000 --- a/contrib/azurerm/roles/generate-inventory_2/templates/inventory.j2 +++ /dev/null @@ -1,33 +0,0 @@ - -{% for vm in vm_ip_list %} -{% if not use_bastion or vm.virtualMachine.name == 'bastion' %} -{{ vm.virtualMachine.name }} ansible_ssh_host={{ vm.virtualMachine.network.publicIpAddresses[0].ipAddress }} ip={{ vm.virtualMachine.network.privateIpAddresses[0] }} -{% else %} -{{ vm.virtualMachine.name }} ansible_ssh_host={{ vm.virtualMachine.network.privateIpAddresses[0] }} -{% endif %} -{% endfor %} - -[kube_control_plane] -{% for vm in vm_roles_list %} -{% if 'kube_control_plane' in vm.tags.roles %} -{{ vm.name }} -{% endif %} -{% endfor %} - -[etcd] -{% for vm in vm_roles_list %} -{% if 'etcd' in vm.tags.roles %} -{{ vm.name }} -{% endif %} -{% endfor %} - -[kube_node] -{% for vm in vm_roles_list %} -{% if 'kube_node' in vm.tags.roles %} -{{ vm.name }} -{% endif %} -{% endfor %} - -[k8s_cluster:children] -kube_node -kube_control_plane diff --git a/contrib/azurerm/roles/generate-inventory_2/templates/loadbalancer_vars.j2 b/contrib/azurerm/roles/generate-inventory_2/templates/loadbalancer_vars.j2 deleted file mode 100644 index 95a62f3274c..00000000000 --- a/contrib/azurerm/roles/generate-inventory_2/templates/loadbalancer_vars.j2 +++ /dev/null @@ -1,8 +0,0 @@ -## External LB example config -apiserver_loadbalancer_domain_name: {{ lb_pubip.dnsSettings.fqdn }} -loadbalancer_apiserver: - address: {{ lb_pubip.ipAddress }} - port: 6443 - -## Internal loadbalancers for apiservers -loadbalancer_apiserver_localhost: false diff --git a/contrib/azurerm/roles/generate-templates/defaults/main.yml b/contrib/azurerm/roles/generate-templates/defaults/main.yml deleted file mode 100644 index ff6b313266f..00000000000 --- a/contrib/azurerm/roles/generate-templates/defaults/main.yml +++ /dev/null @@ -1,37 +0,0 @@ ---- -apiVersion: "2015-06-15" - -virtualNetworkName: "{{ azure_virtual_network_name | default('KubeVNET') }}" - -subnetAdminName: "{{ azure_subnet_admin_name | default('ad-subnet') }}" -subnetMastersName: "{{ azure_subnet_masters_name | default('master-subnet') }}" -subnetMinionsName: "{{ azure_subnet_minions_name | default('minion-subnet') }}" - -routeTableName: "{{ azure_route_table_name | default('routetable') }}" -securityGroupName: "{{ azure_security_group_name | default('secgroup') }}" - -nameSuffix: "{{ cluster_name }}" - -availabilitySetMasters: "master-avs" -availabilitySetMinions: "minion-avs" - -faultDomainCount: 3 -updateDomainCount: 10 - -bastionVmSize: Standard_A0 -bastionVMName: bastion -bastionIPAddressName: bastion-pubip - -disablePasswordAuthentication: true - -sshKeyPath: "/home/{{ admin_username }}/.ssh/authorized_keys" - -imageReference: - publisher: "OpenLogic" - offer: "CentOS" - sku: "7.5" - version: "latest" -imageReferenceJson: "{{ imageReference | to_json }}" - -storageAccountName: "sa{{ nameSuffix | replace('-', '') }}" -storageAccountType: "{{ azure_storage_account_type | default('Standard_LRS') }}" diff --git a/contrib/azurerm/roles/generate-templates/tasks/main.yml b/contrib/azurerm/roles/generate-templates/tasks/main.yml deleted file mode 100644 index 057d4d00547..00000000000 --- 
a/contrib/azurerm/roles/generate-templates/tasks/main.yml +++ /dev/null @@ -1,25 +0,0 @@ ---- -- name: Set base_dir - set_fact: - base_dir: "{{ playbook_dir }}/.generated/" - -- name: Create base_dir - file: - path: "{{ base_dir }}" - state: directory - recurse: true - mode: "0755" - -- name: Store json files in base_dir - template: - src: "{{ item }}" - dest: "{{ base_dir }}/{{ item }}" - mode: "0644" - with_items: - - network.json - - storage.json - - availability-sets.json - - bastion.json - - masters.json - - minions.json - - clear-rg.json diff --git a/contrib/azurerm/roles/generate-templates/templates/availability-sets.json b/contrib/azurerm/roles/generate-templates/templates/availability-sets.json deleted file mode 100644 index 78c1547a9c3..00000000000 --- a/contrib/azurerm/roles/generate-templates/templates/availability-sets.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", - "contentVersion": "1.0.0.0", - "parameters": { - }, - "variables": { - }, - "resources": [ - { - "type": "Microsoft.Compute/availabilitySets", - "name": "{{availabilitySetMasters}}", - "apiVersion": "{{apiVersion}}", - "location": "[resourceGroup().location]", - "properties": { - "PlatformFaultDomainCount": "{{faultDomainCount}}", - "PlatformUpdateDomainCount": "{{updateDomainCount}}" - } - }, - { - "type": "Microsoft.Compute/availabilitySets", - "name": "{{availabilitySetMinions}}", - "apiVersion": "{{apiVersion}}", - "location": "[resourceGroup().location]", - "properties": { - "PlatformFaultDomainCount": "{{faultDomainCount}}", - "PlatformUpdateDomainCount": "{{updateDomainCount}}" - } - } - ] -} diff --git a/contrib/azurerm/roles/generate-templates/templates/bastion.json b/contrib/azurerm/roles/generate-templates/templates/bastion.json deleted file mode 100644 index 4cf8fc7a64b..00000000000 --- a/contrib/azurerm/roles/generate-templates/templates/bastion.json +++ /dev/null @@ -1,106 +0,0 @@ -{ - "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", - "contentVersion": "1.0.0.0", - "parameters": { - }, - "variables": { - "vnetID": "[resourceId('Microsoft.Network/virtualNetworks', '{{virtualNetworkName}}')]", - "subnetAdminRef": "[concat(variables('vnetID'),'/subnets/', '{{subnetAdminName}}')]" - }, - "resources": [ - {% if use_bastion %} - { - "apiVersion": "{{apiVersion}}", - "type": "Microsoft.Network/publicIPAddresses", - "name": "{{bastionIPAddressName}}", - "location": "[resourceGroup().location]", - "properties": { - "publicIPAllocationMethod": "Static", - "dnsSettings": { - {% if bastion_domain_prefix %} - "domainNameLabel": "{{ bastion_domain_prefix }}" - {% endif %} - } - } - }, - { - "apiVersion": "{{apiVersion}}", - "type": "Microsoft.Network/networkInterfaces", - "name": "{{bastionVMName}}-nic", - "location": "[resourceGroup().location]", - "dependsOn": [ - "[concat('Microsoft.Network/publicIPAddresses/', '{{bastionIPAddressName}}')]" - ], - "properties": { - "ipConfigurations": [ - { - "name": "BastionIpConfig", - "properties": { - "privateIPAllocationMethod": "Dynamic", - "publicIPAddress": { - "id": "[resourceId('Microsoft.Network/publicIPAddresses', '{{bastionIPAddressName}}')]" - }, - "subnet": { - "id": "[variables('subnetAdminRef')]" - } - } - } - ] - } - }, - { - "apiVersion": "{{apiVersion}}", - "type": "Microsoft.Compute/virtualMachines", - "name": "{{bastionVMName}}", - "location": "[resourceGroup().location]", - "dependsOn": [ - 
"[concat('Microsoft.Network/networkInterfaces/', '{{bastionVMName}}-nic')]" - ], - "tags": { - "roles": "bastion" - }, - "properties": { - "hardwareProfile": { - "vmSize": "{{bastionVmSize}}" - }, - "osProfile": { - "computerName": "{{bastionVMName}}", - "adminUsername": "{{admin_username}}", - "adminPassword": "{{admin_password}}", - "linuxConfiguration": { - "disablePasswordAuthentication": "true", - "ssh": { - "publicKeys": [ - {% for key in ssh_public_keys %} - { - "path": "{{sshKeyPath}}", - "keyData": "{{key}}" - }{% if loop.index < ssh_public_keys | length %},{% endif %} - {% endfor %} - ] - } - } - }, - "storageProfile": { - "imageReference": {{imageReferenceJson}}, - "osDisk": { - "name": "osdisk", - "vhd": { - "uri": "[concat('http://', '{{storageAccountName}}', '.blob.core.windows.net/vhds/', '{{bastionVMName}}', '-osdisk.vhd')]" - }, - "caching": "ReadWrite", - "createOption": "FromImage" - } - }, - "networkProfile": { - "networkInterfaces": [ - { - "id": "[resourceId('Microsoft.Network/networkInterfaces', '{{bastionVMName}}-nic')]" - } - ] - } - } - } - {% endif %} - ] -} diff --git a/contrib/azurerm/roles/generate-templates/templates/clear-rg.json b/contrib/azurerm/roles/generate-templates/templates/clear-rg.json deleted file mode 100644 index faf31e8cca9..00000000000 --- a/contrib/azurerm/roles/generate-templates/templates/clear-rg.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", - "contentVersion": "1.0.0.0", - "parameters": {}, - "variables": {}, - "resources": [], - "outputs": {} -} diff --git a/contrib/azurerm/roles/generate-templates/templates/masters.json b/contrib/azurerm/roles/generate-templates/templates/masters.json deleted file mode 100644 index b299383a66e..00000000000 --- a/contrib/azurerm/roles/generate-templates/templates/masters.json +++ /dev/null @@ -1,198 +0,0 @@ -{ - "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", - "contentVersion": "1.0.0.0", - "parameters": { - }, - "variables": { - "lbDomainName": "{{nameSuffix}}-api", - "lbPublicIPAddressName": "kubernetes-api-pubip", - "lbPublicIPAddressType": "Static", - "lbPublicIPAddressID": "[resourceId('Microsoft.Network/publicIPAddresses',variables('lbPublicIPAddressName'))]", - "lbName": "kubernetes-api", - "lbID": "[resourceId('Microsoft.Network/loadBalancers',variables('lbName'))]", - - "vnetID": "[resourceId('Microsoft.Network/virtualNetworks', '{{virtualNetworkName}}')]", - "kubeMastersSubnetRef": "[concat(variables('vnetID'),'/subnets/', '{{subnetMastersName}}')]" - }, - "resources": [ - { - "apiVersion": "{{apiVersion}}", - "type": "Microsoft.Network/publicIPAddresses", - "name": "[variables('lbPublicIPAddressName')]", - "location": "[resourceGroup().location]", - "properties": { - "publicIPAllocationMethod": "[variables('lbPublicIPAddressType')]", - "dnsSettings": { - "domainNameLabel": "[variables('lbDomainName')]" - } - } - }, - { - "apiVersion": "{{apiVersion}}", - "name": "[variables('lbName')]", - "type": "Microsoft.Network/loadBalancers", - "location": "[resourceGroup().location]", - "dependsOn": [ - "[concat('Microsoft.Network/publicIPAddresses/', variables('lbPublicIPAddressName'))]" - ], - "properties": { - "frontendIPConfigurations": [ - { - "name": "kube-api-frontend", - "properties": { - "publicIPAddress": { - "id": "[variables('lbPublicIPAddressID')]" - } - } - } - ], - "backendAddressPools": [ - { - "name": "kube-api-backend" - } - ], - "loadBalancingRules": [ 
- { - "name": "kube-api", - "properties": { - "frontendIPConfiguration": { - "id": "[concat(variables('lbID'), '/frontendIPConfigurations/kube-api-frontend')]" - }, - "backendAddressPool": { - "id": "[concat(variables('lbID'), '/backendAddressPools/kube-api-backend')]" - }, - "protocol": "tcp", - "frontendPort": "{{kube_apiserver_port}}", - "backendPort": "{{kube_apiserver_port}}", - "enableFloatingIP": false, - "idleTimeoutInMinutes": 5, - "probe": { - "id": "[concat(variables('lbID'), '/probes/kube-api')]" - } - } - } - ], - "probes": [ - { - "name": "kube-api", - "properties": { - "protocol": "tcp", - "port": "{{kube_apiserver_port}}", - "intervalInSeconds": 5, - "numberOfProbes": 2 - } - } - ] - } - }, - {% for i in range(number_of_k8s_masters) %} - {% if not use_bastion %} - { - "apiVersion": "{{apiVersion}}", - "type": "Microsoft.Network/publicIPAddresses", - "name": "master-{{i}}-pubip", - "location": "[resourceGroup().location]", - "properties": { - "publicIPAllocationMethod": "Static" - } - }, - {% endif %} - { - "apiVersion": "{{apiVersion}}", - "type": "Microsoft.Network/networkInterfaces", - "name": "master-{{i}}-nic", - "location": "[resourceGroup().location]", - "dependsOn": [ - {% if not use_bastion %} - "[concat('Microsoft.Network/publicIPAddresses/', 'master-{{i}}-pubip')]", - {% endif %} - "[concat('Microsoft.Network/loadBalancers/', variables('lbName'))]" - ], - "properties": { - "ipConfigurations": [ - { - "name": "MastersIpConfig", - "properties": { - "privateIPAllocationMethod": "Dynamic", - {% if not use_bastion %} - "publicIPAddress": { - "id": "[resourceId('Microsoft.Network/publicIPAddresses', 'master-{{i}}-pubip')]" - }, - {% endif %} - "subnet": { - "id": "[variables('kubeMastersSubnetRef')]" - }, - "loadBalancerBackendAddressPools": [ - { - "id": "[concat(variables('lbID'), '/backendAddressPools/kube-api-backend')]" - } - ] - } - } - ], - "networkSecurityGroup": { - "id": "[resourceId('Microsoft.Network/networkSecurityGroups', '{{securityGroupName}}')]" - }, - "enableIPForwarding": true - } - }, - { - "type": "Microsoft.Compute/virtualMachines", - "name": "master-{{i}}", - "location": "[resourceGroup().location]", - "dependsOn": [ - "[concat('Microsoft.Network/networkInterfaces/', 'master-{{i}}-nic')]" - ], - "tags": { - "roles": "kube_control_plane,etcd" - }, - "apiVersion": "{{apiVersion}}", - "properties": { - "availabilitySet": { - "id": "[resourceId('Microsoft.Compute/availabilitySets', '{{availabilitySetMasters}}')]" - }, - "hardwareProfile": { - "vmSize": "{{masters_vm_size}}" - }, - "osProfile": { - "computerName": "master-{{i}}", - "adminUsername": "{{admin_username}}", - "adminPassword": "{{admin_password}}", - "linuxConfiguration": { - "disablePasswordAuthentication": "{{disablePasswordAuthentication}}", - "ssh": { - "publicKeys": [ - {% for key in ssh_public_keys %} - { - "path": "{{sshKeyPath}}", - "keyData": "{{key}}" - }{% if loop.index < ssh_public_keys | length %},{% endif %} - {% endfor %} - ] - } - } - }, - "storageProfile": { - "imageReference": {{imageReferenceJson}}, - "osDisk": { - "name": "ma{{nameSuffix}}{{i}}", - "vhd": { - "uri": "[concat('http://','{{storageAccountName}}','.blob.core.windows.net/vhds/master-{{i}}.vhd')]" - }, - "caching": "ReadWrite", - "createOption": "FromImage", - "diskSizeGB": "{{masters_os_disk_size}}" - } - }, - "networkProfile": { - "networkInterfaces": [ - { - "id": "[resourceId('Microsoft.Network/networkInterfaces', 'master-{{i}}-nic')]" - } - ] - } - } - } {% if not loop.last %},{% endif %} - {% endfor %} - 
] -} diff --git a/contrib/azurerm/roles/generate-templates/templates/minions.json b/contrib/azurerm/roles/generate-templates/templates/minions.json deleted file mode 100644 index bd0d059cbb6..00000000000 --- a/contrib/azurerm/roles/generate-templates/templates/minions.json +++ /dev/null @@ -1,115 +0,0 @@ -{ - "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", - "contentVersion": "1.0.0.0", - "parameters": { - }, - "variables": { - "vnetID": "[resourceId('Microsoft.Network/virtualNetworks', '{{virtualNetworkName}}')]", - "kubeMinionsSubnetRef": "[concat(variables('vnetID'),'/subnets/', '{{subnetMinionsName}}')]" - }, - "resources": [ - {% for i in range(number_of_k8s_nodes) %} - {% if not use_bastion %} - { - "apiVersion": "{{apiVersion}}", - "type": "Microsoft.Network/publicIPAddresses", - "name": "minion-{{i}}-pubip", - "location": "[resourceGroup().location]", - "properties": { - "publicIPAllocationMethod": "Static" - } - }, - {% endif %} - { - "apiVersion": "{{apiVersion}}", - "type": "Microsoft.Network/networkInterfaces", - "name": "minion-{{i}}-nic", - "location": "[resourceGroup().location]", - "dependsOn": [ - {% if not use_bastion %} - "[concat('Microsoft.Network/publicIPAddresses/', 'minion-{{i}}-pubip')]" - {% endif %} - ], - "properties": { - "ipConfigurations": [ - { - "name": "MinionsIpConfig", - "properties": { - "privateIPAllocationMethod": "Dynamic", - {% if not use_bastion %} - "publicIPAddress": { - "id": "[resourceId('Microsoft.Network/publicIPAddresses', 'minion-{{i}}-pubip')]" - }, - {% endif %} - "subnet": { - "id": "[variables('kubeMinionsSubnetRef')]" - } - } - } - ], - "networkSecurityGroup": { - "id": "[resourceId('Microsoft.Network/networkSecurityGroups', '{{securityGroupName}}')]" - }, - "enableIPForwarding": true - } - }, - { - "type": "Microsoft.Compute/virtualMachines", - "name": "minion-{{i}}", - "location": "[resourceGroup().location]", - "dependsOn": [ - "[concat('Microsoft.Network/networkInterfaces/', 'minion-{{i}}-nic')]" - ], - "tags": { - "roles": "kube_node" - }, - "apiVersion": "{{apiVersion}}", - "properties": { - "availabilitySet": { - "id": "[resourceId('Microsoft.Compute/availabilitySets', '{{availabilitySetMinions}}')]" - }, - "hardwareProfile": { - "vmSize": "{{minions_vm_size}}" - }, - "osProfile": { - "computerName": "minion-{{i}}", - "adminUsername": "{{admin_username}}", - "adminPassword": "{{admin_password}}", - "linuxConfiguration": { - "disablePasswordAuthentication": "{{disablePasswordAuthentication}}", - "ssh": { - "publicKeys": [ - {% for key in ssh_public_keys %} - { - "path": "{{sshKeyPath}}", - "keyData": "{{key}}" - }{% if loop.index < ssh_public_keys | length %},{% endif %} - {% endfor %} - ] - } - } - }, - "storageProfile": { - "imageReference": {{imageReferenceJson}}, - "osDisk": { - "name": "mi{{nameSuffix}}{{i}}", - "vhd": { - "uri": "[concat('http://','{{storageAccountName}}','.blob.core.windows.net/vhds/minion-{{i}}.vhd')]" - }, - "caching": "ReadWrite", - "createOption": "FromImage", - "diskSizeGB": "{{minions_os_disk_size}}" - } - }, - "networkProfile": { - "networkInterfaces": [ - { - "id": "[resourceId('Microsoft.Network/networkInterfaces', 'minion-{{i}}-nic')]" - } - ] - } - } - } {% if not loop.last %},{% endif %} - {% endfor %} - ] -} diff --git a/contrib/azurerm/roles/generate-templates/templates/network.json b/contrib/azurerm/roles/generate-templates/templates/network.json deleted file mode 100644 index 763b3dbb301..00000000000 --- 
a/contrib/azurerm/roles/generate-templates/templates/network.json +++ /dev/null @@ -1,109 +0,0 @@ -{ - "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", - "contentVersion": "1.0.0.0", - "parameters": { - }, - "variables": { - }, - "resources": [ - { - "apiVersion": "{{apiVersion}}", - "type": "Microsoft.Network/routeTables", - "name": "{{routeTableName}}", - "location": "[resourceGroup().location]", - "properties": { - "routes": [ - ] - } - }, - { - "type": "Microsoft.Network/virtualNetworks", - "name": "{{virtualNetworkName}}", - "location": "[resourceGroup().location]", - "apiVersion": "{{apiVersion}}", - "dependsOn": [ - "[concat('Microsoft.Network/routeTables/', '{{routeTableName}}')]" - ], - "properties": { - "addressSpace": { - "addressPrefixes": [ - "{{azure_vnet_cidr}}" - ] - }, - "subnets": [ - { - "name": "{{subnetMastersName}}", - "properties": { - "addressPrefix": "{{azure_masters_cidr}}", - "routeTable": { - "id": "[resourceId('Microsoft.Network/routeTables', '{{routeTableName}}')]" - } - } - }, - { - "name": "{{subnetMinionsName}}", - "properties": { - "addressPrefix": "{{azure_minions_cidr}}", - "routeTable": { - "id": "[resourceId('Microsoft.Network/routeTables', '{{routeTableName}}')]" - } - } - } - {% if use_bastion %} - ,{ - "name": "{{subnetAdminName}}", - "properties": { - "addressPrefix": "{{azure_admin_cidr}}", - "routeTable": { - "id": "[resourceId('Microsoft.Network/routeTables', '{{routeTableName}}')]" - } - } - } - {% endif %} - ] - } - }, - { - "apiVersion": "{{apiVersion}}", - "type": "Microsoft.Network/networkSecurityGroups", - "name": "{{securityGroupName}}", - "location": "[resourceGroup().location]", - "properties": { - "securityRules": [ - {% if not use_bastion %} - { - "name": "ssh", - "properties": { - "description": "Allow SSH", - "protocol": "Tcp", - "sourcePortRange": "*", - "destinationPortRange": "22", - "sourceAddressPrefix": "Internet", - "destinationAddressPrefix": "*", - "access": "Allow", - "priority": 100, - "direction": "Inbound" - } - }, - {% endif %} - { - "name": "kube-api", - "properties": { - "description": "Allow secure kube-api", - "protocol": "Tcp", - "sourcePortRange": "*", - "destinationPortRange": "{{kube_apiserver_port}}", - "sourceAddressPrefix": "Internet", - "destinationAddressPrefix": "*", - "access": "Allow", - "priority": 101, - "direction": "Inbound" - } - } - ] - }, - "resources": [], - "dependsOn": [] - } - ] -} diff --git a/contrib/azurerm/roles/generate-templates/templates/storage.json b/contrib/azurerm/roles/generate-templates/templates/storage.json deleted file mode 100644 index 1ed08669784..00000000000 --- a/contrib/azurerm/roles/generate-templates/templates/storage.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", - "contentVersion": "1.0.0.0", - "parameters": { - }, - "variables": { - }, - "resources": [ - { - "type": "Microsoft.Storage/storageAccounts", - "name": "{{storageAccountName}}", - "location": "[resourceGroup().location]", - "apiVersion": "{{apiVersion}}", - "properties": { - "accountType": "{{storageAccountType}}" - } - } - ] -} diff --git a/contrib/terraform/aws/.gitignore b/contrib/terraform/aws/.gitignore deleted file mode 100644 index 373687b8014..00000000000 --- a/contrib/terraform/aws/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -*.tfstate* -.terraform.lock.hcl -.terraform diff --git a/contrib/terraform/aws/README.md b/contrib/terraform/aws/README.md deleted file mode 100644 
index 28a9f08c415..00000000000 --- a/contrib/terraform/aws/README.md +++ /dev/null @@ -1,124 +0,0 @@ -# Kubernetes on AWS with Terraform - -## Overview - -This project will create: - -- VPC with Public and Private Subnets in # Availability Zones -- Bastion Hosts and NAT Gateways in the Public Subnet -- A dynamic number of masters, etcd, and worker nodes in the Private Subnet - - even distributed over the # of Availability Zones -- AWS ELB in the Public Subnet for accessing the Kubernetes API from the internet - -## Requirements - -- Terraform 0.12.0 or newer - -## How to Use - -- Export the variables for your AWS credentials or edit `credentials.tfvars`: - -```commandline -export TF_VAR_AWS_ACCESS_KEY_ID="www" -export TF_VAR_AWS_SECRET_ACCESS_KEY ="xxx" -export TF_VAR_AWS_SSH_KEY_NAME="yyy" -export TF_VAR_AWS_DEFAULT_REGION="zzz" -``` - -- Update `contrib/terraform/aws/terraform.tfvars` with your data. By default, the Terraform scripts use Ubuntu 18.04 LTS (Bionic) as base image. If you want to change this behaviour, see note "Using other distrib than Ubuntu" below. -- Create an AWS EC2 SSH Key -- Run with `terraform apply --var-file="credentials.tfvars"` or `terraform apply` depending if you exported your AWS credentials - -Example: - -```commandline -terraform apply -var-file=credentials.tfvars -``` - -- Terraform automatically creates an Ansible Inventory file called `hosts` with the created infrastructure in the directory `inventory` -- Ansible will automatically generate an ssh config file for your bastion hosts. To connect to hosts with ssh using bastion host use generated `ssh-bastion.conf`. Ansible automatically detects bastion and changes `ssh_args` - -```commandline -ssh -F ./ssh-bastion.conf user@$ip -``` - -- Once the infrastructure is created, you can run the kubespray playbooks and supply inventory/hosts with the `-i` flag. - -Example (this one assumes you are using Ubuntu) - -```commandline -ansible-playbook -i ./inventory/hosts ./cluster.yml -e ansible_user=ubuntu -b --become-user=root --flush-cache -``` - -## Using other distrib than Ubuntu*** - -To leverage a Linux distribution other than Ubuntu 18.04 (Bionic) LTS for your Terraform configurations, you can adjust the AMI search filters within the 'data "aws_ami" "distro"' block by utilizing variables in your `terraform.tfvars` file. This approach ensures a flexible configuration that adapts to various Linux distributions without directly modifying the core Terraform files. - -### Example Usages - -- **Debian Jessie**: To configure the usage of Debian Jessie, insert the subsequent lines into your `terraform.tfvars`: - - ```hcl - ami_name_pattern = "debian-jessie-amd64-hvm-*" - ami_owners = ["379101102735"] - ``` - -- **Ubuntu 16.04**: To utilize Ubuntu 16.04 instead, apply the following configuration in your `terraform.tfvars`: - - ```hcl - ami_name_pattern = "ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-*" - ami_owners = ["099720109477"] - ``` - -- **Centos 7**: For employing Centos 7, incorporate these lines into your `terraform.tfvars`: - - ```hcl - ami_name_pattern = "dcos-centos7-*" - ami_owners = ["688023202711"] - ``` - -## Connecting to Kubernetes - -You can use the following set of commands to get the kubeconfig file from your newly created cluster. Before running the commands, make sure you are in the project's root folder. - -```commandline -# Get the controller's IP address. 
-CONTROLLER_HOST_NAME=$(cat ./inventory/hosts | grep "\[kube_control_plane\]" -A 1 | tail -n 1) -CONTROLLER_IP=$(cat ./inventory/hosts | grep $CONTROLLER_HOST_NAME | grep ansible_host | cut -d'=' -f2) - -# Get the hostname of the load balancer. -LB_HOST=$(cat inventory/hosts | grep apiserver_loadbalancer_domain_name | cut -d'"' -f2) - -# Get the controller's SSH fingerprint. -ssh-keygen -R $CONTROLLER_IP > /dev/null 2>&1 -ssh-keyscan -H $CONTROLLER_IP >> ~/.ssh/known_hosts 2>/dev/null - -# Get the kubeconfig from the controller. -mkdir -p ~/.kube -ssh -F ssh-bastion.conf centos@$CONTROLLER_IP "sudo chmod 644 /etc/kubernetes/admin.conf" -scp -F ssh-bastion.conf centos@$CONTROLLER_IP:/etc/kubernetes/admin.conf ~/.kube/config -sed -i "s^server:.*^server: https://$LB_HOST:6443^" ~/.kube/config -kubectl get nodes -``` - -## Troubleshooting - -### Remaining AWS IAM Instance Profile - -If the cluster was destroyed without using Terraform it is possible that -the AWS IAM Instance Profiles still remain. To delete them you can use -the `AWS CLI` with the following command: - -```commandline -aws iam delete-instance-profile --region --instance-profile-name -``` - -### Ansible Inventory doesn't get created - -It could happen that Terraform doesn't create an Ansible Inventory file automatically. If this is the case copy the output after `inventory=` and create a file named `hosts`in the directory `inventory` and paste the inventory into the file. - -## Architecture - -Pictured is an AWS Infrastructure created with this Terraform project distributed over two Availability Zones. - -![AWS Infrastructure with Terraform ](docs/aws_kubespray.png) diff --git a/contrib/terraform/aws/create-infrastructure.tf b/contrib/terraform/aws/create-infrastructure.tf deleted file mode 100644 index 810bd16f6e0..00000000000 --- a/contrib/terraform/aws/create-infrastructure.tf +++ /dev/null @@ -1,185 +0,0 @@ -terraform { - required_version = ">= 0.12.0" - required_providers { - aws = { - source = "hashicorp/aws" - version = "~> 5.0" - } - } -} - -provider "aws" { - access_key = var.AWS_ACCESS_KEY_ID - secret_key = var.AWS_SECRET_ACCESS_KEY - region = var.AWS_DEFAULT_REGION -} - -data "aws_availability_zones" "available" {} - -/* -* Calling modules who create the initial AWS VPC / AWS ELB -* and AWS IAM Roles for Kubernetes Deployment -*/ - -module "aws-vpc" { - source = "./modules/vpc" - - aws_cluster_name = var.aws_cluster_name - aws_vpc_cidr_block = var.aws_vpc_cidr_block - aws_avail_zones = data.aws_availability_zones.available.names - aws_cidr_subnets_private = var.aws_cidr_subnets_private - aws_cidr_subnets_public = var.aws_cidr_subnets_public - default_tags = var.default_tags -} - -module "aws-nlb" { - source = "./modules/nlb" - - aws_cluster_name = var.aws_cluster_name - aws_vpc_id = module.aws-vpc.aws_vpc_id - aws_avail_zones = data.aws_availability_zones.available.names - aws_subnet_ids_public = module.aws-vpc.aws_subnet_ids_public - aws_nlb_api_port = var.aws_nlb_api_port - k8s_secure_api_port = var.k8s_secure_api_port - default_tags = var.default_tags -} - -module "aws-iam" { - source = "./modules/iam" - - aws_cluster_name = var.aws_cluster_name -} - -/* -* Create Bastion Instances in AWS -* -*/ - -resource "aws_instance" "bastion-server" { - ami = data.aws_ami.distro.id - instance_type = var.aws_bastion_size - count = var.aws_bastion_num - associate_public_ip_address = true - subnet_id = element(module.aws-vpc.aws_subnet_ids_public, count.index) - - vpc_security_group_ids = module.aws-vpc.aws_security_group - - 
key_name = var.AWS_SSH_KEY_NAME - - tags = merge(var.default_tags, tomap({ - Name = "kubernetes-${var.aws_cluster_name}-bastion-${count.index}" - Cluster = var.aws_cluster_name - Role = "bastion-${var.aws_cluster_name}-${count.index}" - })) -} - -/* -* Create K8s Master and worker nodes and etcd instances -* -*/ - -resource "aws_instance" "k8s-master" { - ami = data.aws_ami.distro.id - instance_type = var.aws_kube_master_size - - count = var.aws_kube_master_num - - subnet_id = element(module.aws-vpc.aws_subnet_ids_private, count.index) - - vpc_security_group_ids = module.aws-vpc.aws_security_group - - root_block_device { - volume_size = var.aws_kube_master_disk_size - } - - iam_instance_profile = module.aws-iam.kube_control_plane-profile - key_name = var.AWS_SSH_KEY_NAME - - tags = merge(var.default_tags, tomap({ - Name = "kubernetes-${var.aws_cluster_name}-master${count.index}" - "kubernetes.io/cluster/${var.aws_cluster_name}" = "member" - Role = "master" - })) -} - -resource "aws_lb_target_group_attachment" "tg-attach_master_nodes" { - count = var.aws_kube_master_num - target_group_arn = module.aws-nlb.aws_nlb_api_tg_arn - target_id = element(aws_instance.k8s-master.*.private_ip, count.index) -} - -resource "aws_instance" "k8s-etcd" { - ami = data.aws_ami.distro.id - instance_type = var.aws_etcd_size - - count = var.aws_etcd_num - - subnet_id = element(module.aws-vpc.aws_subnet_ids_private, count.index) - - vpc_security_group_ids = module.aws-vpc.aws_security_group - - root_block_device { - volume_size = var.aws_etcd_disk_size - } - - key_name = var.AWS_SSH_KEY_NAME - - tags = merge(var.default_tags, tomap({ - Name = "kubernetes-${var.aws_cluster_name}-etcd${count.index}" - "kubernetes.io/cluster/${var.aws_cluster_name}" = "member" - Role = "etcd" - })) -} - -resource "aws_instance" "k8s-worker" { - ami = data.aws_ami.distro.id - instance_type = var.aws_kube_worker_size - - count = var.aws_kube_worker_num - - subnet_id = element(module.aws-vpc.aws_subnet_ids_private, count.index) - - vpc_security_group_ids = module.aws-vpc.aws_security_group - - root_block_device { - volume_size = var.aws_kube_worker_disk_size - } - - iam_instance_profile = module.aws-iam.kube-worker-profile - key_name = var.AWS_SSH_KEY_NAME - - tags = merge(var.default_tags, tomap({ - Name = "kubernetes-${var.aws_cluster_name}-worker${count.index}" - "kubernetes.io/cluster/${var.aws_cluster_name}" = "member" - Role = "worker" - })) -} - -/* -* Create Kubespray Inventory File -* -*/ -data "template_file" "inventory" { - template = file("${path.module}/templates/inventory.tpl") - - vars = { - public_ip_address_bastion = join("\n", formatlist("bastion ansible_host=%s", aws_instance.bastion-server.*.public_ip)) - connection_strings_master = join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-master.*.private_dns, aws_instance.k8s-master.*.private_ip)) - connection_strings_node = join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-worker.*.private_dns, aws_instance.k8s-worker.*.private_ip)) - list_master = join("\n", aws_instance.k8s-master.*.private_dns) - list_node = join("\n", aws_instance.k8s-worker.*.private_dns) - connection_strings_etcd = join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-etcd.*.private_dns, aws_instance.k8s-etcd.*.private_ip)) - list_etcd = join("\n", ((var.aws_etcd_num > 0) ? 
(aws_instance.k8s-etcd.*.private_dns) : (aws_instance.k8s-master.*.private_dns))) - nlb_api_fqdn = "apiserver_loadbalancer_domain_name=\"${module.aws-nlb.aws_nlb_api_fqdn}\"" - } -} - -resource "null_resource" "inventories" { - provisioner "local-exec" { - command = "echo '${data.template_file.inventory.rendered}' > ${var.inventory_file}" - } - - triggers = { - template = data.template_file.inventory.rendered - } -} diff --git a/contrib/terraform/aws/credentials.tfvars.example b/contrib/terraform/aws/credentials.tfvars.example deleted file mode 100644 index 19420c5a7d6..00000000000 --- a/contrib/terraform/aws/credentials.tfvars.example +++ /dev/null @@ -1,8 +0,0 @@ -#AWS Access Key -AWS_ACCESS_KEY_ID = "" -#AWS Secret Key -AWS_SECRET_ACCESS_KEY = "" -#EC2 SSH Key Name -AWS_SSH_KEY_NAME = "" -#AWS Region -AWS_DEFAULT_REGION = "eu-central-1" diff --git a/contrib/terraform/aws/docs/aws_kubespray.png b/contrib/terraform/aws/docs/aws_kubespray.png deleted file mode 100644 index 40245b845a5..00000000000 Binary files a/contrib/terraform/aws/docs/aws_kubespray.png and /dev/null differ diff --git a/contrib/terraform/aws/modules/iam/main.tf b/contrib/terraform/aws/modules/iam/main.tf deleted file mode 100644 index a35afc7e596..00000000000 --- a/contrib/terraform/aws/modules/iam/main.tf +++ /dev/null @@ -1,141 +0,0 @@ -#Add AWS Roles for Kubernetes - -resource "aws_iam_role" "kube_control_plane" { - name = "kubernetes-${var.aws_cluster_name}-master" - - assume_role_policy = < 0) ? (aws_instance.k8s-etcd.*.private_ip) : (aws_instance.k8s-master.*.private_ip))) -} - -output "aws_nlb_api_fqdn" { - value = "${module.aws-nlb.aws_nlb_api_fqdn}:${var.aws_nlb_api_port}" -} - -output "inventory" { - value = data.template_file.inventory.rendered -} - -output "default_tags" { - value = var.default_tags -} diff --git a/contrib/terraform/aws/sample-inventory/cluster.tfvars b/contrib/terraform/aws/sample-inventory/cluster.tfvars deleted file mode 100644 index 8aca21909a4..00000000000 --- a/contrib/terraform/aws/sample-inventory/cluster.tfvars +++ /dev/null @@ -1,59 +0,0 @@ -#Global Vars -aws_cluster_name = "devtest" - -#VPC Vars -aws_vpc_cidr_block = "10.250.192.0/18" - -aws_cidr_subnets_private = ["10.250.192.0/20", "10.250.208.0/20"] - -aws_cidr_subnets_public = ["10.250.224.0/20", "10.250.240.0/20"] - -#Bastion Host -aws_bastion_num = 1 - -aws_bastion_size = "t2.medium" - -#Kubernetes Cluster - -aws_kube_master_num = 3 - -aws_kube_master_size = "t2.medium" - -aws_kube_master_disk_size = 50 - -aws_etcd_num = 3 - -aws_etcd_size = "t2.medium" - -aws_etcd_disk_size = 50 - -aws_kube_worker_num = 4 - -aws_kube_worker_size = "t2.medium" - -aws_kube_worker_disk_size = 50 - -#Settings AWS NLB - -aws_nlb_api_port = 6443 - -k8s_secure_api_port = 6443 - -default_tags = { - # Env = "devtest" # Product = "kubernetes" -} - -inventory_file = "../../../inventory/hosts" - -## Credentials -#AWS Access Key -AWS_ACCESS_KEY_ID = "" - -#AWS Secret Key -AWS_SECRET_ACCESS_KEY = "" - -#EC2 SSH Key Name -AWS_SSH_KEY_NAME = "" - -#AWS Region -AWS_DEFAULT_REGION = "eu-central-1" diff --git a/contrib/terraform/aws/sample-inventory/group_vars b/contrib/terraform/aws/sample-inventory/group_vars deleted file mode 120000 index 37359582379..00000000000 --- a/contrib/terraform/aws/sample-inventory/group_vars +++ /dev/null @@ -1 +0,0 @@ -../../../../inventory/sample/group_vars \ No newline at end of file diff --git a/contrib/terraform/aws/templates/inventory.tpl b/contrib/terraform/aws/templates/inventory.tpl deleted file mode 100644 index 
10a3995e1bd..00000000000 --- a/contrib/terraform/aws/templates/inventory.tpl +++ /dev/null @@ -1,27 +0,0 @@ -[all] -${connection_strings_master} -${connection_strings_node} -${connection_strings_etcd} -${public_ip_address_bastion} - -[bastion] -${public_ip_address_bastion} - -[kube_control_plane] -${list_master} - -[kube_node] -${list_node} - -[etcd] -${list_etcd} - -[calico_rr] - -[k8s_cluster:children] -kube_node -kube_control_plane -calico_rr - -[k8s_cluster:vars] -${nlb_api_fqdn} diff --git a/contrib/terraform/aws/terraform.tfvars b/contrib/terraform/aws/terraform.tfvars deleted file mode 100644 index 693fa9bfbd9..00000000000 --- a/contrib/terraform/aws/terraform.tfvars +++ /dev/null @@ -1,43 +0,0 @@ -#Global Vars -aws_cluster_name = "devtest" - -#VPC Vars -aws_vpc_cidr_block = "10.250.192.0/18" -aws_cidr_subnets_private = ["10.250.192.0/20", "10.250.208.0/20"] -aws_cidr_subnets_public = ["10.250.224.0/20", "10.250.240.0/20"] - -# single AZ deployment -#aws_cidr_subnets_private = ["10.250.192.0/20"] -#aws_cidr_subnets_public = ["10.250.224.0/20"] - -# 3+ AZ deployment -#aws_cidr_subnets_private = ["10.250.192.0/24","10.250.193.0/24","10.250.194.0/24","10.250.195.0/24"] -#aws_cidr_subnets_public = ["10.250.224.0/24","10.250.225.0/24","10.250.226.0/24","10.250.227.0/24"] - -#Bastion Host -aws_bastion_num = 1 -aws_bastion_size = "t3.small" - -#Kubernetes Cluster -aws_kube_master_num = 3 -aws_kube_master_size = "t3.medium" -aws_kube_master_disk_size = 50 - -aws_etcd_num = 0 -aws_etcd_size = "t3.medium" -aws_etcd_disk_size = 50 - -aws_kube_worker_num = 4 -aws_kube_worker_size = "t3.medium" -aws_kube_worker_disk_size = 50 - -#Settings AWS ELB -aws_nlb_api_port = 6443 -k8s_secure_api_port = 6443 - -default_tags = { - # Env = "devtest" - # Product = "kubernetes" -} - -inventory_file = "../../../inventory/hosts" diff --git a/contrib/terraform/aws/terraform.tfvars.example b/contrib/terraform/aws/terraform.tfvars.example deleted file mode 100644 index 584b6a23659..00000000000 --- a/contrib/terraform/aws/terraform.tfvars.example +++ /dev/null @@ -1,33 +0,0 @@ -#Global Vars -aws_cluster_name = "devtest" - -#VPC Vars -aws_vpc_cidr_block = "10.250.192.0/18" -aws_cidr_subnets_private = ["10.250.192.0/20","10.250.208.0/20"] -aws_cidr_subnets_public = ["10.250.224.0/20","10.250.240.0/20"] -aws_avail_zones = ["eu-central-1a","eu-central-1b"] - -#Bastion Host -aws_bastion_num = 1 -aws_bastion_size = "t3.small" - -#Kubernetes Cluster -aws_kube_master_num = 3 -aws_kube_master_size = "t3.medium" -aws_kube_master_disk_size = 50 - -aws_etcd_num = 3 -aws_etcd_size = "t3.medium" -aws_etcd_disk_size = 50 - -aws_kube_worker_num = 4 -aws_kube_worker_size = "t3.medium" -aws_kube_worker_disk_size = 50 - -#Settings AWS ELB -aws_nlb_api_port = 6443 -k8s_secure_api_port = 6443 - -default_tags = { } - -inventory_file = "../../../inventory/hosts" diff --git a/contrib/terraform/aws/variables.tf b/contrib/terraform/aws/variables.tf deleted file mode 100644 index 783d4adffbb..00000000000 --- a/contrib/terraform/aws/variables.tf +++ /dev/null @@ -1,143 +0,0 @@ -variable "AWS_ACCESS_KEY_ID" { - description = "AWS Access Key" -} - -variable "AWS_SECRET_ACCESS_KEY" { - description = "AWS Secret Key" -} - -variable "AWS_SSH_KEY_NAME" { - description = "Name of the SSH keypair to use in AWS." 
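-  # Note: the key pair is expected to already exist in the target AWS region (see "Create an AWS EC2 SSH Key" in the README); this configuration only references it by name and does not create it.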
-} - -variable "AWS_DEFAULT_REGION" { - description = "AWS Region" -} - -//General Cluster Settings - -variable "aws_cluster_name" { - description = "Name of AWS Cluster" -} - -variable "ami_name_pattern" { - description = "The name pattern to use for AMI lookup" - type = string - default = "debian-10-amd64-*" -} - -variable "ami_virtualization_type" { - description = "The virtualization type to use for AMI lookup" - type = string - default = "hvm" -} - -variable "ami_owners" { - description = "The owners to use for AMI lookup" - type = list(string) - default = ["136693071363"] -} - -data "aws_ami" "distro" { - most_recent = true - - filter { - name = "name" - values = [var.ami_name_pattern] - } - - filter { - name = "virtualization-type" - values = [var.ami_virtualization_type] - } - - owners = var.ami_owners -} - -//AWS VPC Variables - -variable "aws_vpc_cidr_block" { - description = "CIDR Block for VPC" -} - -variable "aws_cidr_subnets_private" { - description = "CIDR Blocks for private subnets in Availability Zones" - type = list(string) -} - -variable "aws_cidr_subnets_public" { - description = "CIDR Blocks for public subnets in Availability Zones" - type = list(string) -} - -//AWS EC2 Settings - -variable "aws_bastion_size" { - description = "EC2 Instance Size of Bastion Host" -} - -/* -* AWS EC2 Settings -* The number should be divisable by the number of used -* AWS Availability Zones without an remainder. -*/ -variable "aws_bastion_num" { - description = "Number of Bastion Nodes" -} - -variable "aws_kube_master_num" { - description = "Number of Kubernetes Master Nodes" -} - -variable "aws_kube_master_disk_size" { - description = "Disk size for Kubernetes Master Nodes (in GiB)" -} - -variable "aws_kube_master_size" { - description = "Instance size of Kube Master Nodes" -} - -variable "aws_etcd_num" { - description = "Number of etcd Nodes" -} - -variable "aws_etcd_disk_size" { - description = "Disk size for etcd Nodes (in GiB)" -} - -variable "aws_etcd_size" { - description = "Instance size of etcd Nodes" -} - -variable "aws_kube_worker_num" { - description = "Number of Kubernetes Worker Nodes" -} - -variable "aws_kube_worker_disk_size" { - description = "Disk size for Kubernetes Worker Nodes (in GiB)" -} - -variable "aws_kube_worker_size" { - description = "Instance size of Kubernetes Worker Nodes" -} - -/* -* AWS NLB Settings -* -*/ -variable "aws_nlb_api_port" { - description = "Port for AWS NLB" -} - -variable "k8s_secure_api_port" { - description = "Secure Port of K8S API Server" -} - -variable "default_tags" { - description = "Default tags for all resources" - type = map(string) -} - -variable "inventory_file" { - description = "Where to store the generated inventory file" -} diff --git a/contrib/terraform/exoscale/README.md b/contrib/terraform/exoscale/README.md deleted file mode 100644 index be451cce816..00000000000 --- a/contrib/terraform/exoscale/README.md +++ /dev/null @@ -1,152 +0,0 @@ -# Kubernetes on Exoscale with Terraform - -Provision a Kubernetes cluster on [Exoscale](https://www.exoscale.com/) using Terraform and Kubespray - -## Overview - -The setup looks like following - -```text - Kubernetes cluster - +-----------------------+ -+---------------+ | +--------------+ | -| | | | +--------------+ | -| API server LB +---------> | | | | -| | | | | Master/etcd | | -+---------------+ | | | node(s) | | - | +-+ | | - | +--------------+ | - | ^ | - | | | - | v | -+---------------+ | +--------------+ | -| | | | +--------------+ | -| Ingress LB +---------> | | | | -| | | | | 
Worker | | -+---------------+ | | | node(s) | | - | +-+ | | - | +--------------+ | - +-----------------------+ -``` - -## Requirements - -* Terraform 0.13.0 or newer (0.12 also works if you modify the provider block to include version and remove all `versions.tf` files) - -## Quickstart - -NOTE: *Assumes you are at the root of the kubespray repo* - -Copy the sample inventory for your cluster and copy the default terraform variables. - -```bash -CLUSTER=my-exoscale-cluster -cp -r inventory/sample inventory/$CLUSTER -cp contrib/terraform/exoscale/default.tfvars inventory/$CLUSTER/ -cd inventory/$CLUSTER -``` - -Edit `default.tfvars` to match your setup. You MUST, at the very least, change `ssh_public_keys`. - -```bash -# Ensure $EDITOR points to your favorite editor, e.g., vim, emacs, VS Code, etc. -$EDITOR default.tfvars -``` - -For authentication you can use the credentials file `~/.cloudstack.ini` or `./cloudstack.ini`. -The file should look like something like this: - -```ini -[cloudstack] -key = -secret = -``` - -Follow the [Exoscale IAM Quick-start](https://community.exoscale.com/documentation/iam/quick-start/) to learn how to generate API keys. - -### Encrypted credentials - -To have the credentials encrypted at rest, you can use [sops](https://github.com/mozilla/sops) and only decrypt the credentials at runtime. - -```bash -cat << EOF > cloudstack.ini -[cloudstack] -key = -secret = -EOF -sops --encrypt --in-place --pgp cloudstack.ini -sops cloudstack.ini -``` - -Run terraform to create the infrastructure - -```bash -terraform init ../../contrib/terraform/exoscale -terraform apply -var-file default.tfvars ../../contrib/terraform/exoscale -``` - -If your cloudstack credentials file is encrypted using sops, run the following: - -```bash -terraform init ../../contrib/terraform/exoscale -sops exec-file -no-fifo cloudstack.ini 'CLOUDSTACK_CONFIG={} terraform apply -var-file default.tfvars ../../contrib/terraform/exoscale' -``` - -You should now have a inventory file named `inventory.ini` that you can use with kubespray. -You can now copy your inventory file and use it with kubespray to set up a cluster. -You can type `terraform output` to find out the IP addresses of the nodes, as well as control-plane and data-plane load-balancer. - -It is a good idea to check that you have basic SSH connectivity to the nodes. You can do that by: - -```bash -ansible -i inventory.ini -m ping all -``` - -Example to use this with the default sample inventory: - -```bash -ansible-playbook -i inventory.ini ../../cluster.yml -b -v -``` - -## Teardown - -The Kubernetes cluster cannot create any load-balancers or disks, hence, teardown is as simple as Terraform destroy: - -```bash -terraform destroy -var-file default.tfvars ../../contrib/terraform/exoscale -``` - -## Variables - -### Required - -* `ssh_public_keys`: List of public SSH keys to install on all machines -* `zone`: The zone where to run the cluster -* `machines`: Machines to provision. Key of this object will be used as the name of the machine - * `node_type`: The role of this node *(master|worker)* - * `size`: The size to use - * `boot_disk`: The boot disk to use - * `image_name`: Name of the image - * `root_partition_size`: Size *(in GB)* for the root partition - * `ceph_partition_size`: Size *(in GB)* for the partition for rook to use as ceph storage. *(Set to 0 to disable)* - * `node_local_partition_size`: Size *(in GB)* for the partition for node-local-storage. 
*(Set to 0 to disable)* -* `ssh_whitelist`: List of IP ranges (CIDR) that will be allowed to ssh to the nodes -* `api_server_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the API server -* `nodeport_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the kubernetes nodes on port 30000-32767 (kubernetes nodeports) - -### Optional - -* `prefix`: Prefix to use for all resources, required to be unique for all clusters in the same project *(Defaults to `default`)* - -An example variables file can be found `default.tfvars` - -## Known limitations - -### Only single disk - -Since Exoscale doesn't support additional disks to be mounted onto an instance, this script has the ability to create partitions for [Rook](https://rook.io/) and [node-local-storage](https://kubernetes.io/docs/concepts/storage/volumes/#local). - -### No Kubernetes API - -The current solution doesn't use the [Exoscale Kubernetes cloud controller](https://github.com/exoscale/exoscale-cloud-controller-manager). -This means that we need to set up a HTTP(S) loadbalancer in front of all workers and set the Ingress controller to DaemonSet mode. diff --git a/contrib/terraform/exoscale/default.tfvars b/contrib/terraform/exoscale/default.tfvars deleted file mode 100644 index 8388d586adc..00000000000 --- a/contrib/terraform/exoscale/default.tfvars +++ /dev/null @@ -1,65 +0,0 @@ -prefix = "default" -zone = "ch-gva-2" - -inventory_file = "inventory.ini" - -ssh_public_keys = [ - # Put your public SSH key here - "ssh-rsa I-did-not-read-the-docs", - "ssh-rsa I-did-not-read-the-docs 2", -] - -machines = { - "master-0" : { - "node_type" : "master", - "size" : "standard.medium", - "boot_disk" : { - "image_name" : "Linux Ubuntu 20.04 LTS 64-bit", - "root_partition_size" : 50, - "node_local_partition_size" : 0, - "ceph_partition_size" : 0 - } - }, - "worker-0" : { - "node_type" : "worker", - "size" : "standard.large", - "boot_disk" : { - "image_name" : "Linux Ubuntu 20.04 LTS 64-bit", - "root_partition_size" : 50, - "node_local_partition_size" : 0, - "ceph_partition_size" : 0 - } - }, - "worker-1" : { - "node_type" : "worker", - "size" : "standard.large", - "boot_disk" : { - "image_name" : "Linux Ubuntu 20.04 LTS 64-bit", - "root_partition_size" : 50, - "node_local_partition_size" : 0, - "ceph_partition_size" : 0 - } - }, - "worker-2" : { - "node_type" : "worker", - "size" : "standard.large", - "boot_disk" : { - "image_name" : "Linux Ubuntu 20.04 LTS 64-bit", - "root_partition_size" : 50, - "node_local_partition_size" : 0, - "ceph_partition_size" : 0 - } - } -} - -nodeport_whitelist = [ - "0.0.0.0/0" -] - -ssh_whitelist = [ - "0.0.0.0/0" -] - -api_server_whitelist = [ - "0.0.0.0/0" -] diff --git a/contrib/terraform/exoscale/main.tf b/contrib/terraform/exoscale/main.tf deleted file mode 100644 index eb9fcabcdd3..00000000000 --- a/contrib/terraform/exoscale/main.tf +++ /dev/null @@ -1,49 +0,0 @@ -provider "exoscale" {} - -module "kubernetes" { - source = "./modules/kubernetes-cluster" - - prefix = var.prefix - zone = var.zone - machines = var.machines - - ssh_public_keys = var.ssh_public_keys - - ssh_whitelist = var.ssh_whitelist - api_server_whitelist = var.api_server_whitelist - nodeport_whitelist = var.nodeport_whitelist -} - -# -# Generate ansible inventory -# - -data "template_file" "inventory" { - template = file("${path.module}/templates/inventory.tpl") - - vars = { - connection_strings_master = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s ip=%s etcd_member_name=etcd%d", - 
keys(module.kubernetes.master_ip_addresses), - values(module.kubernetes.master_ip_addresses).*.public_ip, - values(module.kubernetes.master_ip_addresses).*.private_ip, - range(1, length(module.kubernetes.master_ip_addresses) + 1))) - connection_strings_worker = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s ip=%s", - keys(module.kubernetes.worker_ip_addresses), - values(module.kubernetes.worker_ip_addresses).*.public_ip, - values(module.kubernetes.worker_ip_addresses).*.private_ip)) - - list_master = join("\n", keys(module.kubernetes.master_ip_addresses)) - list_worker = join("\n", keys(module.kubernetes.worker_ip_addresses)) - api_lb_ip_address = module.kubernetes.control_plane_lb_ip_address - } -} - -resource "null_resource" "inventories" { - provisioner "local-exec" { - command = "echo '${data.template_file.inventory.rendered}' > ${var.inventory_file}" - } - - triggers = { - template = data.template_file.inventory.rendered - } -} diff --git a/contrib/terraform/exoscale/modules/kubernetes-cluster/main.tf b/contrib/terraform/exoscale/modules/kubernetes-cluster/main.tf deleted file mode 100644 index 3ea4f4f2c7f..00000000000 --- a/contrib/terraform/exoscale/modules/kubernetes-cluster/main.tf +++ /dev/null @@ -1,191 +0,0 @@ -data "exoscale_template" "os_image" { - for_each = var.machines - - zone = var.zone - name = each.value.boot_disk.image_name -} - -data "exoscale_compute_instance" "master_nodes" { - for_each = exoscale_compute_instance.master - - id = each.value.id - zone = var.zone -} - -data "exoscale_compute_instance" "worker_nodes" { - for_each = exoscale_compute_instance.worker - - id = each.value.id - zone = var.zone -} - -resource "exoscale_private_network" "private_network" { - zone = var.zone - name = "${var.prefix}-network" - - start_ip = cidrhost(var.private_network_cidr, 1) - # cidr -1 = Broadcast address - # cidr -2 = DHCP server address (exoscale specific) - end_ip = cidrhost(var.private_network_cidr, -3) - netmask = cidrnetmask(var.private_network_cidr) -} - -resource "exoscale_compute_instance" "master" { - for_each = { - for name, machine in var.machines : - name => machine - if machine.node_type == "master" - } - - name = "${var.prefix}-${each.key}" - template_id = data.exoscale_template.os_image[each.key].id - type = each.value.size - disk_size = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size - state = "Running" - zone = var.zone - security_group_ids = [exoscale_security_group.master_sg.id] - network_interface { - network_id = exoscale_private_network.private_network.id - } - elastic_ip_ids = [exoscale_elastic_ip.control_plane_lb.id] - - user_data = templatefile( - "${path.module}/templates/cloud-init.tmpl", - { - eip_ip_address = exoscale_elastic_ip.ingress_controller_lb.ip_address - node_local_partition_size = each.value.boot_disk.node_local_partition_size - ceph_partition_size = each.value.boot_disk.ceph_partition_size - root_partition_size = each.value.boot_disk.root_partition_size - node_type = "master" - ssh_public_keys = var.ssh_public_keys - } - ) -} - -resource "exoscale_compute_instance" "worker" { - for_each = { - for name, machine in var.machines : - name => machine - if machine.node_type == "worker" - } - - name = "${var.prefix}-${each.key}" - template_id = data.exoscale_template.os_image[each.key].id - type = each.value.size - disk_size = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + 
each.value.boot_disk.ceph_partition_size - state = "Running" - zone = var.zone - security_group_ids = [exoscale_security_group.worker_sg.id] - network_interface { - network_id = exoscale_private_network.private_network.id - } - elastic_ip_ids = [exoscale_elastic_ip.ingress_controller_lb.id] - - user_data = templatefile( - "${path.module}/templates/cloud-init.tmpl", - { - eip_ip_address = exoscale_elastic_ip.ingress_controller_lb.ip_address - node_local_partition_size = each.value.boot_disk.node_local_partition_size - ceph_partition_size = each.value.boot_disk.ceph_partition_size - root_partition_size = each.value.boot_disk.root_partition_size - node_type = "worker" - ssh_public_keys = var.ssh_public_keys - } - ) -} - -resource "exoscale_security_group" "master_sg" { - name = "${var.prefix}-master-sg" - description = "Security group for Kubernetes masters" -} - -resource "exoscale_security_group_rule" "master_sg_rule_ssh" { - security_group_id = exoscale_security_group.master_sg.id - - for_each = toset(var.ssh_whitelist) - # SSH - type = "INGRESS" - start_port = 22 - end_port = 22 - protocol = "TCP" - cidr = each.value -} - -resource "exoscale_security_group_rule" "master_sg_rule_k8s_api" { - security_group_id = exoscale_security_group.master_sg.id - - for_each = toset(var.api_server_whitelist) - # Kubernetes API - type = "INGRESS" - start_port = 6443 - end_port = 6443 - protocol = "TCP" - cidr = each.value -} - -resource "exoscale_security_group" "worker_sg" { - name = "${var.prefix}-worker-sg" - description = "security group for kubernetes worker nodes" -} - -resource "exoscale_security_group_rule" "worker_sg_rule_ssh" { - security_group_id = exoscale_security_group.worker_sg.id - - # SSH - for_each = toset(var.ssh_whitelist) - type = "INGRESS" - start_port = 22 - end_port = 22 - protocol = "TCP" - cidr = each.value -} - -resource "exoscale_security_group_rule" "worker_sg_rule_http" { - security_group_id = exoscale_security_group.worker_sg.id - - # HTTP(S) - for_each = toset(["80", "443"]) - type = "INGRESS" - start_port = each.value - end_port = each.value - protocol = "TCP" - cidr = "0.0.0.0/0" -} - - -resource "exoscale_security_group_rule" "worker_sg_rule_nodeport" { - security_group_id = exoscale_security_group.worker_sg.id - - # HTTP(S) - for_each = toset(var.nodeport_whitelist) - type = "INGRESS" - start_port = 30000 - end_port = 32767 - protocol = "TCP" - cidr = each.value -} - -resource "exoscale_elastic_ip" "ingress_controller_lb" { - zone = var.zone - healthcheck { - mode = "http" - port = 80 - uri = "/healthz" - interval = 10 - timeout = 2 - strikes_ok = 2 - strikes_fail = 3 - } -} - -resource "exoscale_elastic_ip" "control_plane_lb" { - zone = var.zone - healthcheck { - mode = "tcp" - port = 6443 - interval = 10 - timeout = 2 - strikes_ok = 2 - strikes_fail = 3 - } -} diff --git a/contrib/terraform/exoscale/modules/kubernetes-cluster/output.tf b/contrib/terraform/exoscale/modules/kubernetes-cluster/output.tf deleted file mode 100644 index b288bdb49ec..00000000000 --- a/contrib/terraform/exoscale/modules/kubernetes-cluster/output.tf +++ /dev/null @@ -1,31 +0,0 @@ -output "master_ip_addresses" { - value = { - for key, instance in exoscale_compute_instance.master : - instance.name => { - "private_ip" = contains(keys(data.exoscale_compute_instance.master_nodes), key) ? 
data.exoscale_compute_instance.master_nodes[key].private_network_ip_addresses[0] : "" - "public_ip" = exoscale_compute_instance.master[key].ip_address - } - } -} - -output "worker_ip_addresses" { - value = { - for key, instance in exoscale_compute_instance.worker : - instance.name => { - "private_ip" = contains(keys(data.exoscale_compute_instance.worker_nodes), key) ? data.exoscale_compute_instance.worker_nodes[key].private_network_ip_addresses[0] : "" - "public_ip" = exoscale_compute_instance.worker[key].ip_address - } - } -} - -output "cluster_private_network_cidr" { - value = var.private_network_cidr -} - -output "ingress_controller_lb_ip_address" { - value = exoscale_elastic_ip.ingress_controller_lb.ip_address -} - -output "control_plane_lb_ip_address" { - value = exoscale_elastic_ip.control_plane_lb.ip_address -} diff --git a/contrib/terraform/exoscale/modules/kubernetes-cluster/templates/cloud-init.tmpl b/contrib/terraform/exoscale/modules/kubernetes-cluster/templates/cloud-init.tmpl deleted file mode 100644 index a81b8e38a42..00000000000 --- a/contrib/terraform/exoscale/modules/kubernetes-cluster/templates/cloud-init.tmpl +++ /dev/null @@ -1,52 +0,0 @@ -#cloud-config -%{ if ceph_partition_size > 0 || node_local_partition_size > 0} -bootcmd: -- [ cloud-init-per, once, move-second-header, sgdisk, --move-second-header, /dev/vda ] -%{ if node_local_partition_size > 0 } - # Create partition for node local storage -- [ cloud-init-per, once, create-node-local-part, parted, --script, /dev/vda, 'mkpart extended ext4 ${root_partition_size}GB %{ if ceph_partition_size == 0 }-1%{ else }${root_partition_size + node_local_partition_size}GB%{ endif }' ] -- [ cloud-init-per, once, create-fs-node-local-part, mkfs.ext4, /dev/vda2 ] -%{ endif } -%{ if ceph_partition_size > 0 } - # Create partition for rook to use for ceph -- [ cloud-init-per, once, create-ceph-part, parted, --script, /dev/vda, 'mkpart extended ${root_partition_size + node_local_partition_size}GB -1' ] -%{ endif } -%{ endif } - -ssh_authorized_keys: -%{ for ssh_public_key in ssh_public_keys ~} - - ${ssh_public_key} -%{ endfor ~} - -write_files: - - path: /etc/netplan/eth1.yaml - content: | - network: - version: 2 - ethernets: - eth1: - dhcp4: true -%{ if node_type == "worker" } - # TODO: When a VM is seen as healthy and is added to the EIP loadbalancer - # pool it no longer can send traffic back to itself via the EIP IP - # address. - # Remove this if it ever gets solved. - - path: /etc/netplan/20-eip-fix.yaml - content: | - network: - version: 2 - ethernets: - "lo:0": - match: - name: lo - dhcp4: false - addresses: - - ${eip_ip_address}/32 -%{ endif } -runcmd: - - netplan apply -%{ if node_local_partition_size > 0 } - - mkdir -p /mnt/disks/node-local-storage - - chown nobody:nogroup /mnt/disks/node-local-storage - - mount /dev/vda2 /mnt/disks/node-local-storage -%{ endif } diff --git a/contrib/terraform/exoscale/modules/kubernetes-cluster/variables.tf b/contrib/terraform/exoscale/modules/kubernetes-cluster/variables.tf deleted file mode 100644 index c466abfe15b..00000000000 --- a/contrib/terraform/exoscale/modules/kubernetes-cluster/variables.tf +++ /dev/null @@ -1,42 +0,0 @@ -variable "zone" { - type = string - # This is currently the only zone that is supposed to be supporting - # so called "managed private networks". 
- # See: https://www.exoscale.com/syslog/introducing-managed-private-networks - default = "ch-gva-2" -} - -variable "prefix" {} - -variable "machines" { - type = map(object({ - node_type = string - size = string - boot_disk = object({ - image_name = string - root_partition_size = number - ceph_partition_size = number - node_local_partition_size = number - }) - })) -} - -variable "ssh_public_keys" { - type = list(string) -} - -variable "ssh_whitelist" { - type = list(string) -} - -variable "api_server_whitelist" { - type = list(string) -} - -variable "nodeport_whitelist" { - type = list(string) -} - -variable "private_network_cidr" { - default = "172.0.10.0/24" -} diff --git a/contrib/terraform/exoscale/modules/kubernetes-cluster/versions.tf b/contrib/terraform/exoscale/modules/kubernetes-cluster/versions.tf deleted file mode 100644 index 047420aecea..00000000000 --- a/contrib/terraform/exoscale/modules/kubernetes-cluster/versions.tf +++ /dev/null @@ -1,9 +0,0 @@ -terraform { - required_providers { - exoscale = { - source = "exoscale/exoscale" - version = ">= 0.21" - } - } - required_version = ">= 0.13" -} diff --git a/contrib/terraform/exoscale/output.tf b/contrib/terraform/exoscale/output.tf deleted file mode 100644 index 09bf7fa4a12..00000000000 --- a/contrib/terraform/exoscale/output.tf +++ /dev/null @@ -1,15 +0,0 @@ -output "master_ips" { - value = module.kubernetes.master_ip_addresses -} - -output "worker_ips" { - value = module.kubernetes.worker_ip_addresses -} - -output "ingress_controller_lb_ip_address" { - value = module.kubernetes.ingress_controller_lb_ip_address -} - -output "control_plane_lb_ip_address" { - value = module.kubernetes.control_plane_lb_ip_address -} diff --git a/contrib/terraform/exoscale/sample-inventory/cluster.tfvars b/contrib/terraform/exoscale/sample-inventory/cluster.tfvars deleted file mode 100644 index f6152412647..00000000000 --- a/contrib/terraform/exoscale/sample-inventory/cluster.tfvars +++ /dev/null @@ -1,65 +0,0 @@ -prefix = "default" -zone = "ch-gva-2" - -inventory_file = "inventory.ini" - -ssh_public_keys = [ - # Put your public SSH key here - "ssh-rsa I-did-not-read-the-docs", - "ssh-rsa I-did-not-read-the-docs 2", -] - -machines = { - "master-0" : { - "node_type" : "master", - "size" : "Small", - "boot_disk" : { - "image_name" : "Linux Ubuntu 20.04 LTS 64-bit", - "root_partition_size" : 50, - "node_local_partition_size" : 0, - "ceph_partition_size" : 0 - } - }, - "worker-0" : { - "node_type" : "worker", - "size" : "Large", - "boot_disk" : { - "image_name" : "Linux Ubuntu 20.04 LTS 64-bit", - "root_partition_size" : 50, - "node_local_partition_size" : 0, - "ceph_partition_size" : 0 - } - }, - "worker-1" : { - "node_type" : "worker", - "size" : "Large", - "boot_disk" : { - "image_name" : "Linux Ubuntu 20.04 LTS 64-bit", - "root_partition_size" : 50, - "node_local_partition_size" : 0, - "ceph_partition_size" : 0 - } - }, - "worker-2" : { - "node_type" : "worker", - "size" : "Large", - "boot_disk" : { - "image_name" : "Linux Ubuntu 20.04 LTS 64-bit", - "root_partition_size" : 50, - "node_local_partition_size" : 0, - "ceph_partition_size" : 0 - } - } -} - -nodeport_whitelist = [ - "0.0.0.0/0" -] - -ssh_whitelist = [ - "0.0.0.0/0" -] - -api_server_whitelist = [ - "0.0.0.0/0" -] diff --git a/contrib/terraform/exoscale/sample-inventory/group_vars b/contrib/terraform/exoscale/sample-inventory/group_vars deleted file mode 120000 index 37359582379..00000000000 --- a/contrib/terraform/exoscale/sample-inventory/group_vars +++ /dev/null @@ -1 +0,0 @@ 
-../../../../inventory/sample/group_vars \ No newline at end of file diff --git a/contrib/terraform/exoscale/templates/inventory.tpl b/contrib/terraform/exoscale/templates/inventory.tpl deleted file mode 100644 index 85ed1924b1d..00000000000 --- a/contrib/terraform/exoscale/templates/inventory.tpl +++ /dev/null @@ -1,19 +0,0 @@ -[all] -${connection_strings_master} -${connection_strings_worker} - -[kube_control_plane] -${list_master} - -[kube_control_plane:vars] -supplementary_addresses_in_ssl_keys = [ "${api_lb_ip_address}" ] - -[etcd] -${list_master} - -[kube_node] -${list_worker} - -[k8s_cluster:children] -kube_control_plane -kube_node diff --git a/contrib/terraform/exoscale/variables.tf b/contrib/terraform/exoscale/variables.tf deleted file mode 100644 index 14f8455796c..00000000000 --- a/contrib/terraform/exoscale/variables.tf +++ /dev/null @@ -1,46 +0,0 @@ -variable "zone" { - description = "The zone where to run the cluster" -} - -variable "prefix" { - description = "Prefix for resource names" - default = "default" -} - -variable "machines" { - description = "Cluster machines" - type = map(object({ - node_type = string - size = string - boot_disk = object({ - image_name = string - root_partition_size = number - ceph_partition_size = number - node_local_partition_size = number - }) - })) -} - -variable "ssh_public_keys" { - description = "List of public SSH keys which are injected into the VMs." - type = list(string) -} - -variable "ssh_whitelist" { - description = "List of IP ranges (CIDR) to whitelist for ssh" - type = list(string) -} - -variable "api_server_whitelist" { - description = "List of IP ranges (CIDR) to whitelist for kubernetes api server" - type = list(string) -} - -variable "nodeport_whitelist" { - description = "List of IP ranges (CIDR) to whitelist for kubernetes nodeports" - type = list(string) -} - -variable "inventory_file" { - description = "Where to store the generated inventory file" -} diff --git a/contrib/terraform/exoscale/versions.tf b/contrib/terraform/exoscale/versions.tf deleted file mode 100644 index 0333b41b96a..00000000000 --- a/contrib/terraform/exoscale/versions.tf +++ /dev/null @@ -1,15 +0,0 @@ -terraform { - required_providers { - exoscale = { - source = "exoscale/exoscale" - version = ">= 0.21" - } - null = { - source = "hashicorp/null" - } - template = { - source = "hashicorp/template" - } - } - required_version = ">= 0.13" -} diff --git a/contrib/terraform/gcp/README.md b/contrib/terraform/gcp/README.md deleted file mode 100644 index 01e5299db01..00000000000 --- a/contrib/terraform/gcp/README.md +++ /dev/null @@ -1,104 +0,0 @@ -# Kubernetes on GCP with Terraform - -Provision a Kubernetes cluster on GCP using Terraform and Kubespray - -## Overview - -The setup looks like following - -```text - Kubernetes cluster - +-----------------------+ -+---------------+ | +--------------+ | -| | | | +--------------+ | -| API server LB +---------> | | | | -| | | | | Master/etcd | | -+---------------+ | | | node(s) | | - | +-+ | | - | +--------------+ | - | ^ | - | | | - | v | -+---------------+ | +--------------+ | -| | | | +--------------+ | -| Ingress LB +---------> | | | | -| | | | | Worker | | -+---------------+ | | | node(s) | | - | +-+ | | - | +--------------+ | - +-----------------------+ -``` - -## Requirements - -* Terraform 0.12.0 or newer - -## Quickstart - -To get a cluster up and running you'll need a JSON keyfile. 
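-If you do not have a keyfile yet, one can usually be created with the `gcloud` CLI. The commands below are only a sketch: the service account name `kubespray-deployer`, the `roles/compute.admin` binding and the `service-account.json` output path are placeholder assumptions, not values defined by this project. - -```bash -# Create a dedicated service account for Terraform (hypothetical name). -gcloud iam service-accounts create kubespray-deployer --project "$GCP_PROJECT_ID" -# Grant it permission to manage compute resources (adjust the role to your own policy). -gcloud projects add-iam-policy-binding "$GCP_PROJECT_ID" --member "serviceAccount:kubespray-deployer@${GCP_PROJECT_ID}.iam.gserviceaccount.com" --role roles/compute.admin -# Download the JSON key that `keyfile_location` should point to. -gcloud iam service-accounts keys create service-account.json --iam-account "kubespray-deployer@${GCP_PROJECT_ID}.iam.gserviceaccount.com" -```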
-Set the path to the file in the `tfvars.json` file and run the following: - -```bash -terraform apply -var-file tfvars.json -state dev-cluster.tfstate -var gcp_project_id= -var keyfile_location= -``` - -To generate a kubespray inventory based on the terraform state file you can run the following: - -```bash -./generate-inventory.sh dev-cluster.tfstate > inventory.ini -``` - -You should now have an inventory file named `inventory.ini` that you can use with kubespray, e.g. - -```bash -ansible-playbook -i contrib/terraform/gcp/inventory.ini cluster.yml -b -v -``` - -## Variables - -### Required - -* `keyfile_location`: Location of the keyfile to use as credentials for the google terraform provider -* `gcp_project_id`: ID of the GCP project to deploy the cluster in -* `ssh_pub_key`: Path to the public ssh key to use for all machines -* `region`: The region where to run the cluster -* `machines`: Machines to provision. Key of this object will be used as the name of the machine - * `node_type`: The role of this node *(master|worker)* - * `size`: The size to use - * `zone`: The zone the machine should run in - * `additional_disks`: Extra disks to add to the machine. Key of this object will be used as the disk name - * `size`: Size of the disk (in GB) - * `boot_disk`: The boot disk to use - * `image_name`: Name of the image - * `size`: Size of the boot disk (in GB) -* `ssh_whitelist`: List of IP ranges (CIDR) that will be allowed to ssh to the nodes -* `api_server_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the API server -* `nodeport_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the kubernetes nodes on port 30000-32767 (kubernetes nodeports) -* `ingress_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to ingress on ports 80 and 443 -* `extra_ingress_firewalls`: Additional ingress firewall rules. Key will be used as the name of the rule - * `source_ranges`: List of IP ranges (CIDR). Example: `["8.8.8.8"]` - * `protocol`: Protocol. Example `"tcp"` - * `ports`: List of ports, as strings. Example `["53"]` - * `target_tags`: List of target tags (either the machine name or `control-plane` or `worker`).
Example: `["control-plane", "worker-0"]` - -### Optional - -* `prefix`: Prefix to use for all resources, required to be unique for all clusters in the same project *(Defaults to `default`)* -* `master_sa_email`: Service account email to use for the control plane nodes *(Defaults to `""`, auto generate one)* -* `master_sa_scopes`: Service account scopes to use for the control plane nodes *(Defaults to `["https://www.googleapis.com/auth/cloud-platform"]`)* -* `master_preemptible`: Enable [preemptible](https://cloud.google.com/compute/docs/instances/preemptible) - for the control plane nodes *(Defaults to `false`)* -* `master_additional_disk_type`: [Disk type](https://cloud.google.com/compute/docs/disks/#disk-types) - for extra disks added on the control plane nodes *(Defaults to `"pd-ssd"`)* -* `worker_sa_email`: Service account email to use for the worker nodes *(Defaults to `""`, auto generate one)* -* `worker_sa_scopes`: Service account scopes to use for the worker nodes *(Defaults to `["https://www.googleapis.com/auth/cloud-platform"]`)* -* `worker_preemptible`: Enable [preemptible](https://cloud.google.com/compute/docs/instances/preemptible) - for the worker nodes *(Defaults to `false`)* -* `worker_additional_disk_type`: [Disk type](https://cloud.google.com/compute/docs/disks/#disk-types) - for extra disks added on the worker nodes *(Defaults to `"pd-ssd"`)* - -An example variables file can be found in `tfvars.json` - -## Known limitations - -This solution does not provide a bastion host. Thus all the nodes must expose a public IP for kubespray to work. diff --git a/contrib/terraform/gcp/generate-inventory.sh b/contrib/terraform/gcp/generate-inventory.sh deleted file mode 100755 index 585a4f415eb..00000000000 --- a/contrib/terraform/gcp/generate-inventory.sh +++ /dev/null @@ -1,76 +0,0 @@ -#!/bin/bash - -# -# Generates an inventory file based on the terraform output. -# After provisioning a cluster, simply run this command and supply the terraform state file -# Default state file is terraform.tfstate -# - -set -e - -usage () { - echo "Usage: $0 " >&2 - exit 1 -} - -if [[ $# -ne 1 ]]; then - usage -fi - -TF_STATE_FILE=${1} - -if [[ ! -f "${TF_STATE_FILE}" ]]; then - echo "ERROR: state file ${TF_STATE_FILE} doesn't exist" >&2 - usage -fi - -TF_OUT=$(terraform output -state "${TF_STATE_FILE}" -json) - -MASTERS=$(jq -r '.master_ips.value | to_entries[]' <(echo "${TF_OUT}")) -WORKERS=$(jq -r '.worker_ips.value | to_entries[]' <(echo "${TF_OUT}")) -mapfile -t MASTER_NAMES < <(jq -r '.key' <(echo "${MASTERS}")) -mapfile -t WORKER_NAMES < <(jq -r '.key' <(echo "${WORKERS}")) - -API_LB=$(jq -r '.control_plane_lb_ip_address.value' <(echo "${TF_OUT}")) - -# Generate master hosts -i=1 -for name in "${MASTER_NAMES[@]}"; do - private_ip=$(jq -r '. | select( .key=='"\"${name}\""' ) | .value.private_ip' <(echo "${MASTERS}")) - public_ip=$(jq -r '. | select( .key=='"\"${name}\""' ) | .value.public_ip' <(echo "${MASTERS}")) - echo "${name} ansible_user=ubuntu ansible_host=${public_ip} ip=${private_ip} etcd_member_name=etcd${i}" - i=$(( i + 1 )) -done - -# Generate worker hosts -for name in "${WORKER_NAMES[@]}"; do - private_ip=$(jq -r '. | select( .key=='"\"${name}\""' ) | .value.private_ip' <(echo "${WORKERS}")) - public_ip=$(jq -r '.
| select( .key=='"\"${name}\""' ) | .value.public_ip' <(echo "${WORKERS}")) - echo "${name} ansible_user=ubuntu ansible_host=${public_ip} ip=${private_ip}" -done - -echo "" -echo "[kube_control_plane]" -for name in "${MASTER_NAMES[@]}"; do - echo "${name}" -done - -echo "" -echo "[kube_control_plane:vars]" -echo "supplementary_addresses_in_ssl_keys = [ '${API_LB}' ]" # Add LB address to API server certificate -echo "" -echo "[etcd]" -for name in "${MASTER_NAMES[@]}"; do - echo "${name}" -done - -echo "" -echo "[kube_node]" -for name in "${WORKER_NAMES[@]}"; do - echo "${name}" -done - -echo "" -echo "[k8s_cluster:children]" -echo "kube_control_plane" -echo "kube_node" diff --git a/contrib/terraform/gcp/main.tf b/contrib/terraform/gcp/main.tf deleted file mode 100644 index b0b91f57b35..00000000000 --- a/contrib/terraform/gcp/main.tf +++ /dev/null @@ -1,39 +0,0 @@ -terraform { - required_providers { - google = { - source = "hashicorp/google" - version = "~> 4.0" - } - } -} - -provider "google" { - credentials = file(var.keyfile_location) - region = var.region - project = var.gcp_project_id -} - -module "kubernetes" { - source = "./modules/kubernetes-cluster" - region = var.region - prefix = var.prefix - - machines = var.machines - ssh_pub_key = var.ssh_pub_key - - master_sa_email = var.master_sa_email - master_sa_scopes = var.master_sa_scopes - master_preemptible = var.master_preemptible - master_additional_disk_type = var.master_additional_disk_type - worker_sa_email = var.worker_sa_email - worker_sa_scopes = var.worker_sa_scopes - worker_preemptible = var.worker_preemptible - worker_additional_disk_type = var.worker_additional_disk_type - - ssh_whitelist = var.ssh_whitelist - api_server_whitelist = var.api_server_whitelist - nodeport_whitelist = var.nodeport_whitelist - ingress_whitelist = var.ingress_whitelist - - extra_ingress_firewalls = var.extra_ingress_firewalls -} diff --git a/contrib/terraform/gcp/modules/kubernetes-cluster/main.tf b/contrib/terraform/gcp/modules/kubernetes-cluster/main.tf deleted file mode 100644 index a83b73bb251..00000000000 --- a/contrib/terraform/gcp/modules/kubernetes-cluster/main.tf +++ /dev/null @@ -1,421 +0,0 @@ -################################################# -## -## General -## - -resource "google_compute_network" "main" { - name = "${var.prefix}-network" - - auto_create_subnetworks = false -} - -resource "google_compute_subnetwork" "main" { - name = "${var.prefix}-subnet" - network = google_compute_network.main.name - ip_cidr_range = var.private_network_cidr - region = var.region -} - -resource "google_compute_firewall" "deny_all" { - name = "${var.prefix}-default-firewall" - network = google_compute_network.main.name - - priority = 1000 - - source_ranges = ["0.0.0.0/0"] - - deny { - protocol = "all" - } -} - -resource "google_compute_firewall" "allow_internal" { - name = "${var.prefix}-internal-firewall" - network = google_compute_network.main.name - - priority = 500 - - source_ranges = [var.private_network_cidr] - - allow { - protocol = "all" - } -} - -resource "google_compute_firewall" "ssh" { - count = length(var.ssh_whitelist) > 0 ? 1 : 0 - - name = "${var.prefix}-ssh-firewall" - network = google_compute_network.main.name - - priority = 100 - - source_ranges = var.ssh_whitelist - - allow { - protocol = "tcp" - ports = ["22"] - } -} - -resource "google_compute_firewall" "api_server" { - count = length(var.api_server_whitelist) > 0 ? 
1 : 0 - - name = "${var.prefix}-api-server-firewall" - network = google_compute_network.main.name - - priority = 100 - - source_ranges = var.api_server_whitelist - - allow { - protocol = "tcp" - ports = ["6443"] - } -} - -resource "google_compute_firewall" "nodeport" { - count = length(var.nodeport_whitelist) > 0 ? 1 : 0 - - name = "${var.prefix}-nodeport-firewall" - network = google_compute_network.main.name - - priority = 100 - - source_ranges = var.nodeport_whitelist - - allow { - protocol = "tcp" - ports = ["30000-32767"] - } -} - -resource "google_compute_firewall" "ingress_http" { - count = length(var.ingress_whitelist) > 0 ? 1 : 0 - - name = "${var.prefix}-http-ingress-firewall" - network = google_compute_network.main.name - - priority = 100 - - source_ranges = var.ingress_whitelist - - allow { - protocol = "tcp" - ports = ["80"] - } -} - -resource "google_compute_firewall" "ingress_https" { - count = length(var.ingress_whitelist) > 0 ? 1 : 0 - - name = "${var.prefix}-https-ingress-firewall" - network = google_compute_network.main.name - - priority = 100 - - source_ranges = var.ingress_whitelist - - allow { - protocol = "tcp" - ports = ["443"] - } -} - -################################################# -## -## Local variables -## - -locals { - master_target_list = [ - for name, machine in google_compute_instance.master : - "${machine.zone}/${machine.name}" - ] - - worker_target_list = [ - for name, machine in google_compute_instance.worker : - "${machine.zone}/${machine.name}" - ] - - master_disks = flatten([ - for machine_name, machine in var.machines : [ - for disk_name, disk in machine.additional_disks : { - "${machine_name}-${disk_name}" = { - "machine_name": machine_name, - "machine": machine, - "disk_size": disk.size, - "disk_name": disk_name - } - } - ] - if machine.node_type == "master" - ]) - - worker_disks = flatten([ - for machine_name, machine in var.machines : [ - for disk_name, disk in machine.additional_disks : { - "${machine_name}-${disk_name}" = { - "machine_name": machine_name, - "machine": machine, - "disk_size": disk.size, - "disk_name": disk_name - } - } - ] - if machine.node_type == "worker" - ]) -} - -################################################# -## -## Master -## - -resource "google_compute_address" "master" { - for_each = { - for name, machine in var.machines : - name => machine - if machine.node_type == "master" - } - - name = "${var.prefix}-${each.key}-pip" - address_type = "EXTERNAL" - region = var.region -} - -resource "google_compute_disk" "master" { - for_each = { - for item in local.master_disks : - keys(item)[0] => values(item)[0] - } - - name = "${var.prefix}-${each.key}" - type = var.master_additional_disk_type - zone = each.value.machine.zone - size = each.value.disk_size - - physical_block_size_bytes = 4096 -} - -resource "google_compute_attached_disk" "master" { - for_each = { - for item in local.master_disks : - keys(item)[0] => values(item)[0] - } - - disk = google_compute_disk.master[each.key].id - instance = google_compute_instance.master[each.value.machine_name].id -} - -resource "google_compute_instance" "master" { - for_each = { - for name, machine in var.machines : - name => machine - if machine.node_type == "master" - } - - name = "${var.prefix}-${each.key}" - machine_type = each.value.size - zone = each.value.zone - - tags = ["control-plane", "master", each.key] - - boot_disk { - initialize_params { - image = each.value.boot_disk.image_name - size = each.value.boot_disk.size - } - } - - network_interface { - subnetwork = 
google_compute_subnetwork.main.name - - access_config { - nat_ip = google_compute_address.master[each.key].address - } - } - - metadata = { - ssh-keys = "ubuntu:${trimspace(file(pathexpand(var.ssh_pub_key)))}" - } - - service_account { - email = var.master_sa_email - scopes = var.master_sa_scopes - } - - # Since we use google_compute_attached_disk we need to ignore this - lifecycle { - ignore_changes = [attached_disk] - } - - scheduling { - preemptible = var.master_preemptible - automatic_restart = !var.master_preemptible - } -} - -resource "google_compute_forwarding_rule" "master_lb" { - count = length(var.api_server_whitelist) > 0 ? 1 : 0 - - name = "${var.prefix}-master-lb-forward-rule" - - port_range = "6443" - - target = google_compute_target_pool.master_lb[count.index].id -} - -resource "google_compute_target_pool" "master_lb" { - count = length(var.api_server_whitelist) > 0 ? 1 : 0 - - name = "${var.prefix}-master-lb-pool" - instances = local.master_target_list -} - -################################################# -## -## Worker -## - -resource "google_compute_disk" "worker" { - for_each = { - for item in local.worker_disks : - keys(item)[0] => values(item)[0] - } - - name = "${var.prefix}-${each.key}" - type = var.worker_additional_disk_type - zone = each.value.machine.zone - size = each.value.disk_size - - physical_block_size_bytes = 4096 -} - -resource "google_compute_attached_disk" "worker" { - for_each = { - for item in local.worker_disks : - keys(item)[0] => values(item)[0] - } - - disk = google_compute_disk.worker[each.key].id - instance = google_compute_instance.worker[each.value.machine_name].id -} - -resource "google_compute_address" "worker" { - for_each = { - for name, machine in var.machines : - name => machine - if machine.node_type == "worker" - } - - name = "${var.prefix}-${each.key}-pip" - address_type = "EXTERNAL" - region = var.region -} - -resource "google_compute_instance" "worker" { - for_each = { - for name, machine in var.machines : - name => machine - if machine.node_type == "worker" - } - - name = "${var.prefix}-${each.key}" - machine_type = each.value.size - zone = each.value.zone - - tags = ["worker", each.key] - - boot_disk { - initialize_params { - image = each.value.boot_disk.image_name - size = each.value.boot_disk.size - } - } - - network_interface { - subnetwork = google_compute_subnetwork.main.name - - access_config { - nat_ip = google_compute_address.worker[each.key].address - } - } - - metadata = { - ssh-keys = "ubuntu:${trimspace(file(pathexpand(var.ssh_pub_key)))}" - } - - service_account { - email = var.worker_sa_email - scopes = var.worker_sa_scopes - } - - # Since we use google_compute_attached_disk we need to ignore this - lifecycle { - ignore_changes = [attached_disk] - } - - scheduling { - preemptible = var.worker_preemptible - automatic_restart = !var.worker_preemptible - } -} - -resource "google_compute_address" "worker_lb" { - count = length(var.ingress_whitelist) > 0 ? 1 : 0 - - name = "${var.prefix}-worker-lb-address" - address_type = "EXTERNAL" - region = var.region -} - -resource "google_compute_forwarding_rule" "worker_http_lb" { - count = length(var.ingress_whitelist) > 0 ? 1 : 0 - - name = "${var.prefix}-worker-http-lb-forward-rule" - - ip_address = google_compute_address.worker_lb[count.index].address - port_range = "80" - - target = google_compute_target_pool.worker_lb[count.index].id -} - -resource "google_compute_forwarding_rule" "worker_https_lb" { - count = length(var.ingress_whitelist) > 0 ? 
1 : 0 - - name = "${var.prefix}-worker-https-lb-forward-rule" - - ip_address = google_compute_address.worker_lb[count.index].address - port_range = "443" - - target = google_compute_target_pool.worker_lb[count.index].id -} - -resource "google_compute_target_pool" "worker_lb" { - count = length(var.ingress_whitelist) > 0 ? 1 : 0 - - name = "${var.prefix}-worker-lb-pool" - instances = local.worker_target_list -} - -resource "google_compute_firewall" "extra_ingress_firewall" { - for_each = { - for name, firewall in var.extra_ingress_firewalls : - name => firewall - } - - name = "${var.prefix}-${each.key}-ingress" - network = google_compute_network.main.name - - priority = 100 - - source_ranges = each.value.source_ranges - - target_tags = each.value.target_tags - - allow { - protocol = each.value.protocol - ports = each.value.ports - } -} diff --git a/contrib/terraform/gcp/modules/kubernetes-cluster/output.tf b/contrib/terraform/gcp/modules/kubernetes-cluster/output.tf deleted file mode 100644 index d0ffaa93ea9..00000000000 --- a/contrib/terraform/gcp/modules/kubernetes-cluster/output.tf +++ /dev/null @@ -1,27 +0,0 @@ -output "master_ip_addresses" { - value = { - for key, instance in google_compute_instance.master : - instance.name => { - "private_ip" = instance.network_interface.0.network_ip - "public_ip" = instance.network_interface.0.access_config.0.nat_ip - } - } -} - -output "worker_ip_addresses" { - value = { - for key, instance in google_compute_instance.worker : - instance.name => { - "private_ip" = instance.network_interface.0.network_ip - "public_ip" = instance.network_interface.0.access_config.0.nat_ip - } - } -} - -output "ingress_controller_lb_ip_address" { - value = length(var.ingress_whitelist) > 0 ? google_compute_address.worker_lb.0.address : "" -} - -output "control_plane_lb_ip_address" { - value = length(var.api_server_whitelist) > 0 ? 
google_compute_forwarding_rule.master_lb.0.ip_address : "" -} diff --git a/contrib/terraform/gcp/modules/kubernetes-cluster/variables.tf b/contrib/terraform/gcp/modules/kubernetes-cluster/variables.tf deleted file mode 100644 index bb8d23be06f..00000000000 --- a/contrib/terraform/gcp/modules/kubernetes-cluster/variables.tf +++ /dev/null @@ -1,86 +0,0 @@ -variable "region" { - type = string -} - -variable "prefix" {} - -variable "machines" { - type = map(object({ - node_type = string - size = string - zone = string - additional_disks = map(object({ - size = number - })) - boot_disk = object({ - image_name = string - size = number - }) - })) -} - -variable "master_sa_email" { - type = string -} - -variable "master_sa_scopes" { - type = list(string) -} - -variable "master_preemptible" { - type = bool -} - -variable "master_additional_disk_type" { - type = string -} - -variable "worker_sa_email" { - type = string -} - -variable "worker_sa_scopes" { - type = list(string) -} - -variable "worker_preemptible" { - type = bool -} - -variable "worker_additional_disk_type" { - type = string -} - -variable "ssh_pub_key" {} - -variable "ssh_whitelist" { - type = list(string) -} - -variable "api_server_whitelist" { - type = list(string) -} - -variable "nodeport_whitelist" { - type = list(string) -} - -variable "ingress_whitelist" { - type = list(string) - default = ["0.0.0.0/0"] -} - -variable "private_network_cidr" { - default = "10.0.10.0/24" -} - -variable "extra_ingress_firewalls" { - type = map(object({ - source_ranges = set(string) - protocol = string - ports = list(string) - target_tags = set(string) - })) - - default = {} -} diff --git a/contrib/terraform/gcp/output.tf b/contrib/terraform/gcp/output.tf deleted file mode 100644 index 09bf7fa4a12..00000000000 --- a/contrib/terraform/gcp/output.tf +++ /dev/null @@ -1,15 +0,0 @@ -output "master_ips" { - value = module.kubernetes.master_ip_addresses -} - -output "worker_ips" { - value = module.kubernetes.worker_ip_addresses -} - -output "ingress_controller_lb_ip_address" { - value = module.kubernetes.ingress_controller_lb_ip_address -} - -output "control_plane_lb_ip_address" { - value = module.kubernetes.control_plane_lb_ip_address -} diff --git a/contrib/terraform/gcp/tfvars.json b/contrib/terraform/gcp/tfvars.json deleted file mode 100644 index 056b8fe80be..00000000000 --- a/contrib/terraform/gcp/tfvars.json +++ /dev/null @@ -1,63 +0,0 @@ -{ - "gcp_project_id": "GCP_PROJECT_ID", - "region": "us-central1", - "ssh_pub_key": "~/.ssh/id_rsa.pub", - - "keyfile_location": "service-account.json", - - "prefix": "development", - - "ssh_whitelist": [ - "1.2.3.4/32" - ], - "api_server_whitelist": [ - "1.2.3.4/32" - ], - "nodeport_whitelist": [ - "1.2.3.4/32" - ], - "ingress_whitelist": [ - "0.0.0.0/0" - ], - - "machines": { - "master-0": { - "node_type": "master", - "size": "n1-standard-2", - "zone": "us-central1-a", - "additional_disks": {}, - "boot_disk": { - "image_name": "ubuntu-os-cloud/ubuntu-2004-focal-v20220118", - "size": 50 - } - }, - "worker-0": { - "node_type": "worker", - "size": "n1-standard-8", - "zone": "us-central1-a", - "additional_disks": { - "extra-disk-1": { - "size": 100 - } - }, - "boot_disk": { - "image_name": "ubuntu-os-cloud/ubuntu-2004-focal-v20220118", - "size": 50 - } - }, - "worker-1": { - "node_type": "worker", - "size": "n1-standard-8", - "zone": "us-central1-a", - "additional_disks": { - "extra-disk-1": { - "size": 100 - } - }, - "boot_disk": { - "image_name": "ubuntu-os-cloud/ubuntu-2004-focal-v20220118", - "size": 50 - } - } 
- } -} diff --git a/contrib/terraform/gcp/variables.tf b/contrib/terraform/gcp/variables.tf deleted file mode 100644 index 3e960232a97..00000000000 --- a/contrib/terraform/gcp/variables.tf +++ /dev/null @@ -1,108 +0,0 @@ -variable keyfile_location { - description = "Location of the json keyfile to use with the google provider" - type = string -} - -variable region { - description = "Region of all resources" - type = string -} - -variable gcp_project_id { - description = "ID of the project" - type = string -} - -variable prefix { - description = "Prefix for resource names" - default = "default" -} - -variable machines { - description = "Cluster machines" - type = map(object({ - node_type = string - size = string - zone = string - additional_disks = map(object({ - size = number - })) - boot_disk = object({ - image_name = string - size = number - }) - })) -} - -variable "master_sa_email" { - type = string - default = "" -} - -variable "master_sa_scopes" { - type = list(string) - default = ["https://www.googleapis.com/auth/cloud-platform"] -} - -variable "master_preemptible" { - type = bool - default = false -} - -variable "master_additional_disk_type" { - type = string - default = "pd-ssd" -} - -variable "worker_sa_email" { - type = string - default = "" -} - -variable "worker_sa_scopes" { - type = list(string) - default = ["https://www.googleapis.com/auth/cloud-platform"] -} - -variable "worker_preemptible" { - type = bool - default = false -} - -variable "worker_additional_disk_type" { - type = string - default = "pd-ssd" -} - -variable ssh_pub_key { - description = "Path to public SSH key file which is injected into the VMs." - type = string -} - -variable ssh_whitelist { - type = list(string) -} - -variable api_server_whitelist { - type = list(string) -} - -variable nodeport_whitelist { - type = list(string) -} - -variable "ingress_whitelist" { - type = list(string) - default = ["0.0.0.0/0"] -} - -variable "extra_ingress_firewalls" { - type = map(object({ - source_ranges = set(string) - protocol = string - ports = list(string) - target_tags = set(string) - })) - - default = {} -} diff --git a/contrib/terraform/group_vars b/contrib/terraform/group_vars deleted file mode 120000 index 4dd828e8e58..00000000000 --- a/contrib/terraform/group_vars +++ /dev/null @@ -1 +0,0 @@ -../../inventory/local/group_vars \ No newline at end of file diff --git a/contrib/terraform/hetzner/README.md b/contrib/terraform/hetzner/README.md deleted file mode 100644 index 79e879c4fe6..00000000000 --- a/contrib/terraform/hetzner/README.md +++ /dev/null @@ -1,122 +0,0 @@ -# Kubernetes on Hetzner with Terraform - -Provision a Kubernetes cluster on [Hetzner](https://www.hetzner.com/cloud) using Terraform and Kubespray - -## Overview - -The setup looks like following - -```text - Kubernetes cluster -+--------------------------+ -| +--------------+ | -| | +--------------+ | -| --> | | | | -| | | Master/etcd | | -| | | node(s) | | -| +-+ | | -| +--------------+ | -| ^ | -| | | -| v | -| +--------------+ | -| | +--------------+ | -| --> | | | | -| | | Worker | | -| | | node(s) | | -| +-+ | | -| +--------------+ | -+--------------------------+ -``` - -The nodes uses a private network for node to node communication and a public interface for all external communication. - -## Requirements - -* Terraform 0.14.0 or newer - -## Quickstart - -NOTE: Assumes you are at the root of the kubespray repo. - -For authentication in your cluster you can use the environment variables. 
- -```bash -export HCLOUD_TOKEN=api-token -``` - -Copy the cluster configuration file. - -```bash -CLUSTER=my-hetzner-cluster -cp -r inventory/sample inventory/$CLUSTER -cp contrib/terraform/hetzner/default.tfvars inventory/$CLUSTER/ -cd inventory/$CLUSTER -``` - -Edit `default.tfvars` to match your requirements. - -To use Flatcar Container Linux instead of the basic Hetzner images, switch to the Terraform directory. - -```bash -cd ../../contrib/terraform/hetzner -``` - -Edit `main.tf`: uncomment the module line `source = "./modules/kubernetes-cluster-flatcar"` and -comment out the line `source = "./modules/kubernetes-cluster"`. - -Also activate `ssh_private_key_path = var.ssh_private_key_path`. Each VM boots into -rescue mode with the image selected in `var.machines`, but Flatcar is installed instead. - -Run Terraform to create the infrastructure. - -```bash -cd ./kubespray -terraform -chdir=./contrib/terraform/hetzner/ init -terraform -chdir=./contrib/terraform/hetzner/ apply --var-file=../../../inventory/$CLUSTER/default.tfvars -``` - -You should now have an inventory file named `inventory.ini` that you can use with Kubespray -to set up a cluster. - -It is a good idea to check that you have basic SSH connectivity to the nodes. You can do that by: - -```bash -ansible -i inventory.ini -m ping all -``` - -You can set up Kubernetes with Kubespray using the generated inventory: - -```bash -ansible-playbook -i inventory.ini ../../cluster.yml -b -v -``` - -## Cloud controller - -For better integration with the cloud, you can install the [hcloud cloud controller manager](https://github.com/hetznercloud/hcloud-cloud-controller-manager) and the [CSI driver](https://github.com/hetznercloud/csi-driver). - -Please read the instructions in both repos on how to install them. - -## Teardown - -You can tear down your infrastructure using the following Terraform command: - -```bash -cd ./kubespray -terraform -chdir=./contrib/terraform/hetzner/ destroy --var-file=../../../inventory/$CLUSTER/default.tfvars -``` - -## Variables - -* `prefix`: Prefix to add to all resources; if set to "", no prefix is used -* `ssh_public_keys`: List of public SSH keys to install on all machines -* `zone`: The zone where to run the cluster -* `network_zone`: The network zone where the cluster is running -* `machines`: Machines to provision.
Key of this object will be used as the name of the machine - * `node_type`: The role of this node *(master|worker)* - * `size`: Size of the VM - * `image`: The image to use for the VM -* `ssh_whitelist`: List of IP ranges (CIDR) that will be allowed to ssh to the nodes -* `api_server_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the API server -* `nodeport_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the kubernetes nodes on port 30000-32767 (kubernetes nodeports) -* `ingress_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to kubernetes workers on port 80 and 443 diff --git a/contrib/terraform/hetzner/default.tfvars b/contrib/terraform/hetzner/default.tfvars deleted file mode 100644 index 4e70bf1d938..00000000000 --- a/contrib/terraform/hetzner/default.tfvars +++ /dev/null @@ -1,46 +0,0 @@ -prefix = "default" -zone = "hel1" -network_zone = "eu-central" -inventory_file = "inventory.ini" - -ssh_public_keys = [ - # Put your public SSH key here - "ssh-rsa I-did-not-read-the-docs", - "ssh-rsa I-did-not-read-the-docs 2", -] - -ssh_private_key_path = "~/.ssh/id_rsa" - -machines = { - "master-0" : { - "node_type" : "master", - "size" : "cx21", - "image" : "ubuntu-22.04", - }, - "worker-0" : { - "node_type" : "worker", - "size" : "cx21", - "image" : "ubuntu-22.04", - }, - "worker-1" : { - "node_type" : "worker", - "size" : "cx21", - "image" : "ubuntu-22.04", - } -} - -nodeport_whitelist = [ - "0.0.0.0/0" -] - -ingress_whitelist = [ - "0.0.0.0/0" -] - -ssh_whitelist = [ - "0.0.0.0/0" -] - -api_server_whitelist = [ - "0.0.0.0/0" -] diff --git a/contrib/terraform/hetzner/main.tf b/contrib/terraform/hetzner/main.tf deleted file mode 100644 index 8e38cee302e..00000000000 --- a/contrib/terraform/hetzner/main.tf +++ /dev/null @@ -1,57 +0,0 @@ -provider "hcloud" {} - -module "kubernetes" { - source = "./modules/kubernetes-cluster" - # source = "./modules/kubernetes-cluster-flatcar" - - prefix = var.prefix - - zone = var.zone - - machines = var.machines - - #only for flatcar - #ssh_private_key_path = var.ssh_private_key_path - - ssh_public_keys = var.ssh_public_keys - network_zone = var.network_zone - - ssh_whitelist = var.ssh_whitelist - api_server_whitelist = var.api_server_whitelist - nodeport_whitelist = var.nodeport_whitelist - ingress_whitelist = var.ingress_whitelist -} - -# -# Generate ansible inventory -# - -locals { - inventory = templatefile( - "${path.module}/templates/inventory.tpl", - { - connection_strings_master = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s ip=%s etcd_member_name=etcd%d", - keys(module.kubernetes.master_ip_addresses), - values(module.kubernetes.master_ip_addresses).*.public_ip, - values(module.kubernetes.master_ip_addresses).*.private_ip, - range(1, length(module.kubernetes.master_ip_addresses) + 1))) - connection_strings_worker = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s ip=%s", - keys(module.kubernetes.worker_ip_addresses), - values(module.kubernetes.worker_ip_addresses).*.public_ip, - values(module.kubernetes.worker_ip_addresses).*.private_ip)) - list_master = join("\n", keys(module.kubernetes.master_ip_addresses)) - list_worker = join("\n", keys(module.kubernetes.worker_ip_addresses)) - network_id = module.kubernetes.network_id - } - ) -} - -resource "null_resource" "inventories" { - provisioner "local-exec" { - command = "echo '${local.inventory}' > ${var.inventory_file}" - } - - triggers = { - template = local.inventory - } -} diff --git 
a/contrib/terraform/hetzner/modules/kubernetes-cluster-flatcar/main.tf b/contrib/terraform/hetzner/modules/kubernetes-cluster-flatcar/main.tf deleted file mode 100644 index b54d360bff3..00000000000 --- a/contrib/terraform/hetzner/modules/kubernetes-cluster-flatcar/main.tf +++ /dev/null @@ -1,144 +0,0 @@ -resource "hcloud_network" "kubernetes" { - name = "${var.prefix}-network" - ip_range = var.private_network_cidr -} - -resource "hcloud_network_subnet" "kubernetes" { - type = "cloud" - network_id = hcloud_network.kubernetes.id - network_zone = var.network_zone - ip_range = var.private_subnet_cidr -} - -resource "hcloud_ssh_key" "first" { - name = var.prefix - public_key = var.ssh_public_keys.0 -} - -resource "hcloud_server" "machine" { - for_each = { - for name, machine in var.machines : - name => machine - } - - name = "${var.prefix}-${each.key}" - ssh_keys = [hcloud_ssh_key.first.id] - # boot into rescue OS - rescue = "linux64" - # dummy value for the OS because Flatcar is not available - image = each.value.image - server_type = each.value.size - location = var.zone - connection { - host = self.ipv4_address - timeout = "5m" - private_key = file(var.ssh_private_key_path) - } - firewall_ids = each.value.node_type == "master" ? [hcloud_firewall.master.id] : [hcloud_firewall.worker.id] - provisioner "file" { - content = data.ct_config.machine-ignitions[each.key].rendered - destination = "/root/ignition.json" - } - - provisioner "remote-exec" { - inline = [ - "set -ex", - "apt update", - "apt install -y gawk", - "curl -fsSLO --retry-delay 1 --retry 60 --retry-connrefused --retry-max-time 60 --connect-timeout 20 https://raw.githubusercontent.com/flatcar/init/flatcar-master/bin/flatcar-install", - "chmod +x flatcar-install", - "./flatcar-install -s -i /root/ignition.json -C stable", - "shutdown -r +1", - ] - } - - # optional: - provisioner "remote-exec" { - connection { - host = self.ipv4_address - private_key = file(var.ssh_private_key_path) - timeout = "3m" - user = var.user_flatcar - } - - inline = [ - "sudo hostnamectl set-hostname ${self.name}", - ] - } -} - -resource "hcloud_server_network" "machine" { - for_each = { - for name, machine in var.machines : - name => hcloud_server.machine[name] - } - server_id = each.value.id - subnet_id = hcloud_network_subnet.kubernetes.id -} - -data "ct_config" "machine-ignitions" { - for_each = { - for name, machine in var.machines : - name => machine - } - - strict = false - content = templatefile( - "${path.module}/templates/machine.yaml.tmpl", - { - ssh_keys = jsonencode(var.ssh_public_keys) - user_flatcar = var.user_flatcar - name = each.key - } - ) -} - -resource "hcloud_firewall" "master" { - name = "${var.prefix}-master-firewall" - - rule { - direction = "in" - protocol = "tcp" - port = "22" - source_ips = var.ssh_whitelist - } - - rule { - direction = "in" - protocol = "tcp" - port = "6443" - source_ips = var.api_server_whitelist - } -} - -resource "hcloud_firewall" "worker" { - name = "${var.prefix}-worker-firewall" - - rule { - direction = "in" - protocol = "tcp" - port = "22" - source_ips = var.ssh_whitelist - } - - rule { - direction = "in" - protocol = "tcp" - port = "80" - source_ips = var.ingress_whitelist - } - - rule { - direction = "in" - protocol = "tcp" - port = "443" - source_ips = var.ingress_whitelist - } - - rule { - direction = "in" - protocol = "tcp" - port = "30000-32767" - source_ips = var.nodeport_whitelist - } -} diff --git a/contrib/terraform/hetzner/modules/kubernetes-cluster-flatcar/outputs.tf 
b/contrib/terraform/hetzner/modules/kubernetes-cluster-flatcar/outputs.tf deleted file mode 100644 index be524deb66d..00000000000 --- a/contrib/terraform/hetzner/modules/kubernetes-cluster-flatcar/outputs.tf +++ /dev/null @@ -1,29 +0,0 @@ -output "master_ip_addresses" { - value = { - for name, machine in var.machines : - name => { - "private_ip" = hcloud_server_network.machine[name].ip - "public_ip" = hcloud_server.machine[name].ipv4_address - } - if machine.node_type == "master" - } -} - -output "worker_ip_addresses" { - value = { - for name, machine in var.machines : - name => { - "private_ip" = hcloud_server_network.machine[name].ip - "public_ip" = hcloud_server.machine[name].ipv4_address - } - if machine.node_type == "worker" - } -} - -output "cluster_private_network_cidr" { - value = var.private_subnet_cidr -} - -output "network_id" { - value = hcloud_network.kubernetes.id -} diff --git a/contrib/terraform/hetzner/modules/kubernetes-cluster-flatcar/templates/machine.yaml.tmpl b/contrib/terraform/hetzner/modules/kubernetes-cluster-flatcar/templates/machine.yaml.tmpl deleted file mode 100644 index 95ce1d867ad..00000000000 --- a/contrib/terraform/hetzner/modules/kubernetes-cluster-flatcar/templates/machine.yaml.tmpl +++ /dev/null @@ -1,19 +0,0 @@ -variant: flatcar -version: 1.0.0 - -passwd: - users: - - name: ${user_flatcar} - ssh_authorized_keys: ${ssh_keys} - -storage: - files: - - path: /home/core/works - filesystem: root - mode: 0755 - contents: - inline: | - #!/bin/bash - set -euo pipefail - hostname="$(hostname)" - echo My name is ${name} and the hostname is $${hostname} diff --git a/contrib/terraform/hetzner/modules/kubernetes-cluster-flatcar/variables.tf b/contrib/terraform/hetzner/modules/kubernetes-cluster-flatcar/variables.tf deleted file mode 100644 index 809377946ec..00000000000 --- a/contrib/terraform/hetzner/modules/kubernetes-cluster-flatcar/variables.tf +++ /dev/null @@ -1,60 +0,0 @@ - -variable "zone" { - type = string - default = "fsn1" -} - -variable "prefix" { - default = "k8s" -} - -variable "user_flatcar" { - type = string - default = "core" -} - -variable "machines" { - type = map(object({ - node_type = string - size = string - image = string - })) -} - - - -variable "ssh_public_keys" { - type = list(string) -} - -variable "ssh_private_key_path" { - type = string - default = "~/.ssh/id_rsa" -} - -variable "ssh_whitelist" { - type = list(string) -} - -variable "api_server_whitelist" { - type = list(string) -} - -variable "nodeport_whitelist" { - type = list(string) -} - -variable "ingress_whitelist" { - type = list(string) -} - -variable "private_network_cidr" { - default = "10.0.0.0/16" -} - -variable "private_subnet_cidr" { - default = "10.0.10.0/24" -} -variable "network_zone" { - default = "eu-central" -} diff --git a/contrib/terraform/hetzner/modules/kubernetes-cluster-flatcar/versions.tf b/contrib/terraform/hetzner/modules/kubernetes-cluster-flatcar/versions.tf deleted file mode 100644 index ac98e278469..00000000000 --- a/contrib/terraform/hetzner/modules/kubernetes-cluster-flatcar/versions.tf +++ /dev/null @@ -1,14 +0,0 @@ -terraform { - required_providers { - hcloud = { - source = "hetznercloud/hcloud" - } - ct = { - source = "poseidon/ct" - version = "0.11.0" - } - null = { - source = "hashicorp/null" - } - } -} diff --git a/contrib/terraform/hetzner/modules/kubernetes-cluster/main.tf b/contrib/terraform/hetzner/modules/kubernetes-cluster/main.tf deleted file mode 100644 index 2a0e458815f..00000000000 --- 
a/contrib/terraform/hetzner/modules/kubernetes-cluster/main.tf +++ /dev/null @@ -1,122 +0,0 @@ -resource "hcloud_network" "kubernetes" { - name = "${var.prefix}-network" - ip_range = var.private_network_cidr -} - -resource "hcloud_network_subnet" "kubernetes" { - type = "cloud" - network_id = hcloud_network.kubernetes.id - network_zone = var.network_zone - ip_range = var.private_subnet_cidr -} - -resource "hcloud_server" "master" { - for_each = { - for name, machine in var.machines : - name => machine - if machine.node_type == "master" - } - - name = "${var.prefix}-${each.key}" - image = each.value.image - server_type = each.value.size - location = var.zone - - user_data = templatefile( - "${path.module}/templates/cloud-init.tmpl", - { - ssh_public_keys = var.ssh_public_keys - } - ) - - firewall_ids = [hcloud_firewall.master.id] -} - -resource "hcloud_server_network" "master" { - for_each = hcloud_server.master - - server_id = each.value.id - - subnet_id = hcloud_network_subnet.kubernetes.id -} - -resource "hcloud_server" "worker" { - for_each = { - for name, machine in var.machines : - name => machine - if machine.node_type == "worker" - } - - name = "${var.prefix}-${each.key}" - image = each.value.image - server_type = each.value.size - location = var.zone - - user_data = templatefile( - "${path.module}/templates/cloud-init.tmpl", - { - ssh_public_keys = var.ssh_public_keys - } - ) - - firewall_ids = [hcloud_firewall.worker.id] - -} - -resource "hcloud_server_network" "worker" { - for_each = hcloud_server.worker - - server_id = each.value.id - - subnet_id = hcloud_network_subnet.kubernetes.id -} - -resource "hcloud_firewall" "master" { - name = "${var.prefix}-master-firewall" - - rule { - direction = "in" - protocol = "tcp" - port = "22" - source_ips = var.ssh_whitelist - } - - rule { - direction = "in" - protocol = "tcp" - port = "6443" - source_ips = var.api_server_whitelist - } -} - -resource "hcloud_firewall" "worker" { - name = "${var.prefix}-worker-firewall" - - rule { - direction = "in" - protocol = "tcp" - port = "22" - source_ips = var.ssh_whitelist - } - - rule { - direction = "in" - protocol = "tcp" - port = "80" - source_ips = var.ingress_whitelist - } - - rule { - direction = "in" - protocol = "tcp" - port = "443" - source_ips = var.ingress_whitelist - } - - rule { - direction = "in" - protocol = "tcp" - port = "30000-32767" - source_ips = var.nodeport_whitelist - } -} diff --git a/contrib/terraform/hetzner/modules/kubernetes-cluster/output.tf b/contrib/terraform/hetzner/modules/kubernetes-cluster/output.tf deleted file mode 100644 index 5c31aaa003c..00000000000 --- a/contrib/terraform/hetzner/modules/kubernetes-cluster/output.tf +++ /dev/null @@ -1,27 +0,0 @@ -output "master_ip_addresses" { - value = { - for key, instance in hcloud_server.master : - instance.name => { - "private_ip" = hcloud_server_network.master[key].ip - "public_ip" = hcloud_server.master[key].ipv4_address - } - } -} - -output "worker_ip_addresses" { - value = { - for key, instance in hcloud_server.worker : - instance.name => { - "private_ip" = hcloud_server_network.worker[key].ip - "public_ip" = hcloud_server.worker[key].ipv4_address - } - } -} - -output "cluster_private_network_cidr" { - value = var.private_subnet_cidr -} - -output "network_id" { - value = hcloud_network.kubernetes.id -} diff --git a/contrib/terraform/hetzner/modules/kubernetes-cluster/templates/cloud-init.tmpl b/contrib/terraform/hetzner/modules/kubernetes-cluster/templates/cloud-init.tmpl deleted file mode 100644 index 
02a4e2dd084..00000000000 --- a/contrib/terraform/hetzner/modules/kubernetes-cluster/templates/cloud-init.tmpl +++ /dev/null @@ -1,16 +0,0 @@ -#cloud-config - -users: - - default - - name: ubuntu - shell: /bin/bash - sudo: "ALL=(ALL) NOPASSWD:ALL" - ssh_authorized_keys: - %{ for ssh_public_key in ssh_public_keys ~} - - ${ssh_public_key} - %{ endfor ~} - -ssh_authorized_keys: -%{ for ssh_public_key in ssh_public_keys ~} - - ${ssh_public_key} -%{ endfor ~} diff --git a/contrib/terraform/hetzner/modules/kubernetes-cluster/variables.tf b/contrib/terraform/hetzner/modules/kubernetes-cluster/variables.tf deleted file mode 100644 index 7486e0806a5..00000000000 --- a/contrib/terraform/hetzner/modules/kubernetes-cluster/variables.tf +++ /dev/null @@ -1,44 +0,0 @@ -variable "zone" { - type = string -} - -variable "prefix" {} - -variable "machines" { - type = map(object({ - node_type = string - size = string - image = string - })) -} - -variable "ssh_public_keys" { - type = list(string) -} - -variable "ssh_whitelist" { - type = list(string) -} - -variable "api_server_whitelist" { - type = list(string) -} - -variable "nodeport_whitelist" { - type = list(string) -} - -variable "ingress_whitelist" { - type = list(string) -} - -variable "private_network_cidr" { - default = "10.0.0.0/16" -} - -variable "private_subnet_cidr" { - default = "10.0.10.0/24" -} -variable "network_zone" { - default = "eu-central" -} diff --git a/contrib/terraform/hetzner/modules/kubernetes-cluster/versions.tf b/contrib/terraform/hetzner/modules/kubernetes-cluster/versions.tf deleted file mode 100644 index 78bc5047b07..00000000000 --- a/contrib/terraform/hetzner/modules/kubernetes-cluster/versions.tf +++ /dev/null @@ -1,9 +0,0 @@ -terraform { - required_providers { - hcloud = { - source = "hetznercloud/hcloud" - version = "1.38.2" - } - } - required_version = ">= 0.14" -} diff --git a/contrib/terraform/hetzner/output.tf b/contrib/terraform/hetzner/output.tf deleted file mode 100644 index 0336f72ca80..00000000000 --- a/contrib/terraform/hetzner/output.tf +++ /dev/null @@ -1,7 +0,0 @@ -output "master_ips" { - value = module.kubernetes.master_ip_addresses -} - -output "worker_ips" { - value = module.kubernetes.worker_ip_addresses -} diff --git a/contrib/terraform/hetzner/sample-inventory/cluster.tfvars b/contrib/terraform/hetzner/sample-inventory/cluster.tfvars deleted file mode 100644 index 4e70bf1d938..00000000000 --- a/contrib/terraform/hetzner/sample-inventory/cluster.tfvars +++ /dev/null @@ -1,46 +0,0 @@ -prefix = "default" -zone = "hel1" -network_zone = "eu-central" -inventory_file = "inventory.ini" - -ssh_public_keys = [ - # Put your public SSH key here - "ssh-rsa I-did-not-read-the-docs", - "ssh-rsa I-did-not-read-the-docs 2", -] - -ssh_private_key_path = "~/.ssh/id_rsa" - -machines = { - "master-0" : { - "node_type" : "master", - "size" : "cx21", - "image" : "ubuntu-22.04", - }, - "worker-0" : { - "node_type" : "worker", - "size" : "cx21", - "image" : "ubuntu-22.04", - }, - "worker-1" : { - "node_type" : "worker", - "size" : "cx21", - "image" : "ubuntu-22.04", - } -} - -nodeport_whitelist = [ - "0.0.0.0/0" -] - -ingress_whitelist = [ - "0.0.0.0/0" -] - -ssh_whitelist = [ - "0.0.0.0/0" -] - -api_server_whitelist = [ - "0.0.0.0/0" -] diff --git a/contrib/terraform/hetzner/sample-inventory/group_vars b/contrib/terraform/hetzner/sample-inventory/group_vars deleted file mode 120000 index 37359582379..00000000000 --- a/contrib/terraform/hetzner/sample-inventory/group_vars +++ /dev/null @@ -1 +0,0 @@ 
-../../../../inventory/sample/group_vars \ No newline at end of file diff --git a/contrib/terraform/hetzner/templates/inventory.tpl b/contrib/terraform/hetzner/templates/inventory.tpl deleted file mode 100644 index 08d63693f97..00000000000 --- a/contrib/terraform/hetzner/templates/inventory.tpl +++ /dev/null @@ -1,19 +0,0 @@ -[all] -${connection_strings_master} -${connection_strings_worker} - -[kube_control_plane] -${list_master} - -[etcd] -${list_master} - -[kube_node] -${list_worker} - -[k8s_cluster:children] -kube_control_plane -kube_node - -[k8s_cluster:vars] -network_id=${network_id} diff --git a/contrib/terraform/hetzner/variables.tf b/contrib/terraform/hetzner/variables.tf deleted file mode 100644 index 049ce0d4227..00000000000 --- a/contrib/terraform/hetzner/variables.tf +++ /dev/null @@ -1,56 +0,0 @@ -variable "zone" { - description = "The zone where to run the cluster" -} -variable "network_zone" { - description = "The network zone where the cluster is running" - default = "eu-central" -} - -variable "prefix" { - description = "Prefix for resource names" - default = "default" -} - -variable "machines" { - description = "Cluster machines" - type = map(object({ - node_type = string - size = string - image = string - })) -} - -variable "ssh_public_keys" { - description = "Public SSH key which are injected into the VMs." - type = list(string) -} - -variable "ssh_private_key_path" { - description = "Private SSH key which connect to the VMs." - type = string - default = "~/.ssh/id_rsa" -} - -variable "ssh_whitelist" { - description = "List of IP ranges (CIDR) to whitelist for ssh" - type = list(string) -} - -variable "api_server_whitelist" { - description = "List of IP ranges (CIDR) to whitelist for kubernetes api server" - type = list(string) -} - -variable "nodeport_whitelist" { - description = "List of IP ranges (CIDR) to whitelist for kubernetes nodeports" - type = list(string) -} - -variable "ingress_whitelist" { - description = "List of IP ranges (CIDR) to whitelist for HTTP" - type = list(string) -} - -variable "inventory_file" { - description = "Where to store the generated inventory file" -} diff --git a/contrib/terraform/hetzner/versions.tf b/contrib/terraform/hetzner/versions.tf deleted file mode 100644 index e331beb4582..00000000000 --- a/contrib/terraform/hetzner/versions.tf +++ /dev/null @@ -1,12 +0,0 @@ -terraform { - required_providers { - hcloud = { - source = "hetznercloud/hcloud" - version = "1.38.2" - } - null = { - source = "hashicorp/null" - } - } - required_version = ">= 0.14" -} diff --git a/contrib/terraform/nifcloud/.gitignore b/contrib/terraform/nifcloud/.gitignore deleted file mode 100644 index 9adadc30ac2..00000000000 --- a/contrib/terraform/nifcloud/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -*.tfstate* -.terraform.lock.hcl -.terraform - -sample-inventory/inventory.ini diff --git a/contrib/terraform/nifcloud/README.md b/contrib/terraform/nifcloud/README.md deleted file mode 100644 index a6dcf014855..00000000000 --- a/contrib/terraform/nifcloud/README.md +++ /dev/null @@ -1,138 +0,0 @@ -# Kubernetes on NIFCLOUD with Terraform - -Provision a Kubernetes cluster on [NIFCLOUD](https://pfs.nifcloud.com/) using Terraform and Kubespray - -## Overview - -The setup looks like following - -```text - Kubernetes cluster - +----------------------------+ -+---------------+ | +--------------------+ | -| | | | +--------------------+ | -| API server LB +---------> | | | | -| | | | | Control Plane/etcd | | -+---------------+ | | | node(s) | | - | +-+ | | - | 
+--------------------+ | - | ^ | - | | | - | v | - | +--------------------+ | - | | +--------------------+ | - | | | | | - | | | Worker | | - | | | node(s) | | - | +-+ | | - | +--------------------+ | - +----------------------------+ -``` - -## Requirements - -* Terraform 1.3.7 - -## Quickstart - -### Export Variables - -* Your NIFCLOUD credentials: - - ```bash - export NIFCLOUD_ACCESS_KEY_ID= - export NIFCLOUD_SECRET_ACCESS_KEY= - ``` - -* The SSH KEY used to connect to the instance: - * FYI: [Cloud Help(SSH Key)](https://pfs.nifcloud.com/help/ssh.htm) - - ```bash - export TF_VAR_SSHKEY_NAME= - ``` - -* The IP address to connect to bastion server: - - ```bash - export TF_VAR_working_instance_ip=$(curl ifconfig.me) - ``` - -### Create The Infrastructure - -* Run terraform: - - ```bash - terraform init - terraform apply -var-file ./sample-inventory/cluster.tfvars - ``` - -### Setup The Kubernetes - -* Generate cluster configuration file: - - ```bash - ./generate-inventory.sh > sample-inventory/inventory.ini - ``` - -* Export Variables: - - ```bash - BASTION_IP=$(terraform output -json | jq -r '.kubernetes_cluster.value.bastion_info | to_entries[].value.public_ip') - API_LB_IP=$(terraform output -json | jq -r '.kubernetes_cluster.value.control_plane_lb') - CP01_IP=$(terraform output -json | jq -r '.kubernetes_cluster.value.control_plane_info | to_entries[0].value.private_ip') - export ANSIBLE_SSH_ARGS="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ProxyCommand=\"ssh root@${BASTION_IP} -W %h:%p\"" - ``` - -* Set ssh-agent" - - ```bash - eval `ssh-agent` - ssh-add - ``` - -* Run cluster.yml playbook: - - ```bash - cd ./../../../ - ansible-playbook -i contrib/terraform/nifcloud/inventory/inventory.ini cluster.yml - ``` - -### Connecting to Kubernetes - -* [Install kubectl](https://kubernetes.io/docs/tasks/tools/) on the localhost -* Fetching kubeconfig file: - - ```bash - mkdir -p ~/.kube - scp -o ProxyCommand="ssh root@${BASTION_IP} -W %h:%p" root@${CP01_IP}:/etc/kubernetes/admin.conf ~/.kube/config - ``` - -* Rewrite /etc/hosts - - ```bash - sudo echo "${API_LB_IP} lb-apiserver.kubernetes.local" >> /etc/hosts - ``` - -* Run kubectl - - ```bash - kubectl get node - ``` - -## Variables - -* `region`: Region where to run the cluster -* `az`: Availability zone where to run the cluster -* `private_ip_bn`: Private ip address of bastion server -* `private_network_cidr`: Subnet of private network -* `instances_cp`: Machine to provision as Control Plane. Key of this object will be used as part of the machine' name - * `private_ip`: private ip address of machine -* `instances_wk`: Machine to provision as Worker Node. Key of this object will be used as part of the machine' name - * `private_ip`: private ip address of machine -* `instance_key_name`: The key name of the Key Pair to use for the instance -* `instance_type_bn`: The instance type of bastion server -* `instance_type_wk`: The instance type of worker node -* `instance_type_cp`: The instance type of control plane -* `image_name`: OS image used for the instance -* `working_instance_ip`: The IP address to connect to bastion server -* `accounting_type`: Accounting type. (1: monthly, 2: pay per use) diff --git a/contrib/terraform/nifcloud/generate-inventory.sh b/contrib/terraform/nifcloud/generate-inventory.sh deleted file mode 100755 index 5d90eb5f426..00000000000 --- a/contrib/terraform/nifcloud/generate-inventory.sh +++ /dev/null @@ -1,64 +0,0 @@ -#!/bin/bash - -# -# Generates a inventory file based on the terraform output. 
-# After provisioning a cluster, simply run this command and supply the terraform state file -# Default state file is terraform.tfstate -# - -set -e - -TF_OUT=$(terraform output -json) - -CONTROL_PLANES=$(jq -r '.kubernetes_cluster.value.control_plane_info | to_entries[]' <(echo "${TF_OUT}")) -WORKERS=$(jq -r '.kubernetes_cluster.value.worker_info | to_entries[]' <(echo "${TF_OUT}")) -mapfile -t CONTROL_PLANE_NAMES < <(jq -r '.key' <(echo "${CONTROL_PLANES}")) -mapfile -t WORKER_NAMES < <(jq -r '.key' <(echo "${WORKERS}")) - -API_LB=$(jq -r '.kubernetes_cluster.value.control_plane_lb' <(echo "${TF_OUT}")) - -echo "[all]" -# Generate control plane hosts -i=1 -for name in "${CONTROL_PLANE_NAMES[@]}"; do - private_ip=$(jq -r '. | select( .key=='"\"${name}\""' ) | .value.private_ip' <(echo "${CONTROL_PLANES}")) - echo "${name} ansible_user=root ansible_host=${private_ip} access_ip=${private_ip} ip=${private_ip} etcd_member_name=etcd${i}" - i=$(( i + 1 )) -done - -# Generate worker hosts -for name in "${WORKER_NAMES[@]}"; do - private_ip=$(jq -r '. | select( .key=='"\"${name}\""' ) | .value.private_ip' <(echo "${WORKERS}")) - echo "${name} ansible_user=root ansible_host=${private_ip} access_ip=${private_ip} ip=${private_ip}" -done - -API_LB=$(jq -r '.kubernetes_cluster.value.control_plane_lb' <(echo "${TF_OUT}")) - -echo "" -echo "[all:vars]" -echo "upstream_dns_servers=['8.8.8.8','8.8.4.4']" -echo "loadbalancer_apiserver={'address':'${API_LB}','port':'6443'}" - - -echo "" -echo "[kube_control_plane]" -for name in "${CONTROL_PLANE_NAMES[@]}"; do - echo "${name}" -done - -echo "" -echo "[etcd]" -for name in "${CONTROL_PLANE_NAMES[@]}"; do - echo "${name}" -done - -echo "" -echo "[kube_node]" -for name in "${WORKER_NAMES[@]}"; do - echo "${name}" -done - -echo "" -echo "[k8s_cluster:children]" -echo "kube_control_plane" -echo "kube_node" diff --git a/contrib/terraform/nifcloud/main.tf b/contrib/terraform/nifcloud/main.tf deleted file mode 100644 index d5a070967bc..00000000000 --- a/contrib/terraform/nifcloud/main.tf +++ /dev/null @@ -1,36 +0,0 @@ -provider "nifcloud" { - region = var.region -} - -module "kubernetes_cluster" { - source = "./modules/kubernetes-cluster" - - availability_zone = var.az - prefix = "dev" - - private_network_cidr = var.private_network_cidr - - instance_key_name = var.instance_key_name - instances_cp = var.instances_cp - instances_wk = var.instances_wk - image_name = var.image_name - - instance_type_bn = var.instance_type_bn - instance_type_cp = var.instance_type_cp - instance_type_wk = var.instance_type_wk - - private_ip_bn = var.private_ip_bn - - additional_lb_filter = [var.working_instance_ip] -} - -resource "nifcloud_security_group_rule" "ssh_from_bastion" { - security_group_names = [ - module.kubernetes_cluster.security_group_name.bastion - ] - type = "IN" - from_port = 22 - to_port = 22 - protocol = "TCP" - cidr_ip = var.working_instance_ip -} diff --git a/contrib/terraform/nifcloud/modules/kubernetes-cluster/main.tf b/contrib/terraform/nifcloud/modules/kubernetes-cluster/main.tf deleted file mode 100644 index 0e5fd383da9..00000000000 --- a/contrib/terraform/nifcloud/modules/kubernetes-cluster/main.tf +++ /dev/null @@ -1,301 +0,0 @@ -################################################# -## -## Local variables -## -locals { - # e.g. east-11 is 11 - az_num = reverse(split("-", var.availability_zone))[0] - # e.g. 
east-11 is e11 - az_short_name = "${substr(reverse(split("-", var.availability_zone))[1], 0, 1)}${local.az_num}" - - # Port used by the protocol - port_ssh = 22 - port_kubectl = 6443 - port_kubelet = 10250 - - # calico: https://docs.tigera.io/calico/latest/getting-started/kubernetes/requirements#network-requirements - port_bgp = 179 - port_vxlan = 4789 - port_etcd = 2379 -} - -################################################# -## -## General -## - -# data -data "nifcloud_image" "this" { - image_name = var.image_name -} - -# private lan -resource "nifcloud_private_lan" "this" { - private_lan_name = "${var.prefix}lan" - availability_zone = var.availability_zone - cidr_block = var.private_network_cidr - accounting_type = var.accounting_type -} - -################################################# -## -## Bastion -## -resource "nifcloud_security_group" "bn" { - group_name = "${var.prefix}bn" - description = "${var.prefix} bastion" - availability_zone = var.availability_zone -} - -resource "nifcloud_instance" "bn" { - - instance_id = "${local.az_short_name}${var.prefix}bn01" - security_group = nifcloud_security_group.bn.group_name - instance_type = var.instance_type_bn - - user_data = templatefile("${path.module}/templates/userdata.tftpl", { - private_ip_address = var.private_ip_bn - ssh_port = local.port_ssh - hostname = "${local.az_short_name}${var.prefix}bn01" - }) - - availability_zone = var.availability_zone - accounting_type = var.accounting_type - image_id = data.nifcloud_image.this.image_id - key_name = var.instance_key_name - - network_interface { - network_id = "net-COMMON_GLOBAL" - } - network_interface { - network_id = nifcloud_private_lan.this.network_id - ip_address = "static" - } - - # The image_id changes when the OS image type is demoted from standard to public. - lifecycle { - ignore_changes = [ - image_id, - user_data, - ] - } -} - -################################################# -## -## Control Plane -## -resource "nifcloud_security_group" "cp" { - group_name = "${var.prefix}cp" - description = "${var.prefix} control plane" - availability_zone = var.availability_zone -} - -resource "nifcloud_instance" "cp" { - for_each = var.instances_cp - - instance_id = "${local.az_short_name}${var.prefix}${each.key}" - security_group = nifcloud_security_group.cp.group_name - instance_type = var.instance_type_cp - user_data = templatefile("${path.module}/templates/userdata.tftpl", { - private_ip_address = each.value.private_ip - ssh_port = local.port_ssh - hostname = "${local.az_short_name}${var.prefix}${each.key}" - }) - - availability_zone = var.availability_zone - accounting_type = var.accounting_type - image_id = data.nifcloud_image.this.image_id - key_name = var.instance_key_name - - network_interface { - network_id = "net-COMMON_GLOBAL" - } - network_interface { - network_id = nifcloud_private_lan.this.network_id - ip_address = "static" - } - - # The image_id changes when the OS image type is demoted from standard to public. 
- lifecycle { - ignore_changes = [ - image_id, - user_data, - ] - } -} - -resource "nifcloud_load_balancer" "this" { - load_balancer_name = "${local.az_short_name}${var.prefix}cp" - accounting_type = var.accounting_type - balancing_type = 1 // Round-Robin - load_balancer_port = local.port_kubectl - instance_port = local.port_kubectl - instances = [for v in nifcloud_instance.cp : v.instance_id] - filter = concat( - [for k, v in nifcloud_instance.cp : v.public_ip], - [for k, v in nifcloud_instance.wk : v.public_ip], - var.additional_lb_filter, - ) - filter_type = 1 // Allow -} - -################################################# -## -## Worker -## -resource "nifcloud_security_group" "wk" { - group_name = "${var.prefix}wk" - description = "${var.prefix} worker" - availability_zone = var.availability_zone -} - -resource "nifcloud_instance" "wk" { - for_each = var.instances_wk - - instance_id = "${local.az_short_name}${var.prefix}${each.key}" - security_group = nifcloud_security_group.wk.group_name - instance_type = var.instance_type_wk - user_data = templatefile("${path.module}/templates/userdata.tftpl", { - private_ip_address = each.value.private_ip - ssh_port = local.port_ssh - hostname = "${local.az_short_name}${var.prefix}${each.key}" - }) - - availability_zone = var.availability_zone - accounting_type = var.accounting_type - image_id = data.nifcloud_image.this.image_id - key_name = var.instance_key_name - - network_interface { - network_id = "net-COMMON_GLOBAL" - } - network_interface { - network_id = nifcloud_private_lan.this.network_id - ip_address = "static" - } - - # The image_id changes when the OS image type is demoted from standard to public. - lifecycle { - ignore_changes = [ - image_id, - user_data, - ] - } -} - -################################################# -## -## Security Group Rule: Kubernetes -## - -# ssh -resource "nifcloud_security_group_rule" "ssh_from_bastion" { - security_group_names = [ - nifcloud_security_group.wk.group_name, - nifcloud_security_group.cp.group_name, - ] - type = "IN" - from_port = local.port_ssh - to_port = local.port_ssh - protocol = "TCP" - source_security_group_name = nifcloud_security_group.bn.group_name -} - -# kubectl -resource "nifcloud_security_group_rule" "kubectl_from_worker" { - security_group_names = [ - nifcloud_security_group.cp.group_name, - ] - type = "IN" - from_port = local.port_kubectl - to_port = local.port_kubectl - protocol = "TCP" - source_security_group_name = nifcloud_security_group.wk.group_name -} - -# kubelet -resource "nifcloud_security_group_rule" "kubelet_from_worker" { - security_group_names = [ - nifcloud_security_group.cp.group_name, - ] - type = "IN" - from_port = local.port_kubelet - to_port = local.port_kubelet - protocol = "TCP" - source_security_group_name = nifcloud_security_group.wk.group_name -} - -resource "nifcloud_security_group_rule" "kubelet_from_control_plane" { - security_group_names = [ - nifcloud_security_group.wk.group_name, - ] - type = "IN" - from_port = local.port_kubelet - to_port = local.port_kubelet - protocol = "TCP" - source_security_group_name = nifcloud_security_group.cp.group_name -} - -################################################# -## -## Security Group Rule: calico -## - -# vslan -resource "nifcloud_security_group_rule" "vxlan_from_control_plane" { - security_group_names = [ - nifcloud_security_group.wk.group_name, - ] - type = "IN" - from_port = local.port_vxlan - to_port = local.port_vxlan - protocol = "UDP" - source_security_group_name = nifcloud_security_group.cp.group_name 
-} - -resource "nifcloud_security_group_rule" "vxlan_from_worker" { - security_group_names = [ - nifcloud_security_group.cp.group_name, - ] - type = "IN" - from_port = local.port_vxlan - to_port = local.port_vxlan - protocol = "UDP" - source_security_group_name = nifcloud_security_group.wk.group_name -} - -# bgp -resource "nifcloud_security_group_rule" "bgp_from_control_plane" { - security_group_names = [ - nifcloud_security_group.wk.group_name, - ] - type = "IN" - from_port = local.port_bgp - to_port = local.port_bgp - protocol = "TCP" - source_security_group_name = nifcloud_security_group.cp.group_name -} - -resource "nifcloud_security_group_rule" "bgp_from_worker" { - security_group_names = [ - nifcloud_security_group.cp.group_name, - ] - type = "IN" - from_port = local.port_bgp - to_port = local.port_bgp - protocol = "TCP" - source_security_group_name = nifcloud_security_group.wk.group_name -} - -# etcd -resource "nifcloud_security_group_rule" "etcd_from_worker" { - security_group_names = [ - nifcloud_security_group.cp.group_name, - ] - type = "IN" - from_port = local.port_etcd - to_port = local.port_etcd - protocol = "TCP" - source_security_group_name = nifcloud_security_group.wk.group_name -} diff --git a/contrib/terraform/nifcloud/modules/kubernetes-cluster/outputs.tf b/contrib/terraform/nifcloud/modules/kubernetes-cluster/outputs.tf deleted file mode 100644 index a6232f821da..00000000000 --- a/contrib/terraform/nifcloud/modules/kubernetes-cluster/outputs.tf +++ /dev/null @@ -1,48 +0,0 @@ -output "control_plane_lb" { - description = "The DNS name of LB for control plane" - value = nifcloud_load_balancer.this.dns_name -} - -output "security_group_name" { - description = "The security group used in the cluster" - value = { - bastion = nifcloud_security_group.bn.group_name, - control_plane = nifcloud_security_group.cp.group_name, - worker = nifcloud_security_group.wk.group_name, - } -} - -output "private_network_id" { - description = "The private network used in the cluster" - value = nifcloud_private_lan.this.id -} - -output "bastion_info" { - description = "The basion information in cluster" - value = { (nifcloud_instance.bn.instance_id) : { - instance_id = nifcloud_instance.bn.instance_id, - unique_id = nifcloud_instance.bn.unique_id, - private_ip = nifcloud_instance.bn.private_ip, - public_ip = nifcloud_instance.bn.public_ip, - } } -} - -output "worker_info" { - description = "The worker information in cluster" - value = { for v in nifcloud_instance.wk : v.instance_id => { - instance_id = v.instance_id, - unique_id = v.unique_id, - private_ip = v.private_ip, - public_ip = v.public_ip, - } } -} - -output "control_plane_info" { - description = "The control plane information in cluster" - value = { for v in nifcloud_instance.cp : v.instance_id => { - instance_id = v.instance_id, - unique_id = v.unique_id, - private_ip = v.private_ip, - public_ip = v.public_ip, - } } -} diff --git a/contrib/terraform/nifcloud/modules/kubernetes-cluster/templates/userdata.tftpl b/contrib/terraform/nifcloud/modules/kubernetes-cluster/templates/userdata.tftpl deleted file mode 100644 index 55e626a2a0f..00000000000 --- a/contrib/terraform/nifcloud/modules/kubernetes-cluster/templates/userdata.tftpl +++ /dev/null @@ -1,45 +0,0 @@ -#!/bin/bash - -################################################# -## -## IP Address -## -configure_private_ip_address () { - cat << EOS > /etc/netplan/01-netcfg.yaml -network: - version: 2 - renderer: networkd - ethernets: - ens192: - dhcp4: yes - dhcp6: yes - dhcp-identifier: mac 
- ens224: - dhcp4: no - dhcp6: no - addresses: [${private_ip_address}] -EOS - netplan apply -} -configure_private_ip_address - -################################################# -## -## SSH -## -configure_ssh_port () { - sed -i 's/^#*Port [0-9]*/Port ${ssh_port}/' /etc/ssh/sshd_config -} -configure_ssh_port - -################################################# -## -## Hostname -## -hostnamectl set-hostname ${hostname} - -################################################# -## -## Disable swap files genereated by systemd-gpt-auto-generator -## -systemctl mask "dev-sda3.swap" diff --git a/contrib/terraform/nifcloud/modules/kubernetes-cluster/terraform.tf b/contrib/terraform/nifcloud/modules/kubernetes-cluster/terraform.tf deleted file mode 100644 index 97ef4847bf2..00000000000 --- a/contrib/terraform/nifcloud/modules/kubernetes-cluster/terraform.tf +++ /dev/null @@ -1,9 +0,0 @@ -terraform { - required_version = ">=1.3.7" - required_providers { - nifcloud = { - source = "nifcloud/nifcloud" - version = ">= 1.8.0, < 2.0.0" - } - } -} diff --git a/contrib/terraform/nifcloud/modules/kubernetes-cluster/variables.tf b/contrib/terraform/nifcloud/modules/kubernetes-cluster/variables.tf deleted file mode 100644 index 65c11fe2029..00000000000 --- a/contrib/terraform/nifcloud/modules/kubernetes-cluster/variables.tf +++ /dev/null @@ -1,81 +0,0 @@ -variable "availability_zone" { - description = "The availability zone" - type = string -} - -variable "prefix" { - description = "The prefix for the entire cluster" - type = string - validation { - condition = length(var.prefix) <= 5 - error_message = "Must be a less than 5 character long." - } -} - -variable "private_network_cidr" { - description = "The subnet of private network" - type = string - validation { - condition = can(cidrnetmask(var.private_network_cidr)) - error_message = "Must be a valid IPv4 CIDR block address." - } -} - -variable "private_ip_bn" { - description = "Private IP of bastion server" - type = string -} - -variable "instances_cp" { - type = map(object({ - private_ip = string - })) -} - -variable "instances_wk" { - type = map(object({ - private_ip = string - })) -} - -variable "instance_key_name" { - description = "The key name of the Key Pair to use for the instance" - type = string -} - -variable "instance_type_bn" { - description = "The instance type of bastion server" - type = string -} - -variable "instance_type_wk" { - description = "The instance type of worker" - type = string -} - -variable "instance_type_cp" { - description = "The instance type of control plane" - type = string -} - -variable "image_name" { - description = "The name of image" - type = string -} - -variable "additional_lb_filter" { - description = "Additional LB filter" - type = list(string) -} - -variable "accounting_type" { - type = string - default = "1" - validation { - condition = anytrue([ - var.accounting_type == "1", // Monthly - var.accounting_type == "2", // Pay per use - ]) - error_message = "Must be a 1 or 2." 
- } -} diff --git a/contrib/terraform/nifcloud/output.tf b/contrib/terraform/nifcloud/output.tf deleted file mode 100644 index dcdeacba2c8..00000000000 --- a/contrib/terraform/nifcloud/output.tf +++ /dev/null @@ -1,3 +0,0 @@ -output "kubernetes_cluster" { - value = module.kubernetes_cluster -} diff --git a/contrib/terraform/nifcloud/sample-inventory/cluster.tfvars b/contrib/terraform/nifcloud/sample-inventory/cluster.tfvars deleted file mode 100644 index 3410a54a886..00000000000 --- a/contrib/terraform/nifcloud/sample-inventory/cluster.tfvars +++ /dev/null @@ -1,22 +0,0 @@ -region = "jp-west-1" -az = "west-11" - -instance_key_name = "deployerkey" - -instance_type_bn = "e-medium" -instance_type_cp = "e-medium" -instance_type_wk = "e-medium" - -private_network_cidr = "192.168.30.0/24" -instances_cp = { - "cp01" : { private_ip : "192.168.30.11/24" } - "cp02" : { private_ip : "192.168.30.12/24" } - "cp03" : { private_ip : "192.168.30.13/24" } -} -instances_wk = { - "wk01" : { private_ip : "192.168.30.21/24" } - "wk02" : { private_ip : "192.168.30.22/24" } -} -private_ip_bn = "192.168.30.10/24" - -image_name = "Ubuntu Server 22.04 LTS" diff --git a/contrib/terraform/nifcloud/sample-inventory/group_vars b/contrib/terraform/nifcloud/sample-inventory/group_vars deleted file mode 120000 index 37359582379..00000000000 --- a/contrib/terraform/nifcloud/sample-inventory/group_vars +++ /dev/null @@ -1 +0,0 @@ -../../../../inventory/sample/group_vars \ No newline at end of file diff --git a/contrib/terraform/nifcloud/terraform.tf b/contrib/terraform/nifcloud/terraform.tf deleted file mode 100644 index 9a14bc665af..00000000000 --- a/contrib/terraform/nifcloud/terraform.tf +++ /dev/null @@ -1,9 +0,0 @@ -terraform { - required_version = ">=1.3.7" - required_providers { - nifcloud = { - source = "nifcloud/nifcloud" - version = "1.8.0" - } - } -} diff --git a/contrib/terraform/nifcloud/variables.tf b/contrib/terraform/nifcloud/variables.tf deleted file mode 100644 index 558655ffe8a..00000000000 --- a/contrib/terraform/nifcloud/variables.tf +++ /dev/null @@ -1,77 +0,0 @@ -variable "region" { - description = "The region" - type = string -} - -variable "az" { - description = "The availability zone" - type = string -} - -variable "private_ip_bn" { - description = "Private IP of bastion server" - type = string -} - -variable "private_network_cidr" { - description = "The subnet of private network" - type = string - validation { - condition = can(cidrnetmask(var.private_network_cidr)) - error_message = "Must be a valid IPv4 CIDR block address." - } -} - -variable "instances_cp" { - type = map(object({ - private_ip = string - })) -} - -variable "instances_wk" { - type = map(object({ - private_ip = string - })) -} - -variable "instance_key_name" { - description = "The key name of the Key Pair to use for the instance" - type = string -} - -variable "instance_type_bn" { - description = "The instance type of bastion server" - type = string -} - -variable "instance_type_wk" { - description = "The instance type of worker" - type = string -} - -variable "instance_type_cp" { - description = "The instance type of control plane" - type = string -} - -variable "image_name" { - description = "The name of image" - type = string -} - -variable "working_instance_ip" { - description = "The IP address to connect to bastion server." 
- type = string -} - -variable "accounting_type" { - type = string - default = "2" - validation { - condition = anytrue([ - var.accounting_type == "1", // Monthly - var.accounting_type == "2", // Pay per use - ]) - error_message = "Must be a 1 or 2." - } -} diff --git a/contrib/terraform/openstack/.gitignore b/contrib/terraform/openstack/.gitignore deleted file mode 100644 index 7e4921aa2c5..00000000000 --- a/contrib/terraform/openstack/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -.terraform -*.tfvars -!sample-inventory/cluster.tfvars -*.tfstate -*.tfstate.backup diff --git a/contrib/terraform/openstack/README.md b/contrib/terraform/openstack/README.md deleted file mode 100644 index 73f8e39c833..00000000000 --- a/contrib/terraform/openstack/README.md +++ /dev/null @@ -1,801 +0,0 @@ -# Kubernetes on OpenStack with Terraform - -Provision a Kubernetes cluster with [Terraform](https://www.terraform.io) on -OpenStack. - -## Status - -This will install a Kubernetes cluster on an OpenStack Cloud. It should work on -most modern installs of OpenStack that support the basic services. - -### Known compatible public clouds - -- [Auro](https://auro.io/) -- [Betacloud](https://www.betacloud.io/) -- [CityCloud](https://www.citycloud.com/) -- [DreamHost](https://www.dreamhost.com/cloud/computing/) -- [ELASTX](https://elastx.se/) -- [EnterCloudSuite](https://www.entercloudsuite.com/) -- [FugaCloud](https://fuga.cloud/) -- [Open Telekom Cloud](https://cloud.telekom.de/) -- [OVH](https://www.ovh.com/) -- [Rackspace](https://www.rackspace.com/) -- [Safespring](https://www.safespring.com) -- [Ultimum](https://ultimum.io/) -- [VexxHost](https://vexxhost.com/) -- [Zetta](https://www.zetta.io/) -- [Cloudify](https://www.cloudify.ro/en) - -## Approach - -The terraform configuration inspects variables found in -[variables.tf](variables.tf) to create resources in your OpenStack cluster. -There is a [python script](../terraform.py) that reads the generated`.tfstate` -file to generate a dynamic inventory that is consumed by the main ansible script -to actually install kubernetes and stand up the cluster. - -### Networking - -The configuration includes creating a private subnet with a router to the -external net. It will allocate floating IPs from a pool and assign them to the -hosts where that makes sense. You have the option of creating bastion hosts -inside the private subnet to access the nodes there. Alternatively, a node with -a floating IP can be used as a jump host to nodes without. - -#### Using an existing router - -It is possible to use an existing router instead of creating one. To use an -existing router set the router\_id variable to the uuid of the router you wish -to use. - -For example: - -```ShellSession -router_id = "00c542e7-6f46-4535-ae95-984c7f0391a3" -``` - -### Kubernetes Nodes - -You can create many different kubernetes topologies by setting the number of -different classes of hosts. For each class there are options for allocating -floating IP addresses or not. - -- Control plane nodes with etcd -- Control plane nodes without etcd -- Standalone etcd hosts -- Kubernetes worker nodes - -Note that the Ansible script will report an invalid configuration if you wind up -with an even number of etcd instances since that is not a valid configuration. This -restriction includes standalone etcd nodes that are deployed in a cluster along with -control plane nodes with etcd replicas. 
As an example, if you have three control plane -nodes with etcd replicas and three standalone etcd nodes, the script will fail since -there are now six total etcd replicas. - -### GlusterFS shared file system - -The Terraform configuration supports provisioning of an optional GlusterFS -shared file system based on a separate set of VMs. To enable this, you need to -specify: - -- the number of Gluster hosts (minimum 2) -- Size of the non-ephemeral volumes to be attached to store the GlusterFS bricks -- Other properties related to provisioning the hosts - -Even if you are using Flatcar Container Linux by Kinvolk for your cluster, you will still -need the GlusterFS VMs to be based on either Debian or RedHat based images. -Flatcar Container Linux by Kinvolk cannot serve GlusterFS, but can connect to it through -binaries available on hyperkube v1.4.3_coreos.0 or higher. - -## Requirements - -- [Install Terraform](https://www.terraform.io/intro/getting-started/install.html) 0.14 or later -- [Install Ansible](http://docs.ansible.com/ansible/latest/intro_installation.html) -- you already have a suitable OS image in Glance -- you already have a floating IP pool created -- you have security groups enabled -- you have a pair of keys generated that can be used to secure the new hosts - -## Module Architecture - -The configuration is divided into four modules: - -- Network -- Loadbalancer -- IPs -- Compute - -The main reason for splitting the configuration up in this way is to easily -accommodate situations where floating IPs are limited by a quota or if you have -any external references to the floating IP (e.g. DNS) that would otherwise have -to be updated. - -You can force your existing IPs by modifying the compute variables in -`kubespray.tf` as follows: - -```ini -k8s_master_fips = ["151.101.129.67"] -k8s_node_fips = ["151.101.129.68"] -``` - -## Terraform - -Terraform will be used to provision all of the OpenStack resources with base software as appropriate. - -### Configuration - -#### Inventory files - -Create an inventory directory for your cluster by copying the existing sample and linking the `hosts` script (used to build the inventory based on Terraform state): - -```ShellSession -cp -LRp contrib/terraform/openstack/sample-inventory inventory/$CLUSTER -cd inventory/$CLUSTER -ln -s ../../contrib/terraform/openstack/hosts -ln -s ../../contrib -``` - -This will be the base for subsequent Terraform commands. - -#### OpenStack access and credentials - -No provider variables are hardcoded inside `variables.tf` because Terraform -supports various authentication methods for OpenStack: the older script and -environment method (using `openrc`) as well as a newer declarative method, and -different OpenStack environments may support Identity API version 2 or 3. - -These are examples and may vary depending on your OpenStack cloud provider, -for an exhaustive list on how to authenticate on OpenStack with Terraform -please read the [OpenStack provider documentation](https://www.terraform.io/docs/providers/openstack/). 
- -##### Declarative method (recommended) - -The recommended authentication method is to describe credentials in a YAML file `clouds.yaml` that can be stored in: - -- the current directory -- `~/.config/openstack` -- `/etc/openstack` - -`clouds.yaml`: - -```yaml -clouds: - mycloud: - auth: - auth_url: https://openstack:5000/v3 - username: "username" - project_name: "projectname" - project_id: projectid - user_domain_name: "Default" - password: "password" - region_name: "RegionOne" - interface: "public" - identity_api_version: 3 -``` - -If you have multiple clouds defined in your `clouds.yaml` file you can choose -the one you want to use with the environment variable `OS_CLOUD`: - -```ShellSession -export OS_CLOUD=mycloud -``` - -##### Openrc method - -When using classic environment variables, Terraform uses default `OS_*` -environment variables. A script suitable for your environment may be available -from Horizon under *Project* -> *Compute* -> *Access & Security* -> *API Access*. - -With identity v2: - -```ShellSession -source openrc - -env | grep OS - -OS_AUTH_URL=https://openstack:5000/v2.0 -OS_PROJECT_ID=projectid -OS_PROJECT_NAME=projectname -OS_USERNAME=username -OS_PASSWORD=password -OS_REGION_NAME=RegionOne -OS_INTERFACE=public -OS_IDENTITY_API_VERSION=2 -``` - -With identity v3: - -```ShellSession -source openrc - -env | grep OS - -OS_AUTH_URL=https://openstack:5000/v3 -OS_PROJECT_ID=projectid -OS_PROJECT_NAME=username -OS_PROJECT_DOMAIN_ID=default -OS_USERNAME=username -OS_PASSWORD=password -OS_REGION_NAME=RegionOne -OS_INTERFACE=public -OS_IDENTITY_API_VERSION=3 -OS_USER_DOMAIN_NAME=Default -``` - -Terraform does not support a mix of DomainName and DomainID, choose one or the other: - -- provider.openstack: You must provide exactly one of DomainID or DomainName to authenticate by Username - -```ShellSession -unset OS_USER_DOMAIN_NAME -export OS_USER_DOMAIN_ID=default -``` - -or - -```ShellSession -unset OS_PROJECT_DOMAIN_ID -set OS_PROJECT_DOMAIN_NAME=Default -``` - -#### Cluster variables - -The construction of the cluster is driven by values found in -[variables.tf](variables.tf). - -For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`. - -|Variable | Description | -|---------|-------------| -|`cluster_name` | All OpenStack resources will use the Terraform variable`cluster_name` (default`example`) in their name to make it easier to track. For example the first compute resource will be named`example-kubernetes-1`. | -|`az_list` | List of Availability Zones available in your OpenStack cluster. | -|`network_name` | The name to be given to the internal network that will be generated | -|`use_existing_network`| Use an existing network with the name of `network_name`. `false` by default | -|`network_dns_domain` | (Optional) The dns_domain for the internal network that will be generated | -|`dns_nameservers`| An array of DNS name server names to be used by hosts in the internal subnet. | -|`floatingip_pool` | Name of the pool from which floating IPs will be allocated | -|`k8s_master_fips` | A list of floating IPs that you have already pre-allocated; they will be attached to master nodes instead of creating new random floating IPs. | -|`bastion_fips` | A list of floating IPs that you have already pre-allocated; they will be attached to bastion node instead of creating new random floating IPs. 
| -|`external_net` | UUID of the external network that will be routed to | -|`flavor_k8s_master`,`flavor_k8s_node`,`flavor_etcd`, `flavor_bastion`,`flavor_gfs_node` | Flavor depends on your openstack installation, you can get available flavor IDs through `openstack flavor list` | -|`image`,`image_gfs`, `image_master` | Name of the image to use in provisioning the compute resources. Should already be loaded into glance. | -|`image_uuid`,`image_gfs_uuid`, `image_master_uuid` | UUID of the image to use in provisioning the compute resources. Should already be loaded into glance. | -|`ssh_user`,`ssh_user_gfs` | The username to ssh into the image with. This usually depends on the image you have selected | -|`public_key_path` | Path on your local workstation to the public key file you wish to use in creating the key pairs | -|`number_of_k8s_masters`, `number_of_k8s_masters_no_floating_ip` | Number of nodes that serve as both master and etcd. These can be provisioned with or without floating IP addresses| -|`number_of_k8s_masters_no_etcd`, `number_of_k8s_masters_no_floating_ip_no_etcd` | Number of nodes that serve as just master with no etcd. These can be provisioned with or without floating IP addresses | -|`number_of_etcd` | Number of pure etcd nodes | -|`number_of_k8s_nodes`, `number_of_k8s_nodes_no_floating_ip` | Kubernetes worker nodes. These can be provisioned with or without floating ip addresses. | -|`number_of_bastions` | Number of bastion hosts to create. Scripts assume this is really just zero or one | -|`number_of_gfs_nodes_no_floating_ip` | Number of gluster servers to provision. | -| `gfs_volume_size_in_gb` | Size of the non-ephemeral volumes to be attached to store the GlusterFS bricks | -|`supplementary_master_groups` | To add ansible groups to the masters, such as `kube_node` for tainting them as nodes, empty by default. | -|`supplementary_node_groups` | To add ansible groups to the nodes, such as `kube_ingress` for running ingress controller pods, empty by default. 
| -|`bastion_allowed_remote_ips` | List of CIDR allowed to initiate a SSH connection, `["0.0.0.0/0"]` by default | -|`bastion_allowed_remote_ipv6_ips` | List of IPv6 CIDR allowed to initiate a SSH connection, `["::/0"]` by default | -|`master_allowed_remote_ips` | List of CIDR blocks allowed to initiate an API connection, `["0.0.0.0/0"]` by default | -|`master_allowed_remote_ipv6_ips` | List of IPv6 CIDR blocks allowed to initiate an API connection, `["::/0"]` by default | -|`bastion_allowed_ports` | List of ports to open on bastion node, `[]` by default | -|`bastion_allowed_ports_ipv6` | List of ports to open on bastion node for IPv6 CIDR blocks, `[]` by default | -|`k8s_allowed_remote_ips` | List of CIDR allowed to initiate a SSH connection, empty by default | -|`k8s_allowed_remote_ips_ipv6` | List of IPv6 CIDR allowed to initiate a SSH connection, empty by default | -|`k8s_allowed_egress_ipv6_ips` | List of IPv6 CIDRs allowed for egress traffic, `["::/0"]` by default | -|`worker_allowed_ports` | List of ports to open on worker nodes, `[{ "protocol" = "tcp", "port_range_min" = 30000, "port_range_max" = 32767, "remote_ip_prefix" = "0.0.0.0/0"}]` by default | -|`worker_allowed_ports_ipv6` | List of ports to open on worker nodes for IPv6 CIDR blocks, `[{ "protocol" = "tcp", "port_range_min" = 30000, "port_range_max" = 32767, "remote_ip_prefix" = "::/0"}]` by default | -|`master_allowed_ports` | List of ports to open on master nodes, expected format is `[{ "protocol" = "tcp", "port_range_min" = 443, "port_range_max" = 443, "remote_ip_prefix" = "0.0.0.0/0"}]`, empty by default | -|`master_allowed_ports_ipv6` | List of ports to open on master nodes for IPv6 CIDR blocks, expected format is `[{ "protocol" = "tcp", "port_range_min" = 443, "port_range_max" = 443, "remote_ip_prefix" = "::/0"}]`, empty by default | -|`node_root_volume_size_in_gb` | Size of the root volume for nodes, 0 to use ephemeral storage | -|`master_root_volume_size_in_gb` | Size of the root volume for masters, 0 to use ephemeral storage | -|`master_volume_type` | Volume type of the root volume for control_plane, 'Default' by default | -|`node_volume_type` | Volume type of the root volume for nodes, 'Default' by default | -|`gfs_root_volume_size_in_gb` | Size of the root volume for gluster, 0 to use ephemeral storage | -|`etcd_root_volume_size_in_gb` | Size of the root volume for etcd nodes, 0 to use ephemeral storage | -|`bastion_root_volume_size_in_gb` | Size of the root volume for bastions, 0 to use ephemeral storage | -|`master_server_group_policy` | Enable and use openstack nova servergroups for masters with set policy, default: "" (disabled) | -|`node_server_group_policy` | Enable and use openstack nova servergroups for nodes with set policy, default: "" (disabled) | -|`etcd_server_group_policy` | Enable and use openstack nova servergroups for etcd with set policy, default: "" (disabled) | -|`additional_server_groups` | Extra server groups to create. Set "policy" to the policy for the group, expected format is `{"new-server-group" = {"policy" = "anti-affinity"}}`, default: {} (to not create any extra groups) | -|`use_access_ip` | If 1, nodes with floating IPs will transmit internal cluster traffic via floating IPs; if 0 private IPs will be used instead. Default value is 1. | -|`port_security_enabled` | Allow to disable port security by setting this to `false`. `true` by default | -|`force_null_port_security` | Set `null` instead of `true` or `false` for `port_security`. 
`false` by default | -|`k8s_nodes` | Map containing worker node definition, see explanation below | -|`k8s_masters` | Map containing master node definition, see explanation for k8s_nodes and `sample-inventory/cluster.tfvars` | -|`k8s_master_loadbalancer_enabled` | Enable and use an Octavia load balancer for the K8s master nodes | -|`k8s_master_loadbalancer_listener_port` | Define via which port the K8s Api should be exposed. `6443` by default | -|`k8s_master_loadbalancer_server_port` | Define via which port the K8S api is available on the master nodes. `6443` by default | -|`k8s_master_loadbalancer_public_ip` | Specify if an existing floating IP should be used for the load balancer. A new floating IP is assigned by default | - -##### k8s_nodes - -Allows a custom definition of worker nodes giving the operator full control over individual node flavor and availability zone placement. -To enable the use of this mode set the `number_of_k8s_nodes` and `number_of_k8s_nodes_no_floating_ip` variables to 0. -Then define your desired worker node configuration using the `k8s_nodes` variable. -The `az`, `flavor` and `floating_ip` parameters are mandatory. -The optional parameter `extra_groups` (a comma-delimited string) can be used to define extra inventory group memberships for specific nodes. - -```yaml -k8s_nodes: - node-name: - az: string # Name of the AZ - flavor: string # Flavor ID to use - floating_ip: bool # If floating IPs should be used or not - reserved_floating_ip: string # If floating_ip is true use existing floating IP, if reserved_floating_ip is an empty string and floating_ip is true, a new floating IP will be created - extra_groups: string # (optional) Additional groups to add for kubespray, defaults to no groups - image_id: string # (optional) Image ID to use, defaults to var.image_id or var.image - root_volume_size_in_gb: number # (optional) Size of the block storage to use as root disk, defaults to var.node_root_volume_size_in_gb or to use volume from flavor otherwise - volume_type: string # (optional) Volume type to use, defaults to var.node_volume_type - network_id: string # (optional) Use this network_id for the node, defaults to either var.network_id or ID of var.network_name - server_group: string # (optional) Server group to add this node to. If set, this has to be one specified in additional_server_groups, defaults to use the server group specified in node_server_group_policy - cloudinit: # (optional) Options for cloud-init - extra_partitions: # List of extra partitions (other than the root partition) to setup during creation - volume_path: string # Path to the volume to create partition for (e.g. /dev/vda ) - partition_path: string # Path to the partition (e.g. /dev/vda2 ) - mount_path: string # Path to where the partition should be mounted - partition_start: string # Where the partition should start (e.g. 10GB ). Note, if you set the partition_start to 0 there will be no space left for the root partition - partition_end: string # Where the partition should end (e.g. 10GB or -1 for end of volume) - netplan_critical_dhcp_interface: string # Name of interface to set the dhcp flag critical = true, to circumvent [this issue](https://bugs.launchpad.net/ubuntu/+source/systemd/+bug/1776013). 
-``` - -For example: - -```ini -k8s_nodes = { - "1" = { - "az" = "sto1" - "flavor" = "83d8b44a-26a0-4f02-a981-079446926445" - "floating_ip" = true - }, - "2" = { - "az" = "sto2" - "flavor" = "83d8b44a-26a0-4f02-a981-079446926445" - "floating_ip" = true - }, - "3" = { - "az" = "sto3" - "flavor" = "83d8b44a-26a0-4f02-a981-079446926445" - "floating_ip" = true - "extra_groups" = "calico_rr" - } -} -``` - -Would result in the same configuration as: - -```ini -number_of_k8s_nodes = 3 -flavor_k8s_node = "83d8b44a-26a0-4f02-a981-079446926445" -az_list = ["sto1", "sto2", "sto3"] -``` - -And: - -```ini -k8s_nodes = { - "ing-1" = { - "az" = "sto1" - "flavor" = "83d8b44a-26a0-4f02-a981-079446926445" - "floating_ip" = true - }, - "ing-2" = { - "az" = "sto2" - "flavor" = "83d8b44a-26a0-4f02-a981-079446926445" - "floating_ip" = true - }, - "ing-3" = { - "az" = "sto3" - "flavor" = "83d8b44a-26a0-4f02-a981-079446926445" - "floating_ip" = true - }, - "big-1" = { - "az" = "sto1" - "flavor" = "3f73fc93-ec61-4808-88df-2580d94c1a9b" - "floating_ip" = false - }, - "big-2" = { - "az" = "sto2" - "flavor" = "3f73fc93-ec61-4808-88df-2580d94c1a9b" - "floating_ip" = false - }, - "big-3" = { - "az" = "sto3" - "flavor" = "3f73fc93-ec61-4808-88df-2580d94c1a9b" - "floating_ip" = false - }, - "small-1" = { - "az" = "sto1" - "flavor" = "7a6a998f-ac7f-4fb8-a534-2175b254f75e" - "floating_ip" = false - }, - "small-2" = { - "az" = "sto2" - "flavor" = "7a6a998f-ac7f-4fb8-a534-2175b254f75e" - "floating_ip" = false - }, - "small-3" = { - "az" = "sto3" - "flavor" = "7a6a998f-ac7f-4fb8-a534-2175b254f75e" - "floating_ip" = false - } -} -``` - -Would result in three nodes in each availability zone each with their own separate naming, -flavor and floating ip configuration. - -The "schema": - -```ini -k8s_nodes = { - "key | node name suffix, must be unique" = { - "az" = string - "flavor" = string - "floating_ip" = bool - }, -} -``` - -All values are required. - -#### Terraform state files - -In the cluster's inventory folder, the following files might be created (either by Terraform -or manually), to prevent you from pushing them accidentally they are in a -`.gitignore` file in the `terraform/openstack` directory : - -- `.terraform` -- `.tfvars` -- `.tfstate` -- `.tfstate.backup` - -You can still add them manually if you want to. - -### Initialization - -Before Terraform can operate on your cluster you need to install the required -plugins. This is accomplished as follows: - -```ShellSession -cd inventory/$CLUSTER -terraform -chdir="../../contrib/terraform/openstack" init -``` - -This should finish fairly quickly telling you Terraform has successfully initialized and loaded necessary modules. - -### Customizing with cloud-init - -You can apply cloud-init based customization for the openstack instances before provisioning your cluster. -One common template is used for all instances. 
Adjust the file shown below:
-`contrib/terraform/openstack/modules/compute/templates/cloudinit.yaml.tmpl`
-For example, to enable OpenStack noVNC console access and `ansible_user=root` SSH access:
-
-```yaml
-#cloud-config
-## in some cases novnc console access is required
-## it requires ssh password to be set
-ssh_pwauth: yes
-chpasswd:
-  list: |
-    root:secret
-  expire: False
-
-## in some cases direct root ssh access via ssh key is required
-disable_root: false
-```
-
-### Provisioning cluster
-
-You can apply the Terraform configuration to your cluster with the following command
-issued from your cluster's inventory directory (`inventory/$CLUSTER`):
-
-```ShellSession
-terraform -chdir="../../contrib/terraform/openstack" apply -var-file=cluster.tfvars
-```
-
-If you chose to create a bastion host, this step will create
-`contrib/terraform/openstack/k8s_cluster.yml` with an SSH command that lets Ansible
-access your machines by tunneling through the bastion's IP address. If
-you want to manually handle the SSH tunneling to these machines, please delete
-or move that file. If you want to use this, just leave it there, as Ansible will
-pick it up automatically.
-
-### Destroying cluster
-
-You can destroy your new cluster with the following command issued from the cluster's inventory directory:
-
-```ShellSession
-terraform -chdir="../../contrib/terraform/openstack" destroy -var-file=cluster.tfvars
-```
-
-If you've started the Ansible run, it may also be a good idea to do some manual cleanup:
-
-- remove SSH keys from the destroyed cluster from your `~/.ssh/known_hosts` file
-- clean up any temporary cache files: `rm /tmp/$CLUSTER-*`
-
-### Debugging
-
-You can enable debugging output from Terraform by setting
-`OS_DEBUG` to `1` and `TF_LOG` to `DEBUG` before running the Terraform command.
-
-### Terraform output
-
-Terraform can output values that are useful for configuring Neutron/Octavia LBaaS or Cinder persistent volume provisioning as part of your Kubernetes deployment:
-
-- `private_subnet_id`: the subnet where your instances are running; use it as `openstack_lbaas_subnet_id`
-- `floating_network_id`: the network where the floating IPs are provisioned; use it as `openstack_lbaas_floating_network_id`
-
-## Ansible
-
-### Node access
-
-#### SSH
-
-Ensure your local ssh-agent is running and your SSH key has been added. This
-step is required by the Terraform provisioner:
-
-```ShellSession
-eval $(ssh-agent -s)
-ssh-add ~/.ssh/id_rsa
-```
-
-If you have deployed and destroyed a previous iteration of your cluster, you will need to clear out any stale keys from your SSH "known hosts" file (`~/.ssh/known_hosts`).
-
-#### Metadata variables
-
-The [Python script](../terraform.py) that reads the
-generated `.tfstate` file to generate a dynamic inventory recognizes
-some variables within a "metadata" block, defined in a "resource"
-block (example):
-
-```ini
-resource "openstack_compute_instance_v2" "example" {
-    ...
-    metadata {
-      ssh_user = "ubuntu"
-      prefer_ipv6 = true
-      python_bin = "/usr/bin/python3"
-    }
-    ...
-}
-```
-
-As the example shows, these let you define the SSH username for
-Ansible, a Python binary which is needed by Ansible if
-`/usr/bin/python` doesn't exist, and whether the IPv6 address of the
-instance should be preferred over IPv4.
-
-#### Bastion host
-
-Bastion access will be determined by:
-
-- Your choice on the number of bastion hosts (set by the `number_of_bastions` Terraform variable).
-- The existence of nodes/masters with floating IPs (set by `number_of_k8s_masters`, `number_of_k8s_nodes`, `number_of_k8s_masters_no_etcd` terraform variables). - -If you have a bastion host, your ssh traffic will be directly routed through it. This is regardless of whether you have masters/nodes with a floating IP assigned. -If you don't have a bastion host, but at least one of your masters/nodes have a floating IP, then ssh traffic will be tunneled by one of these machines. - -So, either a bastion host, or at least master/node with a floating IP are required. - -#### Test access - -Make sure you can connect to the hosts. Note that Flatcar Container Linux by Kinvolk will have a state `FAILED` due to Python not being present. This is okay, because Python will be installed during bootstrapping, so long as the hosts are not `UNREACHABLE`. - -```ShellSession -$ ansible -i inventory/$CLUSTER/hosts -m ping all -example-k8s_node-1 | SUCCESS => { - "changed": false, - "ping": "pong" -} -example-etcd-1 | SUCCESS => { - "changed": false, - "ping": "pong" -} -example-k8s-master-1 | SUCCESS => { - "changed": false, - "ping": "pong" -} -``` - -If it fails try to connect manually via SSH. It could be something as simple as a stale host key. - -### Configure cluster variables - -Edit `inventory/$CLUSTER/group_vars/all/all.yml`: - -- **bin_dir**: - -```yml -# Directory where the binaries will be installed -# Default: -# bin_dir: /usr/local/bin -# For Flatcar Container Linux by Kinvolk: -bin_dir: /opt/bin -``` - -- and **cloud_provider**: - -```yml -cloud_provider: openstack -``` - -Edit `inventory/$CLUSTER/group_vars/k8s_cluster/k8s_cluster.yml`: - -- Set variable **kube_network_plugin** to your desired networking plugin. - - **flannel** works out-of-the-box - - **calico** requires [configuring OpenStack Neutron ports](/docs/cloud_controllers/openstack.md) to allow service and pod subnets - -```yml -# Choose network plugin (calico or flannel) -# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing -kube_network_plugin: flannel -``` - -- Set variable **resolvconf_mode** - -```yml -# Can be docker_dns, host_resolvconf or none -# Default: -# resolvconf_mode: docker_dns -# For Flatcar Container Linux by Kinvolk: -resolvconf_mode: host_resolvconf -``` - -- Set max amount of attached cinder volume per host (default 256) - -```yml -node_volume_attach_limit: 26 -``` - -### Deploy Kubernetes - -```ShellSession -ansible-playbook --become -i inventory/$CLUSTER/hosts cluster.yml -``` - -This will take some time as there are many tasks to run. - -## Kubernetes - -### Set up kubectl - -1. [Install kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) on your workstation -2. Add a route to the internal IP of a master node (if needed): - -```ShellSession -sudo route add [master-internal-ip] gw [router-ip] -``` - -or - -```ShellSession -sudo route add -net [internal-subnet]/24 gw [router-ip] -``` - -1. List Kubernetes certificates & keys: - -```ShellSession -ssh [os-user]@[master-ip] sudo ls /etc/kubernetes/ssl/ -``` - -1. Get `admin`'s certificates and keys: - -```ShellSession -ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/admin-kube-master-1-key.pem > admin-key.pem -ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/admin-kube-master-1.pem > admin.pem -ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/ca.pem > ca.pem -``` - -1. 
Configure kubectl: - -```ShellSession -$ kubectl config set-cluster default-cluster --server=https://[master-internal-ip]:6443 \ - --certificate-authority=ca.pem - -$ kubectl config set-credentials default-admin \ - --certificate-authority=ca.pem \ - --client-key=admin-key.pem \ - --client-certificate=admin.pem - -$ kubectl config set-context default-system --cluster=default-cluster --user=default-admin -$ kubectl config use-context default-system -``` - -1. Check it: - -```ShellSession -kubectl version -``` - -## GlusterFS - -GlusterFS is not deployed by the standard `cluster.yml` playbook, see the -[GlusterFS playbook documentation](../../network-storage/glusterfs/README.md) -for instructions. - -Basically you will install Gluster as - -```ShellSession -ansible-playbook --become -i inventory/$CLUSTER/hosts ./contrib/network-storage/glusterfs/glusterfs.yml -``` - -## What's next - -Try out your new Kubernetes cluster with the [Hello Kubernetes service](https://kubernetes.io/docs/tasks/access-application-cluster/service-access-application-cluster/). - -## Appendix - -### Migration from `number_of_k8s_nodes*` to `k8s_nodes` - -If you currently have a cluster defined using the `number_of_k8s_nodes*` variables and wish -to migrate to the `k8s_nodes` style you can do it like so: - -```ShellSession -$ terraform state list -module.compute.data.openstack_images_image_v2.gfs_image -module.compute.data.openstack_images_image_v2.vm_image -module.compute.openstack_compute_floatingip_associate_v2.k8s_master[0] -module.compute.openstack_compute_floatingip_associate_v2.k8s_node[0] -module.compute.openstack_compute_floatingip_associate_v2.k8s_node[1] -module.compute.openstack_compute_floatingip_associate_v2.k8s_node[2] -module.compute.openstack_compute_instance_v2.k8s_master[0] -module.compute.openstack_compute_instance_v2.k8s_node[0] -module.compute.openstack_compute_instance_v2.k8s_node[1] -module.compute.openstack_compute_instance_v2.k8s_node[2] -module.compute.openstack_compute_keypair_v2.k8s -module.compute.openstack_compute_servergroup_v2.k8s_etcd[0] -module.compute.openstack_compute_servergroup_v2.k8s_master[0] -module.compute.openstack_compute_servergroup_v2.k8s_node[0] -module.compute.openstack_networking_secgroup_rule_v2.bastion[0] -module.compute.openstack_networking_secgroup_rule_v2.egress[0] -module.compute.openstack_networking_secgroup_rule_v2.k8s -module.compute.openstack_networking_secgroup_rule_v2.k8s_allowed_remote_ips[0] -module.compute.openstack_networking_secgroup_rule_v2.k8s_allowed_remote_ips[1] -module.compute.openstack_networking_secgroup_rule_v2.k8s_allowed_remote_ips[2] -module.compute.openstack_networking_secgroup_rule_v2.k8s_master[0] -module.compute.openstack_networking_secgroup_rule_v2.worker[0] -module.compute.openstack_networking_secgroup_rule_v2.worker[1] -module.compute.openstack_networking_secgroup_rule_v2.worker[2] -module.compute.openstack_networking_secgroup_rule_v2.worker[3] -module.compute.openstack_networking_secgroup_rule_v2.worker[4] -module.compute.openstack_networking_secgroup_v2.bastion[0] -module.compute.openstack_networking_secgroup_v2.k8s -module.compute.openstack_networking_secgroup_v2.k8s_master -module.compute.openstack_networking_secgroup_v2.worker -module.ips.null_resource.dummy_dependency -module.ips.openstack_networking_floatingip_v2.k8s_master[0] -module.ips.openstack_networking_floatingip_v2.k8s_node[0] -module.ips.openstack_networking_floatingip_v2.k8s_node[1] -module.ips.openstack_networking_floatingip_v2.k8s_node[2] 
-module.network.openstack_networking_network_v2.k8s[0] -module.network.openstack_networking_router_interface_v2.k8s[0] -module.network.openstack_networking_router_v2.k8s[0] -module.network.openstack_networking_subnet_v2.k8s[0] -$ terraform state mv 'module.compute.openstack_compute_floatingip_associate_v2.k8s_node[0]' 'module.compute.openstack_compute_floatingip_associate_v2.k8s_nodes["1"]' -Move "module.compute.openstack_compute_floatingip_associate_v2.k8s_node[0]" to "module.compute.openstack_compute_floatingip_associate_v2.k8s_nodes[\"1\"]" -Successfully moved 1 object(s). -$ terraform state mv 'module.compute.openstack_compute_floatingip_associate_v2.k8s_node[1]' 'module.compute.openstack_compute_floatingip_associate_v2.k8s_nodes["2"]' -Move "module.compute.openstack_compute_floatingip_associate_v2.k8s_node[1]" to "module.compute.openstack_compute_floatingip_associate_v2.k8s_nodes[\"2\"]" -Successfully moved 1 object(s). -$ terraform state mv 'module.compute.openstack_compute_floatingip_associate_v2.k8s_node[2]' 'module.compute.openstack_compute_floatingip_associate_v2.k8s_nodes["3"]' -Move "module.compute.openstack_compute_floatingip_associate_v2.k8s_node[2]" to "module.compute.openstack_compute_floatingip_associate_v2.k8s_nodes[\"3\"]" -Successfully moved 1 object(s). -$ terraform state mv 'module.compute.openstack_compute_instance_v2.k8s_node[0]' 'module.compute.openstack_compute_instance_v2.k8s_node["1"]' -Move "module.compute.openstack_compute_instance_v2.k8s_node[0]" to "module.compute.openstack_compute_instance_v2.k8s_node[\"1\"]" -Successfully moved 1 object(s). -$ terraform state mv 'module.compute.openstack_compute_instance_v2.k8s_node[1]' 'module.compute.openstack_compute_instance_v2.k8s_node["2"]' -Move "module.compute.openstack_compute_instance_v2.k8s_node[1]" to "module.compute.openstack_compute_instance_v2.k8s_node[\"2\"]" -Successfully moved 1 object(s). -$ terraform state mv 'module.compute.openstack_compute_instance_v2.k8s_node[2]' 'module.compute.openstack_compute_instance_v2.k8s_node["3"]' -Move "module.compute.openstack_compute_instance_v2.k8s_node[2]" to "module.compute.openstack_compute_instance_v2.k8s_node[\"3\"]" -Successfully moved 1 object(s). -$ terraform state mv 'module.ips.openstack_networking_floatingip_v2.k8s_node[0]' 'module.ips.openstack_networking_floatingip_v2.k8s_node["1"]' -Move "module.ips.openstack_networking_floatingip_v2.k8s_node[0]" to "module.ips.openstack_networking_floatingip_v2.k8s_node[\"1\"]" -Successfully moved 1 object(s). -$ terraform state mv 'module.ips.openstack_networking_floatingip_v2.k8s_node[1]' 'module.ips.openstack_networking_floatingip_v2.k8s_node["2"]' -Move "module.ips.openstack_networking_floatingip_v2.k8s_node[1]" to "module.ips.openstack_networking_floatingip_v2.k8s_node[\"2\"]" -Successfully moved 1 object(s). -$ terraform state mv 'module.ips.openstack_networking_floatingip_v2.k8s_node[2]' 'module.ips.openstack_networking_floatingip_v2.k8s_node["3"]' -Move "module.ips.openstack_networking_floatingip_v2.k8s_node[2]" to "module.ips.openstack_networking_floatingip_v2.k8s_node[\"3\"]" -Successfully moved 1 object(s). -``` - -Of course for nodes without floating ips those steps can be omitted. 
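
If you have more than a handful of nodes, the individual `terraform state mv` commands above can be scripted. A minimal sketch, assuming (as in the listing above) three legacy worker nodes indexed `0`..`2` that map onto `k8s_nodes` keys `"1"`..`"3"`; adjust the loop range and target keys to match your own `k8s_nodes` map, and drop the floating-IP moves for nodes without floating IPs:

```ShellSession
# run from the same directory used for the manual state mv commands above
for i in 0 1 2; do
  key=$((i + 1))
  terraform state mv \
    "module.compute.openstack_compute_floatingip_associate_v2.k8s_node[${i}]" \
    "module.compute.openstack_compute_floatingip_associate_v2.k8s_nodes[\"${key}\"]"
  terraform state mv \
    "module.compute.openstack_compute_instance_v2.k8s_node[${i}]" \
    "module.compute.openstack_compute_instance_v2.k8s_node[\"${key}\"]"
  terraform state mv \
    "module.ips.openstack_networking_floatingip_v2.k8s_node[${i}]" \
    "module.ips.openstack_networking_floatingip_v2.k8s_node[\"${key}\"]"
done
```

As with the manual commands, run `terraform plan` afterwards to confirm that no resources will be destroyed or recreated.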
diff --git a/contrib/terraform/openstack/hosts b/contrib/terraform/openstack/hosts deleted file mode 120000 index 804b6fa6069..00000000000 --- a/contrib/terraform/openstack/hosts +++ /dev/null @@ -1 +0,0 @@ -../terraform.py \ No newline at end of file diff --git a/contrib/terraform/openstack/kubespray.tf b/contrib/terraform/openstack/kubespray.tf deleted file mode 100644 index 556fa54340f..00000000000 --- a/contrib/terraform/openstack/kubespray.tf +++ /dev/null @@ -1,155 +0,0 @@ -module "network" { - source = "./modules/network" - - external_net = var.external_net - network_name = var.network_name - subnet_cidr = var.subnet_cidr - cluster_name = var.cluster_name - dns_nameservers = var.dns_nameservers - network_dns_domain = var.network_dns_domain - use_neutron = var.use_neutron - port_security_enabled = var.port_security_enabled - router_id = var.router_id -} - -module "ips" { - source = "./modules/ips" - - number_of_k8s_masters = var.number_of_k8s_masters - number_of_k8s_masters_no_etcd = var.number_of_k8s_masters_no_etcd - number_of_k8s_nodes = var.number_of_k8s_nodes - floatingip_pool = var.floatingip_pool - number_of_bastions = var.number_of_bastions - external_net = var.external_net - network_name = var.network_name - router_id = module.network.router_id - k8s_nodes = var.k8s_nodes - k8s_masters = var.k8s_masters - k8s_master_fips = var.k8s_master_fips - bastion_fips = var.bastion_fips - router_internal_port_id = module.network.router_internal_port_id -} - -module "compute" { - source = "./modules/compute" - - cluster_name = var.cluster_name - az_list = var.az_list - az_list_node = var.az_list_node - number_of_k8s_masters = var.number_of_k8s_masters - number_of_k8s_masters_no_etcd = var.number_of_k8s_masters_no_etcd - number_of_etcd = var.number_of_etcd - number_of_k8s_masters_no_floating_ip = var.number_of_k8s_masters_no_floating_ip - number_of_k8s_masters_no_floating_ip_no_etcd = var.number_of_k8s_masters_no_floating_ip_no_etcd - number_of_k8s_nodes = var.number_of_k8s_nodes - number_of_bastions = var.number_of_bastions - number_of_k8s_nodes_no_floating_ip = var.number_of_k8s_nodes_no_floating_ip - number_of_gfs_nodes_no_floating_ip = var.number_of_gfs_nodes_no_floating_ip - k8s_masters = var.k8s_masters - k8s_nodes = var.k8s_nodes - bastion_root_volume_size_in_gb = var.bastion_root_volume_size_in_gb - etcd_root_volume_size_in_gb = var.etcd_root_volume_size_in_gb - master_root_volume_size_in_gb = var.master_root_volume_size_in_gb - node_root_volume_size_in_gb = var.node_root_volume_size_in_gb - gfs_root_volume_size_in_gb = var.gfs_root_volume_size_in_gb - gfs_volume_size_in_gb = var.gfs_volume_size_in_gb - master_volume_type = var.master_volume_type - node_volume_type = var.node_volume_type - public_key_path = var.public_key_path - image = var.image - image_uuid = var.image_uuid - image_gfs = var.image_gfs - image_master = var.image_master - image_master_uuid = var.image_master_uuid - image_gfs_uuid = var.image_gfs_uuid - ssh_user = var.ssh_user - ssh_user_gfs = var.ssh_user_gfs - flavor_k8s_master = var.flavor_k8s_master - flavor_k8s_node = var.flavor_k8s_node - flavor_etcd = var.flavor_etcd - flavor_gfs_node = var.flavor_gfs_node - network_name = var.network_name - flavor_bastion = var.flavor_bastion - k8s_master_fips = module.ips.k8s_master_fips - k8s_master_no_etcd_fips = module.ips.k8s_master_no_etcd_fips - k8s_masters_fips = module.ips.k8s_masters_fips - k8s_node_fips = module.ips.k8s_node_fips - k8s_nodes_fips = module.ips.k8s_nodes_fips - bastion_fips = 
module.ips.bastion_fips - bastion_allowed_remote_ips = var.bastion_allowed_remote_ips - bastion_allowed_remote_ipv6_ips = var.bastion_allowed_remote_ipv6_ips - master_allowed_remote_ips = var.master_allowed_remote_ips - master_allowed_remote_ipv6_ips = var.master_allowed_remote_ipv6_ips - k8s_allowed_remote_ips = var.k8s_allowed_remote_ips - k8s_allowed_remote_ips_ipv6 = var.k8s_allowed_remote_ips_ipv6 - k8s_allowed_egress_ips = var.k8s_allowed_egress_ips - k8s_allowed_egress_ipv6_ips = var.k8s_allowed_egress_ipv6_ips - supplementary_master_groups = var.supplementary_master_groups - supplementary_node_groups = var.supplementary_node_groups - master_allowed_ports = var.master_allowed_ports - master_allowed_ports_ipv6 = var.master_allowed_ports_ipv6 - worker_allowed_ports = var.worker_allowed_ports - worker_allowed_ports_ipv6 = var.worker_allowed_ports_ipv6 - bastion_allowed_ports = var.bastion_allowed_ports - bastion_allowed_ports_ipv6 = var.bastion_allowed_ports_ipv6 - use_access_ip = var.use_access_ip - master_server_group_policy = var.master_server_group_policy - node_server_group_policy = var.node_server_group_policy - etcd_server_group_policy = var.etcd_server_group_policy - extra_sec_groups = var.extra_sec_groups - extra_sec_groups_name = var.extra_sec_groups_name - group_vars_path = var.group_vars_path - port_security_enabled = var.port_security_enabled - force_null_port_security = var.force_null_port_security - network_router_id = module.network.router_id - network_id = module.network.network_id - use_existing_network = var.use_existing_network - private_subnet_id = module.network.subnet_id - additional_server_groups = var.additional_server_groups - - depends_on = [ - module.network.subnet_id - ] -} - -module "loadbalancer" { - source = "./modules/loadbalancer" - - cluster_name = var.cluster_name - subnet_id = module.network.subnet_id - floatingip_pool = var.floatingip_pool - k8s_master_ips = module.compute.k8s_master_ips - k8s_master_loadbalancer_enabled = var.k8s_master_loadbalancer_enabled - k8s_master_loadbalancer_listener_port = var.k8s_master_loadbalancer_listener_port - k8s_master_loadbalancer_server_port = var.k8s_master_loadbalancer_server_port - k8s_master_loadbalancer_public_ip = var.k8s_master_loadbalancer_public_ip - - depends_on = [ - module.compute.k8s_master - ] -} - - -output "private_subnet_id" { - value = module.network.subnet_id -} - -output "floating_network_id" { - value = var.external_net -} - -output "router_id" { - value = module.network.router_id -} - -output "k8s_master_fips" { - value = var.number_of_k8s_masters + var.number_of_k8s_masters_no_etcd > 0 ? concat(module.ips.k8s_master_fips, module.ips.k8s_master_no_etcd_fips) : [for key, value in module.ips.k8s_masters_fips : value.address] -} - -output "k8s_node_fips" { - value = var.number_of_k8s_nodes > 0 ? 
module.ips.k8s_node_fips : [for key, value in module.ips.k8s_nodes_fips : value.address] -} - -output "bastion_fips" { - value = module.ips.bastion_fips -} diff --git a/contrib/terraform/openstack/modules/compute/ansible_bastion_template.txt b/contrib/terraform/openstack/modules/compute/ansible_bastion_template.txt deleted file mode 100644 index a304b2c9d5d..00000000000 --- a/contrib/terraform/openstack/modules/compute/ansible_bastion_template.txt +++ /dev/null @@ -1 +0,0 @@ -ansible_ssh_common_args: "-o ProxyCommand='ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -W %h:%p -q USER@BASTION_ADDRESS {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %}'" diff --git a/contrib/terraform/openstack/modules/compute/main.tf b/contrib/terraform/openstack/modules/compute/main.tf deleted file mode 100644 index 2256ea2b4e6..00000000000 --- a/contrib/terraform/openstack/modules/compute/main.tf +++ /dev/null @@ -1,1092 +0,0 @@ -data "openstack_images_image_v2" "vm_image" { - count = var.image_uuid == "" ? 1 : 0 - most_recent = true - name = var.image -} - -data "openstack_images_image_v2" "gfs_image" { - count = var.image_gfs_uuid == "" ? var.image_uuid == "" ? 1 : 0 : 0 - most_recent = true - name = var.image_gfs == "" ? var.image : var.image_gfs -} - -data "openstack_images_image_v2" "image_master" { - count = var.image_master_uuid == "" ? var.image_uuid == "" ? 1 : 0 : 0 - name = var.image_master == "" ? var.image : var.image_master -} - -data "cloudinit_config" "cloudinit" { - part { - content_type = "text/cloud-config" - content = templatefile("${path.module}/templates/cloudinit.yaml.tmpl", { - extra_partitions = [], - netplan_critical_dhcp_interface = "" - }) - } -} - -data "openstack_networking_network_v2" "k8s_network" { - count = var.use_existing_network ? 
1 : 0 - name = var.network_name -} - -resource "openstack_compute_keypair_v2" "k8s" { - name = "kubernetes-${var.cluster_name}" - public_key = chomp(file(var.public_key_path)) -} - -resource "openstack_networking_secgroup_v2" "k8s_master" { - name = "${var.cluster_name}-k8s-master" - description = "${var.cluster_name} - Kubernetes Master" - delete_default_rules = true -} - -resource "openstack_networking_secgroup_v2" "k8s_master_extra" { - count = "%{if var.extra_sec_groups}1%{else}0%{endif}" - name = "${var.cluster_name}-k8s-master-${var.extra_sec_groups_name}" - description = "${var.cluster_name} - Kubernetes Master nodes - rules not managed by terraform" - delete_default_rules = true -} - -resource "openstack_networking_secgroup_rule_v2" "k8s_master" { - count = length(var.master_allowed_remote_ips) - direction = "ingress" - ethertype = "IPv4" - protocol = "tcp" - port_range_min = "6443" - port_range_max = "6443" - remote_ip_prefix = var.master_allowed_remote_ips[count.index] - security_group_id = openstack_networking_secgroup_v2.k8s_master.id -} - -resource "openstack_networking_secgroup_rule_v2" "k8s_master_ports" { - count = length(var.master_allowed_ports) - direction = "ingress" - ethertype = "IPv4" - protocol = lookup(var.master_allowed_ports[count.index], "protocol", "tcp") - port_range_min = lookup(var.master_allowed_ports[count.index], "port_range_min") - port_range_max = lookup(var.master_allowed_ports[count.index], "port_range_max") - remote_ip_prefix = lookup(var.master_allowed_ports[count.index], "remote_ip_prefix", "0.0.0.0/0") - security_group_id = openstack_networking_secgroup_v2.k8s_master.id -} - -resource "openstack_networking_secgroup_rule_v2" "k8s_master_ipv6_ingress" { - count = length(var.master_allowed_remote_ipv6_ips) - direction = "ingress" - ethertype = "IPv6" - protocol = "tcp" - port_range_min = "6443" - port_range_max = "6443" - remote_ip_prefix = var.master_allowed_remote_ipv6_ips[count.index] - security_group_id = openstack_networking_secgroup_v2.k8s_master.id -} - -resource "openstack_networking_secgroup_rule_v2" "k8s_master_ports_ipv6_ingress" { - count = length(var.master_allowed_ports_ipv6) - direction = "ingress" - ethertype = "IPv6" - protocol = lookup(var.master_allowed_ports_ipv6[count.index], "protocol", "tcp") - port_range_min = lookup(var.master_allowed_ports_ipv6[count.index], "port_range_min") - port_range_max = lookup(var.master_allowed_ports_ipv6[count.index], "port_range_max") - remote_ip_prefix = lookup(var.master_allowed_ports_ipv6[count.index], "remote_ip_prefix", "::/0") - security_group_id = openstack_networking_secgroup_v2.k8s_master.id -} - -resource "openstack_networking_secgroup_rule_v2" "master_egress_ipv6" { - count = length(var.k8s_allowed_egress_ipv6_ips) - direction = "egress" - ethertype = "IPv6" - remote_ip_prefix = var.k8s_allowed_egress_ipv6_ips[count.index] - security_group_id = openstack_networking_secgroup_v2.k8s_master.id -} - -resource "openstack_networking_secgroup_v2" "bastion" { - name = "${var.cluster_name}-bastion" - count = var.number_of_bastions != "" ? 1 : 0 - description = "${var.cluster_name} - Bastion Server" - delete_default_rules = true -} - -resource "openstack_networking_secgroup_rule_v2" "bastion" { - count = var.number_of_bastions != "" ? 
length(var.bastion_allowed_remote_ips) : 0 - direction = "ingress" - ethertype = "IPv4" - protocol = "tcp" - port_range_min = "22" - port_range_max = "22" - remote_ip_prefix = var.bastion_allowed_remote_ips[count.index] - security_group_id = openstack_networking_secgroup_v2.bastion[0].id -} - -resource "openstack_networking_secgroup_rule_v2" "k8s_bastion_ports" { - count = length(var.bastion_allowed_ports) - direction = "ingress" - ethertype = "IPv4" - protocol = lookup(var.bastion_allowed_ports[count.index], "protocol", "tcp") - port_range_min = lookup(var.bastion_allowed_ports[count.index], "port_range_min") - port_range_max = lookup(var.bastion_allowed_ports[count.index], "port_range_max") - remote_ip_prefix = lookup(var.bastion_allowed_ports[count.index], "remote_ip_prefix", "0.0.0.0/0") - security_group_id = openstack_networking_secgroup_v2.bastion[0].id -} - -resource "openstack_networking_secgroup_rule_v2" "bastion_ipv6_ingress" { - count = var.number_of_bastions != "" ? length(var.bastion_allowed_remote_ipv6_ips) : 0 - direction = "ingress" - ethertype = "IPv6" - protocol = "tcp" - port_range_min = "22" - port_range_max = "22" - remote_ip_prefix = var.bastion_allowed_remote_ipv6_ips[count.index] - security_group_id = openstack_networking_secgroup_v2.bastion[0].id -} - -resource "openstack_networking_secgroup_rule_v2" "k8s_bastion_ports_ipv6_ingress" { - count = length(var.bastion_allowed_ports_ipv6) - direction = "ingress" - ethertype = "IPv6" - protocol = lookup(var.bastion_allowed_ports_ipv6[count.index], "protocol", "tcp") - port_range_min = lookup(var.bastion_allowed_ports_ipv6[count.index], "port_range_min") - port_range_max = lookup(var.bastion_allowed_ports_ipv6[count.index], "port_range_max") - remote_ip_prefix = lookup(var.bastion_allowed_ports_ipv6[count.index], "remote_ip_prefix", "::/0") - security_group_id = openstack_networking_secgroup_v2.bastion[0].id -} - -resource "openstack_networking_secgroup_v2" "k8s" { - name = "${var.cluster_name}-k8s" - description = "${var.cluster_name} - Kubernetes" - delete_default_rules = true -} - -resource "openstack_networking_secgroup_rule_v2" "k8s" { - direction = "ingress" - ethertype = "IPv4" - remote_group_id = openstack_networking_secgroup_v2.k8s.id - security_group_id = openstack_networking_secgroup_v2.k8s.id -} - -resource "openstack_networking_secgroup_rule_v2" "k8s_ipv6" { - direction = "ingress" - ethertype = "IPv6" - remote_group_id = openstack_networking_secgroup_v2.k8s.id - security_group_id = openstack_networking_secgroup_v2.k8s.id -} - -resource "openstack_networking_secgroup_rule_v2" "k8s_allowed_remote_ips" { - count = length(var.k8s_allowed_remote_ips) - direction = "ingress" - ethertype = "IPv4" - protocol = "tcp" - port_range_min = "22" - port_range_max = "22" - remote_ip_prefix = var.k8s_allowed_remote_ips[count.index] - security_group_id = openstack_networking_secgroup_v2.k8s.id -} - -resource "openstack_networking_secgroup_rule_v2" "k8s_allowed_remote_ips_ipv6" { - count = length(var.k8s_allowed_remote_ips_ipv6) - direction = "ingress" - ethertype = "IPv6" - protocol = "tcp" - port_range_min = "22" - port_range_max = "22" - remote_ip_prefix = var.k8s_allowed_remote_ips_ipv6[count.index] - security_group_id = openstack_networking_secgroup_v2.k8s.id -} - -resource "openstack_networking_secgroup_rule_v2" "egress" { - count = length(var.k8s_allowed_egress_ips) - direction = "egress" - ethertype = "IPv4" - remote_ip_prefix = var.k8s_allowed_egress_ips[count.index] - security_group_id = 
openstack_networking_secgroup_v2.k8s.id -} - -resource "openstack_networking_secgroup_rule_v2" "egress_ipv6" { - count = length(var.k8s_allowed_egress_ipv6_ips) - direction = "egress" - ethertype = "IPv6" - remote_ip_prefix = var.k8s_allowed_egress_ipv6_ips[count.index] - security_group_id = openstack_networking_secgroup_v2.k8s.id -} - -resource "openstack_networking_secgroup_v2" "worker" { - name = "${var.cluster_name}-k8s-worker" - description = "${var.cluster_name} - Kubernetes worker nodes" - delete_default_rules = true -} - -resource "openstack_networking_secgroup_v2" "worker_extra" { - count = "%{if var.extra_sec_groups}1%{else}0%{endif}" - name = "${var.cluster_name}-k8s-worker-${var.extra_sec_groups_name}" - description = "${var.cluster_name} - Kubernetes worker nodes - rules not managed by terraform" - delete_default_rules = true -} - -resource "openstack_networking_secgroup_rule_v2" "worker" { - count = length(var.worker_allowed_ports) - direction = "ingress" - ethertype = "IPv4" - protocol = lookup(var.worker_allowed_ports[count.index], "protocol", "tcp") - port_range_min = lookup(var.worker_allowed_ports[count.index], "port_range_min") - port_range_max = lookup(var.worker_allowed_ports[count.index], "port_range_max") - remote_ip_prefix = lookup(var.worker_allowed_ports[count.index], "remote_ip_prefix", "0.0.0.0/0") - security_group_id = openstack_networking_secgroup_v2.worker.id -} - -resource "openstack_networking_secgroup_rule_v2" "worker_ipv6_ingress" { - count = length(var.worker_allowed_ports_ipv6) - direction = "ingress" - ethertype = "IPv6" - protocol = lookup(var.worker_allowed_ports_ipv6[count.index], "protocol", "tcp") - port_range_min = lookup(var.worker_allowed_ports_ipv6[count.index], "port_range_min") - port_range_max = lookup(var.worker_allowed_ports_ipv6[count.index], "port_range_max") - remote_ip_prefix = lookup(var.worker_allowed_ports_ipv6[count.index], "remote_ip_prefix", "::/0") - security_group_id = openstack_networking_secgroup_v2.worker.id -} - -resource "openstack_compute_servergroup_v2" "k8s_master" { - count = var.master_server_group_policy != "" ? 1 : 0 - name = "k8s-master-srvgrp" - policies = [var.master_server_group_policy] -} - -resource "openstack_compute_servergroup_v2" "k8s_node" { - count = var.node_server_group_policy != "" ? 1 : 0 - name = "k8s-node-srvgrp" - policies = [var.node_server_group_policy] -} - -resource "openstack_compute_servergroup_v2" "k8s_etcd" { - count = var.etcd_server_group_policy != "" ? 1 : 0 - name = "k8s-etcd-srvgrp" - policies = [var.etcd_server_group_policy] -} - -resource "openstack_compute_servergroup_v2" "k8s_node_additional" { - for_each = var.additional_server_groups - name = "k8s-${each.key}-srvgrp" - policies = [each.value.policy] -} - -locals { -# master groups - master_sec_groups = compact([ - openstack_networking_secgroup_v2.k8s_master.id, - openstack_networking_secgroup_v2.k8s.id, - var.extra_sec_groups ?openstack_networking_secgroup_v2.k8s_master_extra[0].id : "", - ]) -# worker groups - worker_sec_groups = compact([ - openstack_networking_secgroup_v2.k8s.id, - openstack_networking_secgroup_v2.worker.id, - var.extra_sec_groups ? 
openstack_networking_secgroup_v2.worker_extra[0].id : "", - ]) -# bastion groups - bastion_sec_groups = compact(concat([ - openstack_networking_secgroup_v2.k8s.id, - openstack_networking_secgroup_v2.bastion[0].id, - ])) -# etcd groups - etcd_sec_groups = compact([openstack_networking_secgroup_v2.k8s.id]) -# glusterfs groups - gfs_sec_groups = compact([openstack_networking_secgroup_v2.k8s.id]) - -# Image uuid - image_to_use_node = var.image_uuid != "" ? var.image_uuid : data.openstack_images_image_v2.vm_image[0].id -# Image_gfs uuid - image_to_use_gfs = var.image_gfs_uuid != "" ? var.image_gfs_uuid : var.image_uuid != "" ? var.image_uuid : data.openstack_images_image_v2.gfs_image[0].id -# image_master uuidimage_gfs_uuid - image_to_use_master = var.image_master_uuid != "" ? var.image_master_uuid : var.image_uuid != "" ? var.image_uuid : data.openstack_images_image_v2.image_master[0].id - - k8s_nodes_settings = { - for name, node in var.k8s_nodes : - name => { - "use_local_disk" = (node.root_volume_size_in_gb != null ? node.root_volume_size_in_gb : var.node_root_volume_size_in_gb) == 0, - "image_id" = node.image_id != null ? node.image_id : local.image_to_use_node, - "volume_size" = node.root_volume_size_in_gb != null ? node.root_volume_size_in_gb : var.node_root_volume_size_in_gb, - "volume_type" = node.volume_type != null ? node.volume_type : var.node_volume_type, - "network_id" = node.network_id != null ? node.network_id : (var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id) - "server_group" = node.server_group != null ? [openstack_compute_servergroup_v2.k8s_node_additional[node.server_group].id] : (var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0].id] : []) - } - } - - k8s_masters_settings = { - for name, node in var.k8s_masters : - name => { - "use_local_disk" = (node.root_volume_size_in_gb != null ? node.root_volume_size_in_gb : var.master_root_volume_size_in_gb) == 0, - "image_id" = node.image_id != null ? node.image_id : local.image_to_use_master, - "volume_size" = node.root_volume_size_in_gb != null ? node.root_volume_size_in_gb : var.master_root_volume_size_in_gb, - "volume_type" = node.volume_type != null ? node.volume_type : var.master_volume_type, - "network_id" = node.network_id != null ? node.network_id : (var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id) - } - } -} - -resource "openstack_networking_port_v2" "bastion_port" { - count = var.number_of_bastions - name = "${var.cluster_name}-bastion-${count.index + 1}" - network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id - admin_state_up = "true" - port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled - security_group_ids = var.port_security_enabled ? local.bastion_sec_groups : null - no_security_groups = var.port_security_enabled ? null : false - dynamic "fixed_ip" { - for_each = var.private_subnet_id == "" ? [] : [true] - content { - subnet_id = var.private_subnet_id - } - } - - depends_on = [ - var.network_router_id - ] -} - -resource "openstack_compute_instance_v2" "bastion" { - name = "${var.cluster_name}-bastion-${count.index + 1}" - count = var.number_of_bastions - image_id = var.bastion_root_volume_size_in_gb == 0 ? 
local.image_to_use_node : null - flavor_id = var.flavor_bastion - key_pair = openstack_compute_keypair_v2.k8s.name - user_data = data.cloudinit_config.cloudinit.rendered - - dynamic "block_device" { - for_each = var.bastion_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : [] - content { - uuid = local.image_to_use_node - source_type = "image" - volume_size = var.bastion_root_volume_size_in_gb - boot_index = 0 - destination_type = "volume" - delete_on_termination = true - } - } - - network { - port = element(openstack_networking_port_v2.bastion_port.*.id, count.index) - } - - metadata = { - ssh_user = var.ssh_user - kubespray_groups = "bastion" - depends_on = var.network_router_id - use_access_ip = var.use_access_ip - } - - provisioner "local-exec" { - command = "sed -e s/USER/${var.ssh_user}/ -e s/BASTION_ADDRESS/${var.bastion_fips[0]}/ ${path.module}/ansible_bastion_template.txt > ${var.group_vars_path}/no_floating.yml" - } -} - -resource "openstack_networking_port_v2" "k8s_master_port" { - count = var.number_of_k8s_masters - name = "${var.cluster_name}-k8s-master-${count.index + 1}" - network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id - admin_state_up = "true" - port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled - security_group_ids = var.port_security_enabled ? local.master_sec_groups : null - no_security_groups = var.port_security_enabled ? null : false - dynamic "fixed_ip" { - for_each = var.private_subnet_id == "" ? [] : [true] - content { - subnet_id = var.private_subnet_id - } - } - - lifecycle { - ignore_changes = [ allowed_address_pairs ] - } - - depends_on = [ - var.network_router_id - ] -} - -resource "openstack_compute_instance_v2" "k8s_master" { - name = "${var.cluster_name}-k8s-master-${count.index + 1}" - count = var.number_of_k8s_masters - availability_zone = element(var.az_list, count.index) - image_id = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null - flavor_id = var.flavor_k8s_master - key_pair = openstack_compute_keypair_v2.k8s.name - user_data = data.cloudinit_config.cloudinit.rendered - - - dynamic "block_device" { - for_each = var.master_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : [] - content { - uuid = local.image_to_use_master - source_type = "image" - volume_size = var.master_root_volume_size_in_gb - volume_type = var.master_volume_type - boot_index = 0 - destination_type = "volume" - delete_on_termination = true - } - } - - network { - port = element(openstack_networking_port_v2.k8s_master_port.*.id, count.index) - } - - dynamic "scheduler_hints" { - for_each = var.master_server_group_policy != "" ? 
[openstack_compute_servergroup_v2.k8s_master[0]] : [] - content { - group = openstack_compute_servergroup_v2.k8s_master[0].id - } - } - - metadata = { - ssh_user = var.ssh_user - kubespray_groups = "etcd,kube_control_plane,${var.supplementary_master_groups},k8s_cluster" - depends_on = var.network_router_id - use_access_ip = var.use_access_ip - } - - provisioner "local-exec" { - command = "sed -e s/USER/${var.ssh_user}/ -e s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_master_fips), 0)}/ ${path.module}/ansible_bastion_template.txt > ${var.group_vars_path}/no_floating.yml" - } -} - -resource "openstack_networking_port_v2" "k8s_masters_port" { - for_each = var.number_of_k8s_masters == 0 && var.number_of_k8s_masters_no_etcd == 0 && var.number_of_k8s_masters_no_floating_ip == 0 && var.number_of_k8s_masters_no_floating_ip_no_etcd == 0 ? var.k8s_masters : {} - name = "${var.cluster_name}-k8s-${each.key}" - network_id = local.k8s_masters_settings[each.key].network_id - admin_state_up = "true" - port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled - security_group_ids = var.port_security_enabled ? local.master_sec_groups : null - no_security_groups = var.port_security_enabled ? null : false - dynamic "fixed_ip" { - for_each = var.private_subnet_id == "" ? [] : [true] - content { - subnet_id = var.private_subnet_id - } - } - - lifecycle { - ignore_changes = [ allowed_address_pairs ] - } - - depends_on = [ - var.network_router_id - ] -} - -resource "openstack_compute_instance_v2" "k8s_masters" { - for_each = var.number_of_k8s_masters == 0 && var.number_of_k8s_masters_no_etcd == 0 && var.number_of_k8s_masters_no_floating_ip == 0 && var.number_of_k8s_masters_no_floating_ip_no_etcd == 0 ? var.k8s_masters : {} - name = "${var.cluster_name}-k8s-${each.key}" - availability_zone = each.value.az - image_id = local.k8s_masters_settings[each.key].use_local_disk ? local.k8s_masters_settings[each.key].image_id : null - flavor_id = each.value.flavor - key_pair = openstack_compute_keypair_v2.k8s.name - - dynamic "block_device" { - for_each = !local.k8s_masters_settings[each.key].use_local_disk ? [local.k8s_masters_settings[each.key].image_id] : [] - content { - uuid = block_device.value - source_type = "image" - volume_size = local.k8s_masters_settings[each.key].volume_size - volume_type = local.k8s_masters_settings[each.key].volume_type - boot_index = 0 - destination_type = "volume" - delete_on_termination = true - } - } - - network { - port = openstack_networking_port_v2.k8s_masters_port[each.key].id - } - - dynamic "scheduler_hints" { - for_each = var.master_server_group_policy != "" ? 
[openstack_compute_servergroup_v2.k8s_master[0]] : [] - content { - group = openstack_compute_servergroup_v2.k8s_master[0].id - } - } - - metadata = { - ssh_user = var.ssh_user - kubespray_groups = "%{if each.value.etcd == true}etcd,%{endif}kube_control_plane,${var.supplementary_master_groups},k8s_cluster%{if each.value.floating_ip == false},no_floating%{endif}" - depends_on = var.network_router_id - use_access_ip = var.use_access_ip - } - - provisioner "local-exec" { - command = "%{if each.value.floating_ip}sed s/USER/${var.ssh_user}/ ${path.module}/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, [for key, value in var.k8s_masters_fips : value.address]), 0)}/ > ${var.group_vars_path}/no_floating.yml%{else}true%{endif}" - } -} - -resource "openstack_networking_port_v2" "k8s_master_no_etcd_port" { - count = var.number_of_k8s_masters_no_etcd - name = "${var.cluster_name}-k8s-master-ne-${count.index + 1}" - network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id - admin_state_up = "true" - port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled - security_group_ids = var.port_security_enabled ? local.master_sec_groups : null - no_security_groups = var.port_security_enabled ? null : false - dynamic "fixed_ip" { - for_each = var.private_subnet_id == "" ? [] : [true] - content { - subnet_id = var.private_subnet_id - } - } - - lifecycle { - ignore_changes = [ allowed_address_pairs ] - } - - depends_on = [ - var.network_router_id - ] -} - -resource "openstack_compute_instance_v2" "k8s_master_no_etcd" { - name = "${var.cluster_name}-k8s-master-ne-${count.index + 1}" - count = var.number_of_k8s_masters_no_etcd - availability_zone = element(var.az_list, count.index) - image_id = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null - flavor_id = var.flavor_k8s_master - key_pair = openstack_compute_keypair_v2.k8s.name - user_data = data.cloudinit_config.cloudinit.rendered - - - dynamic "block_device" { - for_each = var.master_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : [] - content { - uuid = local.image_to_use_master - source_type = "image" - volume_size = var.master_root_volume_size_in_gb - volume_type = var.master_volume_type - boot_index = 0 - destination_type = "volume" - delete_on_termination = true - } - } - - network { - port = element(openstack_networking_port_v2.k8s_master_no_etcd_port.*.id, count.index) - } - - dynamic "scheduler_hints" { - for_each = var.master_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_master[0]] : [] - content { - group = openstack_compute_servergroup_v2.k8s_master[0].id - } - } - - metadata = { - ssh_user = var.ssh_user - kubespray_groups = "kube_control_plane,${var.supplementary_master_groups},k8s_cluster" - depends_on = var.network_router_id - use_access_ip = var.use_access_ip - } - - provisioner "local-exec" { - command = "sed -e s/USER/${var.ssh_user}/ -e s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_master_fips), 0)}/ ${path.module}/ansible_bastion_template.txt > ${var.group_vars_path}/no_floating.yml" - } -} - -resource "openstack_networking_port_v2" "etcd_port" { - count = var.number_of_etcd - name = "${var.cluster_name}-etcd-${count.index + 1}" - network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id - admin_state_up = "true" - port_security_enabled = var.force_null_port_security ? 
null : var.port_security_enabled - security_group_ids = var.port_security_enabled ? local.etcd_sec_groups : null - no_security_groups = var.port_security_enabled ? null : false - dynamic "fixed_ip" { - for_each = var.private_subnet_id == "" ? [] : [true] - content { - subnet_id = var.private_subnet_id - } - } - - depends_on = [ - var.network_router_id - ] -} - -resource "openstack_compute_instance_v2" "etcd" { - name = "${var.cluster_name}-etcd-${count.index + 1}" - count = var.number_of_etcd - availability_zone = element(var.az_list, count.index) - image_id = var.etcd_root_volume_size_in_gb == 0 ? local.image_to_use_master : null - flavor_id = var.flavor_etcd - key_pair = openstack_compute_keypair_v2.k8s.name - user_data = data.cloudinit_config.cloudinit.rendered - - dynamic "block_device" { - for_each = var.etcd_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : [] - content { - uuid = local.image_to_use_master - source_type = "image" - volume_size = var.etcd_root_volume_size_in_gb - boot_index = 0 - destination_type = "volume" - delete_on_termination = true - } - } - - network { - port = element(openstack_networking_port_v2.etcd_port.*.id, count.index) - } - - dynamic "scheduler_hints" { - for_each = var.etcd_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_etcd[0]] : [] - content { - group = openstack_compute_servergroup_v2.k8s_etcd[0].id - } - } - - metadata = { - ssh_user = var.ssh_user - kubespray_groups = "etcd,no_floating" - depends_on = var.network_router_id - use_access_ip = var.use_access_ip - } -} - -resource "openstack_networking_port_v2" "k8s_master_no_floating_ip_port" { - count = var.number_of_k8s_masters_no_floating_ip - name = "${var.cluster_name}-k8s-master-nf-${count.index + 1}" - network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id - admin_state_up = "true" - port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled - security_group_ids = var.port_security_enabled ? local.master_sec_groups : null - no_security_groups = var.port_security_enabled ? null : false - dynamic "fixed_ip" { - for_each = var.private_subnet_id == "" ? [] : [true] - content { - subnet_id = var.private_subnet_id - } - } - - lifecycle { - ignore_changes = [ allowed_address_pairs ] - } - - depends_on = [ - var.network_router_id - ] -} - -resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" { - name = "${var.cluster_name}-k8s-master-nf-${count.index + 1}" - count = var.number_of_k8s_masters_no_floating_ip - availability_zone = element(var.az_list, count.index) - image_id = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null - flavor_id = var.flavor_k8s_master - key_pair = openstack_compute_keypair_v2.k8s.name - - dynamic "block_device" { - for_each = var.master_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : [] - content { - uuid = local.image_to_use_master - source_type = "image" - volume_size = var.master_root_volume_size_in_gb - volume_type = var.master_volume_type - boot_index = 0 - destination_type = "volume" - delete_on_termination = true - } - } - - network { - port = element(openstack_networking_port_v2.k8s_master_no_floating_ip_port.*.id, count.index) - } - - dynamic "scheduler_hints" { - for_each = var.master_server_group_policy != "" ? 
[openstack_compute_servergroup_v2.k8s_master[0]] : [] - content { - group = openstack_compute_servergroup_v2.k8s_master[0].id - } - } - - metadata = { - ssh_user = var.ssh_user - kubespray_groups = "etcd,kube_control_plane,${var.supplementary_master_groups},k8s_cluster,no_floating" - depends_on = var.network_router_id - use_access_ip = var.use_access_ip - } -} - -resource "openstack_networking_port_v2" "k8s_master_no_floating_ip_no_etcd_port" { - count = var.number_of_k8s_masters_no_floating_ip_no_etcd - name = "${var.cluster_name}-k8s-master-ne-nf-${count.index + 1}" - network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id - admin_state_up = "true" - port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled - security_group_ids = var.port_security_enabled ? local.master_sec_groups : null - no_security_groups = var.port_security_enabled ? null : false - dynamic "fixed_ip" { - for_each = var.private_subnet_id == "" ? [] : [true] - content { - subnet_id = var.private_subnet_id - } - } - - lifecycle { - ignore_changes = [ allowed_address_pairs ] - } - - depends_on = [ - var.network_router_id - ] -} - -resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" { - name = "${var.cluster_name}-k8s-master-ne-nf-${count.index + 1}" - count = var.number_of_k8s_masters_no_floating_ip_no_etcd - availability_zone = element(var.az_list, count.index) - image_id = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null - flavor_id = var.flavor_k8s_master - key_pair = openstack_compute_keypair_v2.k8s.name - user_data = data.cloudinit_config.cloudinit.rendered - - dynamic "block_device" { - for_each = var.master_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : [] - content { - uuid = local.image_to_use_master - source_type = "image" - volume_size = var.master_root_volume_size_in_gb - volume_type = var.master_volume_type - boot_index = 0 - destination_type = "volume" - delete_on_termination = true - } - } - - network { - port = element(openstack_networking_port_v2.k8s_master_no_floating_ip_no_etcd_port.*.id, count.index) - } - - dynamic "scheduler_hints" { - for_each = var.master_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_master[0]] : [] - content { - group = openstack_compute_servergroup_v2.k8s_master[0].id - } - } - - metadata = { - ssh_user = var.ssh_user - kubespray_groups = "kube_control_plane,${var.supplementary_master_groups},k8s_cluster,no_floating" - depends_on = var.network_router_id - use_access_ip = var.use_access_ip - } -} - -resource "openstack_networking_port_v2" "k8s_node_port" { - count = var.number_of_k8s_nodes - name = "${var.cluster_name}-k8s-node-${count.index + 1}" - network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id - admin_state_up = "true" - port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled - security_group_ids = var.port_security_enabled ? local.worker_sec_groups : null - no_security_groups = var.port_security_enabled ? null : false - dynamic "fixed_ip" { - for_each = var.private_subnet_id == "" ? 
[] : [true] - content { - subnet_id = var.private_subnet_id - } - } - - lifecycle { - ignore_changes = [ allowed_address_pairs ] - } - - depends_on = [ - var.network_router_id - ] -} - -resource "openstack_compute_instance_v2" "k8s_node" { - name = "${var.cluster_name}-k8s-node-${count.index + 1}" - count = var.number_of_k8s_nodes - availability_zone = element(var.az_list_node, count.index) - image_id = var.node_root_volume_size_in_gb == 0 ? local.image_to_use_node : null - flavor_id = var.flavor_k8s_node - key_pair = openstack_compute_keypair_v2.k8s.name - user_data = data.cloudinit_config.cloudinit.rendered - - dynamic "block_device" { - for_each = var.node_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : [] - content { - uuid = local.image_to_use_node - source_type = "image" - volume_size = var.node_root_volume_size_in_gb - volume_type = var.node_volume_type - boot_index = 0 - destination_type = "volume" - delete_on_termination = true - } - } - - network { - port = element(openstack_networking_port_v2.k8s_node_port.*.id, count.index) - } - - - dynamic "scheduler_hints" { - for_each = var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0]] : [] - content { - group = openstack_compute_servergroup_v2.k8s_node[0].id - } - } - - metadata = { - ssh_user = var.ssh_user - kubespray_groups = "kube_node,k8s_cluster,${var.supplementary_node_groups}" - depends_on = var.network_router_id - use_access_ip = var.use_access_ip - } - - provisioner "local-exec" { - command = "sed -e s/USER/${var.ssh_user}/ -e s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_node_fips), 0)}/ ${path.module}/ansible_bastion_template.txt > ${var.group_vars_path}/no_floating.yml" - } -} - -resource "openstack_networking_port_v2" "k8s_node_no_floating_ip_port" { - count = var.number_of_k8s_nodes_no_floating_ip - name = "${var.cluster_name}-k8s-node-nf-${count.index + 1}" - network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id - admin_state_up = "true" - port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled - security_group_ids = var.port_security_enabled ? local.worker_sec_groups : null - no_security_groups = var.port_security_enabled ? null : false - dynamic "fixed_ip" { - for_each = var.private_subnet_id == "" ? [] : [true] - content { - subnet_id = var.private_subnet_id - } - } - - lifecycle { - ignore_changes = [ allowed_address_pairs ] - } - - depends_on = [ - var.network_router_id - ] -} - -resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" { - name = "${var.cluster_name}-k8s-node-nf-${count.index + 1}" - count = var.number_of_k8s_nodes_no_floating_ip - availability_zone = element(var.az_list_node, count.index) - image_id = var.node_root_volume_size_in_gb == 0 ? local.image_to_use_node : null - flavor_id = var.flavor_k8s_node - key_pair = openstack_compute_keypair_v2.k8s.name - user_data = data.cloudinit_config.cloudinit.rendered - - dynamic "block_device" { - for_each = var.node_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : [] - content { - uuid = local.image_to_use_node - source_type = "image" - volume_size = var.node_root_volume_size_in_gb - volume_type = var.node_volume_type - boot_index = 0 - destination_type = "volume" - delete_on_termination = true - } - } - - network { - port = element(openstack_networking_port_v2.k8s_node_no_floating_ip_port.*.id, count.index) - } - - dynamic "scheduler_hints" { - for_each = var.node_server_group_policy != "" ? 
[openstack_compute_servergroup_v2.k8s_node[0].id] : [] - content { - group = scheduler_hints.value - } - } - - metadata = { - ssh_user = var.ssh_user - kubespray_groups = "kube_node,k8s_cluster,no_floating,${var.supplementary_node_groups}" - depends_on = var.network_router_id - use_access_ip = var.use_access_ip - } -} - -resource "openstack_networking_port_v2" "k8s_nodes_port" { - for_each = var.number_of_k8s_nodes == 0 && var.number_of_k8s_nodes_no_floating_ip == 0 ? var.k8s_nodes : {} - name = "${var.cluster_name}-k8s-node-${each.key}" - network_id = local.k8s_nodes_settings[each.key].network_id - admin_state_up = "true" - port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled - security_group_ids = var.port_security_enabled ? local.worker_sec_groups : null - no_security_groups = var.port_security_enabled ? null : false - dynamic "fixed_ip" { - for_each = var.private_subnet_id == "" ? [] : [true] - content { - subnet_id = var.private_subnet_id - } - } - - lifecycle { - ignore_changes = [ allowed_address_pairs ] - } - - depends_on = [ - var.network_router_id - ] -} - -resource "openstack_compute_instance_v2" "k8s_nodes" { - for_each = var.number_of_k8s_nodes == 0 && var.number_of_k8s_nodes_no_floating_ip == 0 ? var.k8s_nodes : {} - name = "${var.cluster_name}-k8s-node-${each.key}" - availability_zone = each.value.az - image_id = local.k8s_nodes_settings[each.key].use_local_disk ? local.k8s_nodes_settings[each.key].image_id : null - flavor_id = each.value.flavor - key_pair = openstack_compute_keypair_v2.k8s.name - user_data = each.value.cloudinit != null ? templatefile("${path.module}/templates/cloudinit.yaml.tmpl", { - extra_partitions = each.value.cloudinit.extra_partitions, - netplan_critical_dhcp_interface = each.value.cloudinit.netplan_critical_dhcp_interface, - }) : data.cloudinit_config.cloudinit.rendered - - dynamic "block_device" { - for_each = !local.k8s_nodes_settings[each.key].use_local_disk ? [local.k8s_nodes_settings[each.key].image_id] : [] - content { - uuid = block_device.value - source_type = "image" - volume_size = local.k8s_nodes_settings[each.key].volume_size - volume_type = local.k8s_nodes_settings[each.key].volume_type - boot_index = 0 - destination_type = "volume" - delete_on_termination = true - } - } - - network { - port = openstack_networking_port_v2.k8s_nodes_port[each.key].id - } - - dynamic "scheduler_hints" { - for_each = local.k8s_nodes_settings[each.key].server_group - content { - group = scheduler_hints.value - } - } - - metadata = { - ssh_user = var.ssh_user - kubespray_groups = "kube_node,k8s_cluster,%{if !each.value.floating_ip}no_floating,%{endif}${var.supplementary_node_groups}${each.value.extra_groups != null ? ",${each.value.extra_groups}" : ""}" - depends_on = var.network_router_id - use_access_ip = var.use_access_ip - } - - provisioner "local-exec" { - command = "%{if each.value.floating_ip}sed -e s/USER/${var.ssh_user}/ -e s/BASTION_ADDRESS/${element(concat(var.bastion_fips, [for key, value in var.k8s_nodes_fips : value.address]), 0)}/ ${path.module}/ansible_bastion_template.txt > ${var.group_vars_path}/no_floating.yml%{else}true%{endif}" - } -} - -resource "openstack_networking_port_v2" "glusterfs_node_no_floating_ip_port" { - count = var.number_of_gfs_nodes_no_floating_ip - name = "${var.cluster_name}-gfs-node-nf-${count.index + 1}" - network_id = var.use_existing_network ? 
data.openstack_networking_network_v2.k8s_network[0].id : var.network_id - admin_state_up = "true" - port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled - security_group_ids = var.port_security_enabled ? local.gfs_sec_groups : null - no_security_groups = var.port_security_enabled ? null : false - dynamic "fixed_ip" { - for_each = var.private_subnet_id == "" ? [] : [true] - content { - subnet_id = var.private_subnet_id - } - } - - depends_on = [ - var.network_router_id - ] -} - -resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" { - name = "${var.cluster_name}-gfs-node-nf-${count.index + 1}" - count = var.number_of_gfs_nodes_no_floating_ip - availability_zone = element(var.az_list, count.index) - image_name = var.gfs_root_volume_size_in_gb == 0 ? local.image_to_use_gfs : null - flavor_id = var.flavor_gfs_node - key_pair = openstack_compute_keypair_v2.k8s.name - - dynamic "block_device" { - for_each = var.gfs_root_volume_size_in_gb > 0 ? [local.image_to_use_gfs] : [] - content { - uuid = local.image_to_use_gfs - source_type = "image" - volume_size = var.gfs_root_volume_size_in_gb - boot_index = 0 - destination_type = "volume" - delete_on_termination = true - } - } - - network { - port = element(openstack_networking_port_v2.glusterfs_node_no_floating_ip_port.*.id, count.index) - } - - dynamic "scheduler_hints" { - for_each = var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0]] : [] - content { - group = openstack_compute_servergroup_v2.k8s_node[0].id - } - } - - metadata = { - ssh_user = var.ssh_user_gfs - kubespray_groups = "gfs-cluster,network-storage,no_floating" - depends_on = var.network_router_id - use_access_ip = var.use_access_ip - } -} - -resource "openstack_networking_floatingip_associate_v2" "bastion" { - count = var.number_of_bastions - floating_ip = var.bastion_fips[count.index] - port_id = element(openstack_networking_port_v2.bastion_port.*.id, count.index) -} - - -resource "openstack_networking_floatingip_associate_v2" "k8s_master" { - count = var.number_of_k8s_masters - floating_ip = var.k8s_master_fips[count.index] - port_id = element(openstack_networking_port_v2.k8s_master_port.*.id, count.index) -} - -resource "openstack_networking_floatingip_associate_v2" "k8s_masters" { - for_each = var.number_of_k8s_masters == 0 && var.number_of_k8s_masters_no_etcd == 0 && var.number_of_k8s_masters_no_floating_ip == 0 && var.number_of_k8s_masters_no_floating_ip_no_etcd == 0 ? { for key, value in var.k8s_masters : key => value if value.floating_ip } : {} - floating_ip = var.k8s_masters_fips[each.key].address - port_id = openstack_networking_port_v2.k8s_masters_port[each.key].id -} - -resource "openstack_networking_floatingip_associate_v2" "k8s_master_no_etcd" { - count = var.master_root_volume_size_in_gb == 0 ? var.number_of_k8s_masters_no_etcd : 0 - floating_ip = var.k8s_master_no_etcd_fips[count.index] - port_id = element(openstack_networking_port_v2.k8s_master_no_etcd_port.*.id, count.index) -} - -resource "openstack_networking_floatingip_associate_v2" "k8s_node" { - count = var.node_root_volume_size_in_gb == 0 ? var.number_of_k8s_nodes : 0 - floating_ip = var.k8s_node_fips[count.index] - port_id = element(openstack_networking_port_v2.k8s_node_port.*.id, count.index) -} - -resource "openstack_networking_floatingip_associate_v2" "k8s_nodes" { - for_each = var.number_of_k8s_nodes == 0 && var.number_of_k8s_nodes_no_floating_ip == 0 ? 
{ for key, value in var.k8s_nodes : key => value if value.floating_ip } : {} - floating_ip = var.k8s_nodes_fips[each.key].address - port_id = openstack_networking_port_v2.k8s_nodes_port[each.key].id -} - -resource "openstack_blockstorage_volume_v2" "glusterfs_volume" { - name = "${var.cluster_name}-glusterfs_volume-${count.index + 1}" - count = var.gfs_root_volume_size_in_gb == 0 ? var.number_of_gfs_nodes_no_floating_ip : 0 - description = "Non-ephemeral volume for GlusterFS" - size = var.gfs_volume_size_in_gb -} - -resource "openstack_compute_volume_attach_v2" "glusterfs_volume" { - count = var.gfs_root_volume_size_in_gb == 0 ? var.number_of_gfs_nodes_no_floating_ip : 0 - instance_id = element(openstack_compute_instance_v2.glusterfs_node_no_floating_ip.*.id, count.index) - volume_id = element(openstack_blockstorage_volume_v2.glusterfs_volume.*.id, count.index) -} diff --git a/contrib/terraform/openstack/modules/compute/outputs.tf b/contrib/terraform/openstack/modules/compute/outputs.tf deleted file mode 100644 index 741e9f035df..00000000000 --- a/contrib/terraform/openstack/modules/compute/outputs.tf +++ /dev/null @@ -1,3 +0,0 @@ -output "k8s_master_ips" { - value = concat(openstack_compute_instance_v2.k8s_master_no_floating_ip.*, openstack_compute_instance_v2.k8s_master_no_floating_ip_no_etcd.*) -} diff --git a/contrib/terraform/openstack/modules/compute/templates/cloudinit.yaml.tmpl b/contrib/terraform/openstack/modules/compute/templates/cloudinit.yaml.tmpl deleted file mode 100644 index fd05cc44e50..00000000000 --- a/contrib/terraform/openstack/modules/compute/templates/cloudinit.yaml.tmpl +++ /dev/null @@ -1,54 +0,0 @@ -%{~ if length(extra_partitions) > 0 || netplan_critical_dhcp_interface != "" } -#cloud-config -bootcmd: -%{~ for idx, partition in extra_partitions } -- [ cloud-init-per, once, move-second-header, sgdisk, --move-second-header, ${partition.volume_path} ] -- [ cloud-init-per, once, create-part-${idx}, parted, --script, ${partition.volume_path}, 'mkpart extended ext4 ${partition.partition_start} ${partition.partition_end}' ] -- [ cloud-init-per, once, create-fs-part-${idx}, mkfs.ext4, ${partition.partition_path} ] -%{~ endfor } - -runcmd: -%{~ if netplan_critical_dhcp_interface != "" } - - netplan apply -%{~ endif } -%{~ for idx, partition in extra_partitions } - - mkdir -p ${partition.mount_path} - - chown nobody:nogroup ${partition.mount_path} - - mount ${partition.partition_path} ${partition.mount_path} -%{~ endfor ~} - -%{~ if netplan_critical_dhcp_interface != "" } -write_files: - - path: /etc/netplan/90-critical-dhcp.yaml - content: | - network: - version: 2 - ethernets: - ${ netplan_critical_dhcp_interface }: - dhcp4: true - critical: true -%{~ endif } - -mounts: -%{~ for idx, partition in extra_partitions } - - [ ${partition.partition_path}, ${partition.mount_path} ] -%{~ endfor } -%{~ else ~} -# yamllint disable rule:comments -#cloud-config -## in some cases novnc console access is required -## it requires ssh password to be set -#ssh_pwauth: yes -#chpasswd: -# list: | -# root:secret -# expire: False - -## in some cases direct root ssh access via ssh key is required -#disable_root: false - -## in some cases additional CA certs are required -#ca-certs: -# trusted: | -# -----BEGIN CERTIFICATE----- -%{~ endif } diff --git a/contrib/terraform/openstack/modules/compute/variables.tf b/contrib/terraform/openstack/modules/compute/variables.tf deleted file mode 100644 index ed478de3c2f..00000000000 --- a/contrib/terraform/openstack/modules/compute/variables.tf +++ 
/dev/null @@ -1,269 +0,0 @@ -variable "cluster_name" {} - -variable "az_list" { - type = list(string) -} - -variable "az_list_node" { - type = list(string) -} - -variable "number_of_k8s_masters" {} - -variable "number_of_k8s_masters_no_etcd" {} - -variable "number_of_etcd" {} - -variable "number_of_k8s_masters_no_floating_ip" {} - -variable "number_of_k8s_masters_no_floating_ip_no_etcd" {} - -variable "number_of_k8s_nodes" {} - -variable "number_of_k8s_nodes_no_floating_ip" {} - -variable "number_of_bastions" {} - -variable "number_of_gfs_nodes_no_floating_ip" {} - -variable "bastion_root_volume_size_in_gb" {} - -variable "etcd_root_volume_size_in_gb" {} - -variable "master_root_volume_size_in_gb" {} - -variable "node_root_volume_size_in_gb" {} - -variable "gfs_root_volume_size_in_gb" {} - -variable "gfs_volume_size_in_gb" {} - -variable "master_volume_type" {} - -variable "node_volume_type" {} - -variable "public_key_path" {} - -variable "image" {} - -variable "image_gfs" {} - -variable "ssh_user" {} - -variable "ssh_user_gfs" {} - -variable "flavor_k8s_master" {} - -variable "flavor_k8s_node" {} - -variable "flavor_etcd" {} - -variable "flavor_gfs_node" {} - -variable "network_name" {} - -variable "flavor_bastion" {} - -variable "network_id" { - default = "" -} - -variable "use_existing_network" { - type = bool -} - -variable "network_router_id" { - default = "" -} - -variable "k8s_master_fips" { - type = list -} - -variable "k8s_master_no_etcd_fips" { - type = list -} - -variable "k8s_node_fips" { - type = list -} - -variable "k8s_masters_fips" { - type = map(object({ - address = string - })) -} - -variable "k8s_nodes_fips" { - type = map(object({ - address = string - })) -} - -variable "bastion_fips" { - type = list -} - -variable "bastion_allowed_remote_ips" { - type = list -} - -variable "bastion_allowed_remote_ipv6_ips" { - type = list -} - -variable "master_allowed_remote_ips" { - type = list -} - -variable "master_allowed_remote_ipv6_ips" { - type = list -} - -variable "k8s_allowed_remote_ips" { - type = list -} - -variable "k8s_allowed_remote_ips_ipv6" { - type = list -} - -variable "k8s_allowed_egress_ips" { - type = list -} - -variable "k8s_allowed_egress_ipv6_ips" { - type = list -} - -variable "k8s_masters" { - type = map(object({ - az = string - flavor = string - etcd = bool - floating_ip = bool - reserved_floating_ip = optional(string) - image_id = optional(string) - root_volume_size_in_gb = optional(number) - volume_type = optional(string) - network_id = optional(string) - })) -} - -variable "k8s_nodes" { - type = map(object({ - az = string - flavor = string - floating_ip = bool - reserved_floating_ip = optional(string) - extra_groups = optional(string) - image_id = optional(string) - root_volume_size_in_gb = optional(number) - volume_type = optional(string) - network_id = optional(string) - additional_server_groups = optional(list(string)) - server_group = optional(string) - cloudinit = optional(object({ - extra_partitions = optional(list(object({ - volume_path = string - partition_path = string - partition_start = string - partition_end = string - mount_path = string - })), []) - netplan_critical_dhcp_interface = optional(string, "") - })) - })) -} - -variable "additional_server_groups" { - type = map(object({ - policy = string - })) -} - -variable "supplementary_master_groups" { - default = "" -} - -variable "supplementary_node_groups" { - default = "" -} - -variable "master_allowed_ports" { - type = list -} - -variable "master_allowed_ports_ipv6" { - type = list -} - 
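# ---------------------------------------------------------------------------
# Editorial sketch (not part of the deleted variables.tf): the "k8s_nodes"
# object type declared above accepts per-node overrides, including an optional
# "cloudinit" block for extra partitions. A hypothetical entry that satisfies
# the declared type might look like the following; the flavor id, device paths
# and mount point are placeholders.
#
#   k8s_nodes = {
#     "worker-a" = {
#       "az"           = "nova"
#       "flavor"       = "<flavor-id>"
#       "floating_ip"  = false
#       "extra_groups" = "kube_ingress"
#       "cloudinit" = {
#         "extra_partitions" = [
#           {
#             "volume_path"     = "/dev/vdb"
#             "partition_path"  = "/dev/vdb1"
#             "partition_start" = "0%"
#             "partition_end"   = "100%"
#             "mount_path"      = "/var/lib/containers"
#           }
#         ]
#       }
#     }
#   }
# ---------------------------------------------------------------------------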
-variable "worker_allowed_ports" { - type = list -} - -variable "worker_allowed_ports_ipv6" { - type = list -} - -variable "bastion_allowed_ports" { - type = list -} - -variable "bastion_allowed_ports_ipv6" { - type = list -} - -variable "use_access_ip" {} - -variable "master_server_group_policy" { - type = string -} - -variable "node_server_group_policy" { - type = string -} - -variable "etcd_server_group_policy" { - type = string -} - -variable "extra_sec_groups" { - type = bool -} - -variable "extra_sec_groups_name" { - type = string -} - -variable "image_uuid" { - type = string -} - -variable "image_gfs_uuid" { - type = string -} - -variable "image_master" { - type = string -} - -variable "image_master_uuid" { - type = string -} - -variable "group_vars_path" { - type = string -} - -variable "port_security_enabled" { - type = bool -} - -variable "force_null_port_security" { - type = bool -} - -variable "private_subnet_id" { - type = string -} diff --git a/contrib/terraform/openstack/modules/compute/versions.tf b/contrib/terraform/openstack/modules/compute/versions.tf deleted file mode 100644 index bfcf77a5c0d..00000000000 --- a/contrib/terraform/openstack/modules/compute/versions.tf +++ /dev/null @@ -1,8 +0,0 @@ -terraform { - required_providers { - openstack = { - source = "terraform-provider-openstack/openstack" - } - } - required_version = ">= 1.3.0" -} diff --git a/contrib/terraform/openstack/modules/ips/main.tf b/contrib/terraform/openstack/modules/ips/main.tf deleted file mode 100644 index 68a4af3ecdc..00000000000 --- a/contrib/terraform/openstack/modules/ips/main.tf +++ /dev/null @@ -1,46 +0,0 @@ -resource "null_resource" "dummy_dependency" { - triggers = { - dependency_id = var.router_id - } - depends_on = [ - var.router_internal_port_id - ] -} - -# If user specifies pre-existing IPs to use in k8s_master_fips, do not create new ones. -resource "openstack_networking_floatingip_v2" "k8s_master" { - count = length(var.k8s_master_fips) > 0 ? 0 : var.number_of_k8s_masters - pool = var.floatingip_pool - depends_on = [null_resource.dummy_dependency] -} - -resource "openstack_networking_floatingip_v2" "k8s_masters" { - for_each = var.number_of_k8s_masters == 0 && var.number_of_k8s_masters_no_etcd == 0 ? { for key, value in var.k8s_masters : key => value if value.floating_ip && (lookup(value, "reserved_floating_ip", "") == "") } : tomap({}) - pool = var.floatingip_pool - depends_on = [null_resource.dummy_dependency] -} - -# If user specifies pre-existing IPs to use in k8s_master_fips, do not create new ones. -resource "openstack_networking_floatingip_v2" "k8s_master_no_etcd" { - count = length(var.k8s_master_fips) > 0 ? 0 : var.number_of_k8s_masters_no_etcd - pool = var.floatingip_pool - depends_on = [null_resource.dummy_dependency] -} - -resource "openstack_networking_floatingip_v2" "k8s_node" { - count = var.number_of_k8s_nodes - pool = var.floatingip_pool - depends_on = [null_resource.dummy_dependency] -} - -resource "openstack_networking_floatingip_v2" "bastion" { - count = length(var.bastion_fips) > 0 ? 0 : var.number_of_bastions - pool = var.floatingip_pool - depends_on = [null_resource.dummy_dependency] -} - -resource "openstack_networking_floatingip_v2" "k8s_nodes" { - for_each = var.number_of_k8s_nodes == 0 ? 
{ for key, value in var.k8s_nodes : key => value if value.floating_ip && (lookup(value, "reserved_floating_ip", "") == "") } : tomap({}) - pool = var.floatingip_pool - depends_on = [null_resource.dummy_dependency] -} diff --git a/contrib/terraform/openstack/modules/ips/outputs.tf b/contrib/terraform/openstack/modules/ips/outputs.tf deleted file mode 100644 index 670481109af..00000000000 --- a/contrib/terraform/openstack/modules/ips/outputs.tf +++ /dev/null @@ -1,48 +0,0 @@ -locals { - k8s_masters_reserved_fips = { - for key, value in var.k8s_masters : key => { - address = value.reserved_floating_ip - } if value.floating_ip && (lookup(value, "reserved_floating_ip", "") != "") - } - k8s_masters_create_fips = { - for key, value in openstack_networking_floatingip_v2.k8s_masters : key => { - address = value.address - } - } - k8s_nodes_reserved_fips = { - for key, value in var.k8s_nodes : key => { - address = value.reserved_floating_ip - } if value.floating_ip && (lookup(value, "reserved_floating_ip", "") != "") - } - k8s_nodes_create_fips = { - for key, value in openstack_networking_floatingip_v2.k8s_nodes : key => { - address = value.address - } - } -} - -# If k8s_master_fips is already defined as input, keep the same value since new FIPs have not been created. -output "k8s_master_fips" { - value = length(var.k8s_master_fips) > 0 ? var.k8s_master_fips : openstack_networking_floatingip_v2.k8s_master[*].address -} - -output "k8s_masters_fips" { - value = merge(local.k8s_masters_create_fips, local.k8s_masters_reserved_fips) -} - -# If k8s_master_fips is already defined as input, keep the same value since new FIPs have not been created. -output "k8s_master_no_etcd_fips" { - value = length(var.k8s_master_fips) > 0 ? var.k8s_master_fips : openstack_networking_floatingip_v2.k8s_master_no_etcd[*].address -} - -output "k8s_node_fips" { - value = openstack_networking_floatingip_v2.k8s_node[*].address -} - -output "k8s_nodes_fips" { - value = merge(local.k8s_nodes_create_fips, local.k8s_nodes_reserved_fips) -} - -output "bastion_fips" { - value = length(var.bastion_fips) > 0 ? 
var.bastion_fips : openstack_networking_floatingip_v2.bastion[*].address -} diff --git a/contrib/terraform/openstack/modules/ips/variables.tf b/contrib/terraform/openstack/modules/ips/variables.tf deleted file mode 100644 index b52888b847f..00000000000 --- a/contrib/terraform/openstack/modules/ips/variables.tf +++ /dev/null @@ -1,27 +0,0 @@ -variable "number_of_k8s_masters" {} - -variable "number_of_k8s_masters_no_etcd" {} - -variable "number_of_k8s_nodes" {} - -variable "floatingip_pool" {} - -variable "number_of_bastions" {} - -variable "external_net" {} - -variable "network_name" {} - -variable "router_id" { - default = "" -} - -variable "k8s_masters" {} - -variable "k8s_nodes" {} - -variable "k8s_master_fips" {} - -variable "bastion_fips" {} - -variable "router_internal_port_id" {} diff --git a/contrib/terraform/openstack/modules/ips/versions.tf b/contrib/terraform/openstack/modules/ips/versions.tf deleted file mode 100644 index b7bf5a9cde3..00000000000 --- a/contrib/terraform/openstack/modules/ips/versions.tf +++ /dev/null @@ -1,11 +0,0 @@ -terraform { - required_providers { - null = { - source = "hashicorp/null" - } - openstack = { - source = "terraform-provider-openstack/openstack" - } - } - required_version = ">= 0.12.26" -} diff --git a/contrib/terraform/openstack/modules/loadbalancer/main.tf b/contrib/terraform/openstack/modules/loadbalancer/main.tf deleted file mode 100644 index 12fa225ea6e..00000000000 --- a/contrib/terraform/openstack/modules/loadbalancer/main.tf +++ /dev/null @@ -1,54 +0,0 @@ -resource "openstack_lb_loadbalancer_v2" "k8s_lb" { - count = var.k8s_master_loadbalancer_enabled ? 1 : 0 - name = "${var.cluster_name}-api-loadbalancer" - vip_subnet_id = var.subnet_id -} - -resource "openstack_lb_listener_v2" "api_listener"{ - count = var.k8s_master_loadbalancer_enabled ? 1 : 0 - name = "api-listener" - protocol = "TCP" - protocol_port = var.k8s_master_loadbalancer_listener_port - loadbalancer_id = openstack_lb_loadbalancer_v2.k8s_lb[0].id - depends_on = [ openstack_lb_loadbalancer_v2.k8s_lb ] -} - -resource "openstack_lb_pool_v2" "api_pool" { - count = var.k8s_master_loadbalancer_enabled ? 1 : 0 - name = "api-pool" - protocol = "TCP" - lb_method = "ROUND_ROBIN" - listener_id = openstack_lb_listener_v2.api_listener[0].id - depends_on = [ openstack_lb_listener_v2.api_listener ] -} - -resource "openstack_lb_member_v2" "lb_member" { - count = var.k8s_master_loadbalancer_enabled ? length(var.k8s_master_ips) : 0 - name = var.k8s_master_ips[count.index].name - pool_id = openstack_lb_pool_v2.api_pool[0].id - address = var.k8s_master_ips[count.index].access_ip_v4 - protocol_port = var.k8s_master_loadbalancer_server_port - depends_on = [ openstack_lb_pool_v2.api_pool ] -} - -resource "openstack_lb_monitor_v2" "monitor" { - count = var.k8s_master_loadbalancer_enabled ? 1 : 0 - name = "Api Monitor" - pool_id = openstack_lb_pool_v2.api_pool[0].id - type = "TCP" - delay = 10 - timeout = 5 - max_retries = 5 -} - -resource "openstack_networking_floatingip_v2" "floatip_1" { - count = var.k8s_master_loadbalancer_enabled && var.k8s_master_loadbalancer_public_ip == "" ? 1 : 0 - pool = var.floatingip_pool -} - -resource "openstack_networking_floatingip_associate_v2" "public_ip" { - count = var.k8s_master_loadbalancer_enabled ? 1 : 0 - floating_ip = var.k8s_master_loadbalancer_public_ip != "" ? 
var.k8s_master_loadbalancer_public_ip : openstack_networking_floatingip_v2.floatip_1[0].address - port_id = openstack_lb_loadbalancer_v2.k8s_lb[0].vip_port_id - depends_on = [ openstack_lb_loadbalancer_v2.k8s_lb ] -} diff --git a/contrib/terraform/openstack/modules/loadbalancer/variables.tf b/contrib/terraform/openstack/modules/loadbalancer/variables.tf deleted file mode 100644 index 40b1b588cef..00000000000 --- a/contrib/terraform/openstack/modules/loadbalancer/variables.tf +++ /dev/null @@ -1,15 +0,0 @@ -variable "cluster_name" {} - -variable "subnet_id" {} - -variable "floatingip_pool" {} - -variable "k8s_master_ips" {} - -variable "k8s_master_loadbalancer_enabled" {} - -variable "k8s_master_loadbalancer_listener_port" {} - -variable "k8s_master_loadbalancer_server_port" {} - -variable "k8s_master_loadbalancer_public_ip" {} diff --git a/contrib/terraform/openstack/modules/loadbalancer/versions.tf b/contrib/terraform/openstack/modules/loadbalancer/versions.tf deleted file mode 100644 index 6c942790da8..00000000000 --- a/contrib/terraform/openstack/modules/loadbalancer/versions.tf +++ /dev/null @@ -1,8 +0,0 @@ -terraform { - required_providers { - openstack = { - source = "terraform-provider-openstack/openstack" - } - } - required_version = ">= 0.12.26" -} diff --git a/contrib/terraform/openstack/modules/network/main.tf b/contrib/terraform/openstack/modules/network/main.tf deleted file mode 100644 index a6324d7edab..00000000000 --- a/contrib/terraform/openstack/modules/network/main.tf +++ /dev/null @@ -1,34 +0,0 @@ -resource "openstack_networking_router_v2" "k8s" { - name = "${var.cluster_name}-router" - count = var.use_neutron == 1 && var.router_id == null ? 1 : 0 - admin_state_up = "true" - external_network_id = var.external_net -} - -data "openstack_networking_router_v2" "k8s" { - router_id = var.router_id - count = var.use_neutron == 1 && var.router_id != null ? 1 : 0 -} - -resource "openstack_networking_network_v2" "k8s" { - name = var.network_name - count = var.use_neutron - dns_domain = var.network_dns_domain != null ? var.network_dns_domain : null - admin_state_up = "true" - port_security_enabled = var.port_security_enabled -} - -resource "openstack_networking_subnet_v2" "k8s" { - name = "${var.cluster_name}-internal-network" - count = var.use_neutron - network_id = openstack_networking_network_v2.k8s[count.index].id - cidr = var.subnet_cidr - ip_version = 4 - dns_nameservers = var.dns_nameservers -} - -resource "openstack_networking_router_interface_v2" "k8s" { - count = var.use_neutron - router_id = "%{if openstack_networking_router_v2.k8s != []}${openstack_networking_router_v2.k8s[count.index].id}%{else}${var.router_id}%{endif}" - subnet_id = openstack_networking_subnet_v2.k8s[count.index].id -} diff --git a/contrib/terraform/openstack/modules/network/outputs.tf b/contrib/terraform/openstack/modules/network/outputs.tf deleted file mode 100644 index 0e8a5004f33..00000000000 --- a/contrib/terraform/openstack/modules/network/outputs.tf +++ /dev/null @@ -1,15 +0,0 @@ -output "router_id" { - value = "%{if var.use_neutron == 1} ${var.router_id == null ? 
element(concat(openstack_networking_router_v2.k8s.*.id, [""]), 0) : var.router_id} %{else} %{endif}" -} - -output "network_id" { - value = element(concat(openstack_networking_network_v2.k8s.*.id, [""]),0) -} - -output "router_internal_port_id" { - value = element(concat(openstack_networking_router_interface_v2.k8s.*.id, [""]), 0) -} - -output "subnet_id" { - value = element(concat(openstack_networking_subnet_v2.k8s.*.id, [""]), 0) -} diff --git a/contrib/terraform/openstack/modules/network/variables.tf b/contrib/terraform/openstack/modules/network/variables.tf deleted file mode 100644 index 6cd7ff72e5b..00000000000 --- a/contrib/terraform/openstack/modules/network/variables.tf +++ /dev/null @@ -1,21 +0,0 @@ -variable "external_net" {} - -variable "network_name" {} - -variable "network_dns_domain" {} - -variable "cluster_name" {} - -variable "dns_nameservers" { - type = list -} - -variable "port_security_enabled" { - type = bool -} - -variable "subnet_cidr" {} - -variable "use_neutron" {} - -variable "router_id" {} diff --git a/contrib/terraform/openstack/modules/network/versions.tf b/contrib/terraform/openstack/modules/network/versions.tf deleted file mode 100644 index 6c942790da8..00000000000 --- a/contrib/terraform/openstack/modules/network/versions.tf +++ /dev/null @@ -1,8 +0,0 @@ -terraform { - required_providers { - openstack = { - source = "terraform-provider-openstack/openstack" - } - } - required_version = ">= 0.12.26" -} diff --git a/contrib/terraform/openstack/sample-inventory/cluster.tfvars b/contrib/terraform/openstack/sample-inventory/cluster.tfvars deleted file mode 100644 index 8ab7c6d38e4..00000000000 --- a/contrib/terraform/openstack/sample-inventory/cluster.tfvars +++ /dev/null @@ -1,89 +0,0 @@ -# your Kubernetes cluster name here -cluster_name = "i-didnt-read-the-docs" - -# list of availability zones available in your OpenStack cluster -#az_list = ["nova"] - -# SSH key to use for access to nodes -public_key_path = "~/.ssh/id_rsa.pub" - -# image to use for bastion, masters, standalone etcd instances, and nodes -image = "" - -# user on the node (ex. core on Container Linux, ubuntu on Ubuntu, etc.) -ssh_user = "" - -# 0|1 bastion nodes -number_of_bastions = 0 - -#flavor_bastion = "" - -# standalone etcds -number_of_etcd = 0 - -# masters -number_of_k8s_masters = 1 - -number_of_k8s_masters_no_etcd = 0 - -number_of_k8s_masters_no_floating_ip = 0 - -number_of_k8s_masters_no_floating_ip_no_etcd = 0 - -flavor_k8s_master = "" - -k8s_masters = { - # "master-1" = { - # "az" = "nova" - # "flavor" = "" - # "floating_ip" = true - # "etcd" = true - # }, - # "master-2" = { - # "az" = "nova" - # "flavor" = "" - # "floating_ip" = false - # "etcd" = true - # }, - # "master-3" = { - # "az" = "nova" - # "flavor" = "" - # "floating_ip" = true - # "etcd" = true - # }, -} - - -# nodes -number_of_k8s_nodes = 2 - -number_of_k8s_nodes_no_floating_ip = 4 - -#flavor_k8s_node = "" - -# GlusterFS -# either 0 or more than one -#number_of_gfs_nodes_no_floating_ip = 0 -#gfs_volume_size_in_gb = 150 -# Container Linux does not support GlusterFS -#image_gfs = "" -# May be different from other nodes -#ssh_user_gfs = "ubuntu" -#flavor_gfs_node = "" - -# networking -network_name = "" - -# Use a existing network with the name of network_name. Set to false to create a network with name of network_name. -# use_existing_network = true - -external_net = "" - -subnet_cidr = "" - -floatingip_pool = "" - -bastion_allowed_remote_ips = ["0.0.0.0/0"] - -# Force port security to be null. 
Some cloud providers do not allow to set port security. -# force_null_port_security = false diff --git a/contrib/terraform/openstack/sample-inventory/group_vars b/contrib/terraform/openstack/sample-inventory/group_vars deleted file mode 120000 index 37359582379..00000000000 --- a/contrib/terraform/openstack/sample-inventory/group_vars +++ /dev/null @@ -1 +0,0 @@ -../../../../inventory/sample/group_vars \ No newline at end of file diff --git a/contrib/terraform/openstack/variables.tf b/contrib/terraform/openstack/variables.tf deleted file mode 100644 index 90416df50d0..00000000000 --- a/contrib/terraform/openstack/variables.tf +++ /dev/null @@ -1,411 +0,0 @@ -variable "cluster_name" { - default = "example" -} - -variable "az_list" { - description = "List of Availability Zones to use for masters in your OpenStack cluster" - type = list(string) - default = ["nova"] -} - -variable "az_list_node" { - description = "List of Availability Zones to use for nodes in your OpenStack cluster" - type = list(string) - default = ["nova"] -} - -variable "number_of_bastions" { - default = 1 -} - -variable "number_of_k8s_masters" { - default = 2 -} - -variable "number_of_k8s_masters_no_etcd" { - default = 2 -} - -variable "number_of_etcd" { - default = 2 -} - -variable "number_of_k8s_masters_no_floating_ip" { - default = 2 -} - -variable "number_of_k8s_masters_no_floating_ip_no_etcd" { - default = 2 -} - -variable "number_of_k8s_nodes" { - default = 1 -} - -variable "number_of_k8s_nodes_no_floating_ip" { - default = 1 -} - -variable "number_of_gfs_nodes_no_floating_ip" { - default = 0 -} - -variable "bastion_root_volume_size_in_gb" { - default = 0 -} - -variable "etcd_root_volume_size_in_gb" { - default = 0 -} - -variable "master_root_volume_size_in_gb" { - default = 0 -} - -variable "node_root_volume_size_in_gb" { - default = 0 -} - -variable "gfs_root_volume_size_in_gb" { - default = 0 -} - -variable "gfs_volume_size_in_gb" { - default = 75 -} - -variable "master_volume_type" { - default = "Default" -} - -variable "node_volume_type" { - default = "Default" -} - -variable "public_key_path" { - description = "The path of the ssh pub key" - default = "~/.ssh/id_rsa.pub" -} - -variable "image" { - description = "the image to use" - default = "" -} - -variable "image_gfs" { - description = "Glance image to use for GlusterFS" - default = "" -} - -variable "ssh_user" { - description = "used to fill out tags for ansible inventory" - default = "ubuntu" -} - -variable "ssh_user_gfs" { - description = "used to fill out tags for ansible inventory" - default = "ubuntu" -} - -variable "flavor_bastion" { - description = "Use 'openstack flavor list' command to see what your OpenStack instance uses for IDs" - default = 3 -} - -variable "flavor_k8s_master" { - description = "Use 'openstack flavor list' command to see what your OpenStack instance uses for IDs" - default = 3 -} - -variable "flavor_k8s_node" { - description = "Use 'openstack flavor list' command to see what your OpenStack instance uses for IDs" - default = 3 -} - -variable "flavor_etcd" { - description = "Use 'openstack flavor list' command to see what your OpenStack instance uses for IDs" - default = 3 -} - -variable "flavor_gfs_node" { - description = "Use 'openstack flavor list' command to see what your OpenStack instance uses for IDs" - default = 3 -} - -variable "network_name" { - description = "name of the internal network to use" - default = "internal" -} - -variable "use_existing_network" { - description = "Use an existing network" - type = bool - 
default = "false" -} - -variable "network_dns_domain" { - description = "dns_domain for the internal network" - type = string - default = null -} - -variable "use_neutron" { - description = "Use neutron" - default = 1 -} - -variable "port_security_enabled" { - description = "Enable port security on the internal network" - type = bool - default = "true" -} - -variable "force_null_port_security" { - description = "Force port security to be null. Some providers does not allow setting port security" - type = bool - default = "false" -} - -variable "subnet_cidr" { - description = "Subnet CIDR block." - type = string - default = "10.0.0.0/24" -} - -variable "dns_nameservers" { - description = "An array of DNS name server names used by hosts in this subnet." - type = list(string) - default = [] -} - -variable "k8s_master_fips" { - description = "specific pre-existing floating IPs to use for master nodes" - type = list(string) - default = [] -} - -variable "bastion_fips" { - description = "specific pre-existing floating IPs to use for bastion node" - type = list(string) - default = [] -} - -variable "floatingip_pool" { - description = "name of the floating ip pool to use" - default = "external" -} - -variable "wait_for_floatingip" { - description = "Terraform will poll the instance until the floating IP has been associated." - default = "false" -} - -variable "external_net" { - description = "uuid of the external/public network" -} - -variable "supplementary_master_groups" { - description = "supplementary kubespray ansible groups for masters, such kube_node" - default = "" -} - -variable "supplementary_node_groups" { - description = "supplementary kubespray ansible groups for worker nodes, such as kube_ingress" - default = "" -} - -variable "bastion_allowed_remote_ips" { - description = "An array of CIDRs allowed to SSH to hosts" - type = list(string) - default = ["0.0.0.0/0"] -} - -variable "bastion_allowed_remote_ipv6_ips" { - description = "An array of IPv6 CIDRs allowed to SSH to hosts" - type = list(string) - default = ["::/0"] -} - -variable "master_allowed_remote_ips" { - description = "An array of CIDRs allowed to access API of masters" - type = list(string) - default = ["0.0.0.0/0"] -} - -variable "master_allowed_remote_ipv6_ips" { - description = "An array of IPv6 CIDRs allowed to access API of masters" - type = list(string) - default = ["::/0"] -} - -variable "k8s_allowed_remote_ips" { - description = "An array of CIDRs allowed to SSH to hosts" - type = list(string) - default = [] -} - -variable "k8s_allowed_remote_ips_ipv6" { - description = "An array of IPv6 CIDRs allowed to SSH to hosts" - type = list(string) - default = [] -} - -variable "k8s_allowed_egress_ips" { - description = "An array of CIDRs allowed for egress traffic" - type = list(string) - default = ["0.0.0.0/0"] -} - -variable "k8s_allowed_egress_ipv6_ips" { - description = "An array of CIDRs allowed for egress IPv6 traffic" - type = list(string) - default = ["::/0"] -} - -variable "master_allowed_ports" { - type = list(any) - - default = [] -} - -variable "master_allowed_ports_ipv6" { - type = list(any) - - default = [] -} - -variable "worker_allowed_ports" { - type = list(any) - - default = [ - { - "protocol" = "tcp" - "port_range_min" = 30000 - "port_range_max" = 32767 - "remote_ip_prefix" = "0.0.0.0/0" - }, - ] -} - -variable "worker_allowed_ports_ipv6" { - type = list(any) - - default = [ - { - "protocol" = "tcp" - "port_range_min" = 30000 - "port_range_max" = 32767 - "remote_ip_prefix" = "::/0" - }, - ] -} - 
-variable "bastion_allowed_ports" { - type = list(any) - - default = [] -} - -variable "bastion_allowed_ports_ipv6" { - type = list(any) - - default = [] -} - -variable "use_access_ip" { - default = 1 -} - -variable "master_server_group_policy" { - description = "desired server group policy, e.g. anti-affinity" - default = "" -} - -variable "node_server_group_policy" { - description = "desired server group policy, e.g. anti-affinity" - default = "" -} - -variable "etcd_server_group_policy" { - description = "desired server group policy, e.g. anti-affinity" - default = "" -} - -variable "router_id" { - description = "uuid of an externally defined router to use" - default = null -} - -variable "router_internal_port_id" { - description = "uuid of the port connection our router to our network" - default = null -} - -variable "k8s_masters" { - default = {} -} - -variable "k8s_nodes" { - default = {} -} - -variable "additional_server_groups" { - default = {} - type = map(object({ - policy = string - })) -} - -variable "extra_sec_groups" { - default = false -} - -variable "extra_sec_groups_name" { - default = "custom" -} - -variable "image_uuid" { - description = "uuid of image inside openstack to use" - default = "" -} - -variable "image_gfs_uuid" { - description = "uuid of image to be used on gluster fs nodes. If empty defaults to image_uuid" - default = "" -} - -variable "image_master" { - description = "uuid of image inside openstack to use" - default = "" -} - -variable "image_master_uuid" { - description = "uuid of image to be used on master nodes. If empty defaults to image_uuid" - default = "" -} - -variable "group_vars_path" { - description = "path to the inventory group vars directory" - type = string - default = "./group_vars" -} - -variable "k8s_master_loadbalancer_enabled" { - type = bool - default = "false" -} - -variable "k8s_master_loadbalancer_listener_port" { - type = string - default = "6443" -} - -variable "k8s_master_loadbalancer_server_port" { - type = string - default = 6443 -} - -variable "k8s_master_loadbalancer_public_ip" { - type = string - default = "" -} diff --git a/contrib/terraform/openstack/versions.tf b/contrib/terraform/openstack/versions.tf deleted file mode 100644 index 6e4c1045bcf..00000000000 --- a/contrib/terraform/openstack/versions.tf +++ /dev/null @@ -1,9 +0,0 @@ -terraform { - required_providers { - openstack = { - source = "terraform-provider-openstack/openstack" - version = "~> 1.17" - } - } - required_version = ">= 1.3.0" -} diff --git a/contrib/terraform/terraform.py b/contrib/terraform/terraform.py deleted file mode 100755 index 9f6132711ed..00000000000 --- a/contrib/terraform/terraform.py +++ /dev/null @@ -1,475 +0,0 @@ -#!/usr/bin/env python3 -# -# Copyright 2015 Cisco Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# original: https://github.com/CiscoCloud/terraform.py - -"""\ -Dynamic inventory for Terraform - finds all `.tfstate` files below the working -directory and generates an inventory based on them. 
-""" -import argparse -from collections import defaultdict -import random -from functools import wraps -import json -import os -import re - -VERSION = '0.4.0pre' - - -def tfstates(root=None): - root = root or os.getcwd() - for dirpath, _, filenames in os.walk(root): - for name in filenames: - if os.path.splitext(name)[-1] == '.tfstate': - yield os.path.join(dirpath, name) - -def convert_to_v3_structure(attributes, prefix=''): - """ Convert the attributes from v4 to v3 - Receives a dict and return a dictionary """ - result = {} - if isinstance(attributes, str): - # In the case when we receive a string (e.g. values for security_groups) - return {'{}{}'.format(prefix, random.randint(1,10**10)): attributes} - for key, value in attributes.items(): - if isinstance(value, list): - if len(value): - result['{}{}.#'.format(prefix, key, hash)] = len(value) - for i, v in enumerate(value): - result.update(convert_to_v3_structure(v, '{}{}.{}.'.format(prefix, key, i))) - elif isinstance(value, dict): - result['{}{}.%'.format(prefix, key)] = len(value) - for k, v in value.items(): - result['{}{}.{}'.format(prefix, key, k)] = v - else: - result['{}{}'.format(prefix, key)] = value - return result - -def iterresources(filenames): - for filename in filenames: - with open(filename, 'r') as json_file: - state = json.load(json_file) - tf_version = state['version'] - if tf_version == 3: - for module in state['modules']: - name = module['path'][-1] - for key, resource in module['resources'].items(): - yield name, key, resource - elif tf_version == 4: - # In version 4 the structure changes so we need to iterate - # each instance inside the resource branch. - for resource in state['resources']: - name = resource['provider'].split('.')[-1] - for instance in resource['instances']: - key = "{}.{}".format(resource['type'], resource['name']) - if 'index_key' in instance: - key = "{}.{}".format(key, instance['index_key']) - data = {} - data['type'] = resource['type'] - data['provider'] = resource['provider'] - data['depends_on'] = instance.get('depends_on', []) - data['primary'] = {'attributes': convert_to_v3_structure(instance['attributes'])} - if 'id' in instance['attributes']: - data['primary']['id'] = instance['attributes']['id'] - data['primary']['meta'] = instance['attributes'].get('meta',{}) - yield name, key, data - else: - raise KeyError('tfstate version %d not supported' % tf_version) - - -## READ RESOURCES -PARSERS = {} - - -def _clean_dc(dcname): - # Consul DCs are strictly alphanumeric with underscores and hyphens - - # ensure that the consul_dc attribute meets these requirements. 
- return re.sub(r'[^\w_\-]', '-', dcname) - - -def iterhosts(resources): - '''yield host tuples of (name, attributes, groups)''' - for module_name, key, resource in resources: - resource_type, name = key.split('.', 1) - try: - parser = PARSERS[resource_type] - except KeyError: - continue - - yield parser(resource, module_name) - - -def iterips(resources): - '''yield ip tuples of (port_id, ip)''' - for module_name, key, resource in resources: - resource_type, name = key.split('.', 1) - if resource_type == 'openstack_networking_floatingip_associate_v2': - yield openstack_floating_ips(resource) - - -def parses(prefix): - def inner(func): - PARSERS[prefix] = func - return func - - return inner - - -def calculate_mantl_vars(func): - """calculate Mantl vars""" - - @wraps(func) - def inner(*args, **kwargs): - name, attrs, groups = func(*args, **kwargs) - - # attrs - if attrs.get('role', '') == 'control': - attrs['consul_is_server'] = True - else: - attrs['consul_is_server'] = False - - # groups - if attrs.get('publicly_routable', False): - groups.append('publicly_routable') - - return name, attrs, groups - - return inner - - -def _parse_prefix(source, prefix, sep='.'): - for compkey, value in list(source.items()): - try: - curprefix, rest = compkey.split(sep, 1) - except ValueError: - continue - - if curprefix != prefix or rest == '#': - continue - - yield rest, value - - -def parse_attr_list(source, prefix, sep='.'): - attrs = defaultdict(dict) - for compkey, value in _parse_prefix(source, prefix, sep): - idx, key = compkey.split(sep, 1) - attrs[idx][key] = value - - return list(attrs.values()) - - -def parse_dict(source, prefix, sep='.'): - return dict(_parse_prefix(source, prefix, sep)) - - -def parse_list(source, prefix, sep='.'): - return [value for _, value in _parse_prefix(source, prefix, sep)] - - -def parse_bool(string_form): - if type(string_form) is bool: - return string_form - - token = string_form.lower()[0] - - if token == 't': - return True - elif token == 'f': - return False - else: - raise ValueError('could not convert %r to a bool' % string_form) - -def sanitize_groups(groups): - _groups = [] - chars_to_replace = ['+', '-', '=', '.', '/', ' '] - for i in groups: - _i = i - for char in chars_to_replace: - _i = _i.replace(char, '_') - _groups.append(_i) - groups.clear() - groups.extend(_groups) - -@parses('equinix_metal_device') -def equinix_metal_device(resource, tfvars=None): - raw_attrs = resource['primary']['attributes'] - name = raw_attrs['hostname'] - groups = [] - - attrs = { - 'id': raw_attrs['id'], - 'facilities': parse_list(raw_attrs, 'facilities'), - 'hostname': raw_attrs['hostname'], - 'operating_system': raw_attrs['operating_system'], - 'locked': parse_bool(raw_attrs['locked']), - 'tags': parse_list(raw_attrs, 'tags'), - 'plan': raw_attrs['plan'], - 'project_id': raw_attrs['project_id'], - 'state': raw_attrs['state'], - # ansible - 'ansible_host': raw_attrs['network.0.address'], - 'ansible_ssh_user': 'root', # Use root by default in metal - # generic - 'ipv4_address': raw_attrs['network.0.address'], - 'public_ipv4': raw_attrs['network.0.address'], - 'ipv6_address': raw_attrs['network.1.address'], - 'public_ipv6': raw_attrs['network.1.address'], - 'private_ipv4': raw_attrs['network.2.address'], - 'provider': 'equinix', - } - - if raw_attrs['operating_system'] == 'flatcar_stable': - # For Flatcar set the ssh_user to core - attrs.update({'ansible_ssh_user': 'core'}) - - # add groups based on attrs - groups.append('equinix_metal_operating_system_%s' % 
attrs['operating_system']) - groups.append('equinix_metal_locked_%s' % attrs['locked']) - groups.append('equinix_metal_state_%s' % attrs['state']) - groups.append('equinix_metal_plan_%s' % attrs['plan']) - - # groups specific to kubespray - groups = groups + attrs['tags'] - sanitize_groups(groups) - - return name, attrs, groups - - -def openstack_floating_ips(resource): - raw_attrs = resource['primary']['attributes'] - attrs = { - 'ip': raw_attrs['floating_ip'], - 'port_id': raw_attrs['port_id'], - } - return attrs - -def openstack_floating_ips(resource): - raw_attrs = resource['primary']['attributes'] - return raw_attrs['port_id'], raw_attrs['floating_ip'] - -@parses('openstack_compute_instance_v2') -@calculate_mantl_vars -def openstack_host(resource, module_name): - raw_attrs = resource['primary']['attributes'] - name = raw_attrs['name'] - groups = [] - - attrs = { - 'access_ip_v4': raw_attrs['access_ip_v4'], - 'access_ip_v6': raw_attrs['access_ip_v6'], - 'access_ip': raw_attrs['access_ip_v4'], - 'access_ip6': raw_attrs['access_ip_v6'], - 'ip': raw_attrs['network.0.fixed_ip_v4'], - 'flavor': parse_dict(raw_attrs, 'flavor', - sep='_'), - 'id': raw_attrs['id'], - 'image': parse_dict(raw_attrs, 'image', - sep='_'), - 'key_pair': raw_attrs['key_pair'], - 'metadata': parse_dict(raw_attrs, 'metadata'), - 'network': parse_attr_list(raw_attrs, 'network'), - 'region': raw_attrs.get('region', ''), - 'security_groups': parse_list(raw_attrs, 'security_groups'), - # workaround for an OpenStack bug where hosts have a different domain - # after they're restarted - 'host_domain': 'novalocal', - 'use_host_domain': True, - # generic - 'public_ipv4': raw_attrs['access_ip_v4'], - 'private_ipv4': raw_attrs['access_ip_v4'], - 'port_id' : raw_attrs['network.0.port'], - 'provider': 'openstack', - } - - if 'floating_ip' in raw_attrs: - attrs['private_ipv4'] = raw_attrs['network.0.fixed_ip_v4'] - - if 'metadata.use_access_ip' in raw_attrs and raw_attrs['metadata.use_access_ip'] == "0": - attrs.pop('access_ip') - - try: - if 'metadata.prefer_ipv6' in raw_attrs and raw_attrs['metadata.prefer_ipv6'] == "1": - attrs.update({ - 'ansible_host': re.sub(r"[\[\]]", "", raw_attrs['access_ip_v6']), - 'publicly_routable': True, - }) - else: - attrs.update({ - 'ansible_host': raw_attrs['access_ip_v4'], - 'publicly_routable': True, - }) - except (KeyError, ValueError): - attrs.update({'ansible_host': '', 'publicly_routable': False}) - - # Handling of floating IPs has changed: https://github.com/terraform-providers/terraform-provider-openstack/blob/master/CHANGELOG.md#010-june-21-2017 - - # attrs specific to Ansible - if 'metadata.ssh_user' in raw_attrs: - attrs['ansible_user'] = raw_attrs['metadata.ssh_user'] - if 'metadata.ssh_port' in raw_attrs: - attrs['ansible_port'] = raw_attrs['metadata.ssh_port'] - - if 'volume.#' in list(raw_attrs.keys()) and int(raw_attrs['volume.#']) > 0: - device_index = 1 - for key, value in list(raw_attrs.items()): - match = re.search("^volume.*.device$", key) - if match: - attrs['disk_volume_device_'+str(device_index)] = value - device_index += 1 - - - # attrs specific to Mantl - attrs.update({ - 'role': attrs['metadata'].get('role', 'none') - }) - - # add groups based on attrs - groups.append('os_image=' + str(attrs['image']['id'])) - groups.append('os_flavor=' + str(attrs['flavor']['name'])) - groups.extend('os_metadata_%s=%s' % item - for item in list(attrs['metadata'].items())) - groups.append('os_region=' + str(attrs['region'])) - - # groups specific to kubespray - for group in 
attrs['metadata'].get('kubespray_groups', "").split(","): - groups.append(group) - - sanitize_groups(groups) - - return name, attrs, groups - - -def iter_host_ips(hosts, ips): - '''Update hosts that have an entry in the floating IP list''' - for host in hosts: - port_id = host[1]['port_id'] - - if port_id in ips: - ip = ips[port_id] - - host[1].update({ - 'access_ip_v4': ip, - 'access_ip': ip, - 'public_ipv4': ip, - 'ansible_host': ip, - }) - - if 'use_access_ip' in host[1]['metadata'] and host[1]['metadata']['use_access_ip'] == "0" and 'access_ip' in host[1]: - host[1].pop('access_ip') - - yield host - - -## QUERY TYPES -def query_host(hosts, target): - for name, attrs, _ in hosts: - if name == target: - return attrs - - return {} - - -def query_list(hosts): - groups = defaultdict(dict) - meta = {} - - for name, attrs, hostgroups in hosts: - for group in set(hostgroups): - # Ansible 2.6.2 stopped supporting empty group names: https://github.com/ansible/ansible/pull/42584/commits/d4cd474b42ed23d8f8aabb2a7f84699673852eaf - # Empty group name defaults to "all" in Ansible < 2.6.2 so we alter empty group names to "all" - if not group: group = "all" - - groups[group].setdefault('hosts', []) - groups[group]['hosts'].append(name) - - meta[name] = attrs - - groups['_meta'] = {'hostvars': meta} - return groups - - -def query_hostfile(hosts): - out = ['## begin hosts generated by terraform.py ##'] - out.extend( - '{}\t{}'.format(attrs['ansible_host'].ljust(16), name) - for name, attrs, _ in hosts - ) - - out.append('## end hosts generated by terraform.py ##') - return '\n'.join(out) - - -def main(): - parser = argparse.ArgumentParser( - __file__, __doc__, - formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) - modes = parser.add_mutually_exclusive_group(required=True) - modes.add_argument('--list', - action='store_true', - help='list all variables') - modes.add_argument('--host', help='list variables for a single host') - modes.add_argument('--version', - action='store_true', - help='print version and exit') - modes.add_argument('--hostfile', - action='store_true', - help='print hosts as a /etc/hosts snippet') - parser.add_argument('--pretty', - action='store_true', - help='pretty-print output JSON') - parser.add_argument('--nometa', - action='store_true', - help='with --list, exclude hostvars') - default_root = os.environ.get('TERRAFORM_STATE_ROOT', - os.path.abspath(os.path.join(os.path.dirname(__file__), - '..', '..', ))) - parser.add_argument('--root', - default=default_root, - help='custom root to search for `.tfstate`s in') - - args = parser.parse_args() - - if args.version: - print('%s %s' % (__file__, VERSION)) - parser.exit() - - hosts = iterhosts(iterresources(tfstates(args.root))) - - # Perform a second pass on the file to pick up floating_ip entries to update the ip address of referenced hosts - ips = dict(iterips(iterresources(tfstates(args.root)))) - - if ips: - hosts = iter_host_ips(hosts, ips) - - if args.list: - output = query_list(hosts) - if args.nometa: - del output['_meta'] - print(json.dumps(output, indent=4 if args.pretty else None)) - elif args.host: - output = query_host(hosts, args.host) - print(json.dumps(output, indent=4 if args.pretty else None)) - elif args.hostfile: - output = query_hostfile(hosts) - print(output) - - parser.exit() - - -if __name__ == '__main__': - main() diff --git a/contrib/terraform/upcloud/README.md b/contrib/terraform/upcloud/README.md deleted file mode 100644 index 4657de991c6..00000000000 --- a/contrib/terraform/upcloud/README.md +++ 
/dev/null @@ -1,173 +0,0 @@ -# Kubernetes on UpCloud with Terraform - -Provision a Kubernetes cluster on [UpCloud](https://upcloud.com/) using Terraform and Kubespray - -## Requirements - -* Terraform 0.13.0 or newer - -## Quickstart - -NOTE: Assumes you are at the root of the kubespray repo. - -For authentication against UpCloud you can use the following environment variables. - -```bash -export TF_VAR_UPCLOUD_USERNAME=username -export TF_VAR_UPCLOUD_PASSWORD=password -``` - -To allow API access to your UpCloud account, you need to allow API connections by visiting [Account-page](https://hub.upcloud.com/account) in your UpCloud Hub. - -Copy the cluster configuration file. - -```bash -CLUSTER=my-upcloud-cluster -cp -r inventory/sample inventory/$CLUSTER -cp contrib/terraform/upcloud/cluster-settings.tfvars inventory/$CLUSTER/ -export ANSIBLE_CONFIG=ansible.cfg -cd inventory/$CLUSTER -``` - -Edit `cluster-settings.tfvars` to match your requirements. - -Run Terraform to create the infrastructure. - -```bash -terraform init ../../contrib/terraform/upcloud -terraform apply --var-file cluster-settings.tfvars \ - -state=tfstate-$CLUSTER.tfstate \ - ../../contrib/terraform/upcloud/ -``` - -You should now have an inventory file named `inventory.ini` that you can use with kubespray. -You can use the inventory file with kubespray to set up a cluster. - -It is a good idea to check that you have basic SSH connectivity to the nodes. You can do that by: - -```bash -ansible -i inventory.ini -m ping all -``` - -You can set up Kubernetes with kubespray using the generated inventory: - -```bash -ansible-playbook -i inventory.ini ../../cluster.yml -b -v -``` - -## Teardown - -You can tear down your infrastructure using the following Terraform command: - -```bash -terraform destroy --var-file cluster-settings.tfvars \ - -state=tfstate-$CLUSTER.tfstate \ - ../../contrib/terraform/upcloud/ -``` - -## Variables - -* `prefix`: Prefix to add to all resources; if set to "", no prefix is added -* `template_name`: The name or UUID of a base image -* `username`: The user used to access the nodes, defaults to "ubuntu" -* `private_network_cidr`: CIDR to use for the private network, defaults to "172.16.0.0/24" -* `dns_servers`: DNS servers that will be used by the nodes. Until [this is solved](https://github.com/UpCloudLtd/terraform-provider-upcloud/issues/562) this is done using user_data to reconfigure systemd-resolved. Defaults to `[]` -* `use_public_ips`: If a NIC connected to the Public network should be attached to all nodes by default. Can be overridden by `force_public_ip` if this is set to `false`. Defaults to `true` -* `ssh_public_keys`: List of public SSH keys to install on all machines -* `zone`: The zone where to run the cluster -* `machines`: Machines to provision. Key of this object will be used as the name of the machine - * `node_type`: The role of this node *(master|worker)* - * `plan`: Preconfigured cpu/mem plan to use (disables `cpu` and `mem` attributes below) - * `cpu`: number of cpu cores - * `mem`: memory size in MB - * `disk_size`: The size of the storage in GB - * `force_public_ip`: If `use_public_ips` is set to `false`, this forces a public NIC onto the machine anyway when set to `true`. Useful if you're migrating from public nodes to only private. Defaults to `false` - * `dns_servers`: This works the same way as the global `dns_servers` but only applies to a single node. If set to `[]` while the global `dns_servers` is set to something else, then it will not add the user_data and thus will not be recreated. 
Useful if you're migrating from public nodes to only private. Defaults to `null` - * `additional_disks`: Additional disks to attach to the node. - * `size`: The size of the additional disk in GB - * `tier`: The tier of disk to use (`maxiops` is the only one you can choose atm) -* `firewall_enabled`: Enable firewall rules -* `firewall_default_deny_in`: Set the firewall to deny inbound traffic by default. Automatically adds UpCloud DNS server and NTP port allowlisting. -* `firewall_default_deny_out`: Set the firewall to deny outbound traffic by default. -* `master_allowed_remote_ips`: List of IP ranges that should be allowed to access API of masters - * `start_address`: Start of address range to allow - * `end_address`: End of address range to allow -* `k8s_allowed_remote_ips`: List of IP ranges that should be allowed SSH access to all nodes - * `start_address`: Start of address range to allow - * `end_address`: End of address range to allow -* `master_allowed_ports`: List of port ranges that should be allowed to access the masters - * `protocol`: Protocol *(tcp|udp|icmp)* - * `port_range_min`: Start of port range to allow - * `port_range_max`: End of port range to allow - * `start_address`: Start of address range to allow - * `end_address`: End of address range to allow -* `worker_allowed_ports`: List of port ranges that should be allowed to access the workers - * `protocol`: Protocol *(tcp|udp|icmp)* - * `port_range_min`: Start of port range to allow - * `port_range_max`: End of port range to allow - * `start_address`: Start of address range to allow - * `end_address`: End of address range to allow -* `loadbalancer_enabled`: Enable managed load balancer -* `loadbalancer_plan`: Plan to use for load balancer *(development|production-small)* -* `loadbalancer_legacy_network`: If the loadbalancer should use the deprecated network field instead of networks blocks. You probably want to have this set to false (default value) -* `loadbalancers`: Ports to load balance and which machines to forward to. Key of this object will be used as the name of the load balancer frontends/backends - * `port`: Port to load balance. - * `target_port`: Port to the backend servers. - * `backend_servers`: List of servers that traffic to the port should be forwarded to. - * `proxy_protocol`: If the loadbalancer should set up the backend using proxy protocol. 
-* `router_enable`: If a router should be connected to the private network or not -* `gateways`: Gateways that should be connected to the router, requires `router_enable` to be set to `true` - * `features`: List of features for the gateway - * `plan`: Plan to use for the gateway - * `connections`: The connections and tunnels to create for the gateway - * `type`: What type of connection - * `local_routes`: Map of local routes for the connection - * `type`: Type of route - * `static_network`: Destination prefix of the route; needs to be a valid IPv4 prefix - * `remote_routes`: Map of remote routes for the connection - * `type`: Type of route - * `static_network`: Destination prefix of the route; needs to be a valid IPv4 prefix - * `tunnels`: The tunnels to create for this connection - * `remote_address`: The remote address for the tunnel - * `ipsec_properties`: Properties of the IPsec setup; if not set, defaults will be used - * `child_rekey_time`: IKE child SA rekey time in seconds - * `dpd_delay`: Delay before sending Dead Peer Detection packets if no traffic is detected, in seconds - * `dpd_timeout`: Timeout period for DPD reply before considering the peer to be dead, in seconds - * `ike_lifetime`: Maximum IKE SA lifetime in seconds - * `rekey_time`: IKE SA rekey time in seconds - * `phase1_algorithms`: List of Phase 1 proposal algorithms - * `phase1_dh_group_numbers`: List of Phase 1 Diffie-Hellman group numbers - * `phase1_integrity_algorithms`: List of Phase 1 integrity algorithms - * `phase2_algorithms`: List of Phase 2 (Security Association) algorithms - * `phase2_dh_group_numbers`: List of Phase 2 Diffie-Hellman group numbers - * `phase2_integrity_algorithms`: List of Phase 2 integrity algorithms -* `gateway_vpn_psks`: Separate variable for providing PSKs for connection tunnels. The environment variable can be exported in the following format `export TF_VAR_gateway_vpn_psks='{"${gateway-name}-${connection-name}-${tunnel-name}-tunnel":{psk:"..."}}'` -* `static_routes`: Static routes to apply to the router, requires `router_enable` to be set to `true` -* `network_peerings`: Other UpCloud private networks to peer with, requires `router_enable` to be set to `true` -* `server_groups`: Group servers together - * `servers`: The servers that should be included in the group. - * `anti_affinity_policy`: Defines if a server group is an anti-affinity group. Setting this to "strict" or "yes" will result in the servers in the group being placed on separate compute hosts. The value can be "strict", "yes" or "no": "strict" does not allow servers in the same group to be placed on the same host, while "yes" is a best-effort policy that tries to put servers on different hosts but does not guarantee it. - -## Migration - -When `null_resource.inventories` and `data.template_file.inventory` were changed to `local_file.inventory`, the old entries need to be removed from the state file. 
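If you hit the errors below, you can first confirm which of the stale resources are still tracked in the state before removing anything. A minimal check sketch, assuming the state file is named `terraform.tfstate` as in the commands further down:

```bash
# List resources tracked in the old state and filter for the stale entries
terraform state list -state=terraform.tfstate | grep -E 'null_resource\.inventories|data\.template_file\.inventory'
```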
-The error messages you'll see if you encounter this is: - -```text -Error: failed to read schema for null_resource.inventories in registry.terraform.io/hashicorp/null: failed to instantiate provider "registry.terraform.io/hashicorp/null" to obtain schema: unavailable provider "registry.terraform.io/hashicorp/null" -Error: failed to read schema for data.template_file.inventory in registry.terraform.io/hashicorp/template: failed to instantiate provider "registry.terraform.io/hashicorp/template" to obtain schema: unavailable provider "registry.terraform.io/hashicorp/template" -``` - -This can be fixed with the following lines - -```bash -terraform state rm -state=terraform.tfstate null_resource.inventories -terraform state rm -state=terraform.tfstate data.template_file.inventory -``` - -### Public to Private only migration - -Since there's no way to remove the public NIC on a machine without recreating its private NIC it's not possible to inplace change a cluster to only use private IPs. -The way to migrate is to first set `use_public_ips` to `false`, `dns_servers` to some DNS servers and then update all existing servers to have `force_public_ip` set to `true` and `dns_severs` set to `[]`. -After that you can add new nodes without `force_public_ip` and `dns_servers` set and create them. -Add the new nodes into the cluster and when all of them are added, remove the old nodes. diff --git a/contrib/terraform/upcloud/cluster-settings.tfvars b/contrib/terraform/upcloud/cluster-settings.tfvars deleted file mode 100644 index 7c592462816..00000000000 --- a/contrib/terraform/upcloud/cluster-settings.tfvars +++ /dev/null @@ -1,198 +0,0 @@ -# See: https://developers.upcloud.com/1.3/5-zones/ -zone = "fi-hel1" -private_cloud = false - -# Only used if private_cloud = true, public zone equivalent -# For example use finnish public zone for finnish private zone -public_zone = "fi-hel2" - -username = "ubuntu" - -# Prefix to use for all resources to separate them from other resources -prefix = "kubespray" - -inventory_file = "inventory.ini" - -# Set the operating system using UUID or exact name -template_name = "Ubuntu Server 20.04 LTS (Focal Fossa)" - -ssh_public_keys = [ - # Put your public SSH key here - "ssh-rsa public key 1", - "ssh-rsa public key 2", -] - -# check list of available plan https://developers.upcloud.com/1.3/7-plans/ -machines = { - "control-plane-0" : { - "node_type" : "master", - # plan to use instead of custom cpu/mem - "plan" : null, - #number of cpu cores - "cpu" : "2", - #memory size in MB - "mem" : "4096" - # The size of the storage in GB - "disk_size" : 250 - "additional_disks" : {} - }, - "worker-0" : { - "node_type" : "worker", - # plan to use instead of custom cpu/mem - "plan" : null, - #number of cpu cores - "cpu" : "2", - #memory size in MB - "mem" : "4096" - # The size of the storage in GB - "disk_size" : 250 - "additional_disks" : { - # "some-disk-name-1": { - # "size": 100, - # "tier": "maxiops", - # }, - # "some-disk-name-2": { - # "size": 100, - # "tier": "maxiops", - # } - } - }, - "worker-1" : { - "node_type" : "worker", - # plan to use instead of custom cpu/mem - "plan" : null, - #number of cpu cores - "cpu" : "2", - #memory size in MB - "mem" : "4096" - # The size of the storage in GB - "disk_size" : 250 - "additional_disks" : { - # "some-disk-name-1": { - # "size": 100, - # "tier": "maxiops", - # }, - # "some-disk-name-2": { - # "size": 100, - # "tier": "maxiops", - # } - } - }, - "worker-2" : { - "node_type" : "worker", - # plan to use instead of custom cpu/mem - 
"plan" : null, - #number of cpu cores - "cpu" : "2", - #memory size in MB - "mem" : "4096" - # The size of the storage in GB - "disk_size" : 250 - "additional_disks" : { - # "some-disk-name-1": { - # "size": 100, - # "tier": "maxiops", - # }, - # "some-disk-name-2": { - # "size": 100, - # "tier": "maxiops", - # } - } - } -} - -firewall_enabled = false -firewall_default_deny_in = false -firewall_default_deny_out = false - -master_allowed_remote_ips = [ - { - "start_address" : "0.0.0.0" - "end_address" : "255.255.255.255" - } -] - -k8s_allowed_remote_ips = [ - { - "start_address" : "0.0.0.0" - "end_address" : "255.255.255.255" - } -] - -master_allowed_ports = [] -worker_allowed_ports = [] - -loadbalancer_enabled = false -loadbalancer_plan = "development" -loadbalancers = { - # "http" : { - # "proxy_protocol" : false - # "port" : 80, - # "target_port" : 80, - # "backend_servers" : [ - # "worker-0", - # "worker-1", - # "worker-2" - # ] - # } -} - -server_groups = { - # "control-plane" = { - # servers = [ - # "control-plane-0" - # ] - # anti_affinity_policy = "strict" - # }, - # "workers" = { - # servers = [ - # "worker-0", - # "worker-1", - # "worker-2" - # ] - # anti_affinity_policy = "yes" - # } -} - -router_enable = false -gateways = { - # "gateway" : { - # features: [ "vpn" ] - # plan = "production" - # connections = { - # "connection" = { - # name = "connection" - # type = "ipsec" - # remote_routes = { - # "them" = { - # type = "static" - # static_network = "1.2.3.4/24" - # } - # } - # local_routes = { - # "me" = { - # type = "static" - # static_network = "4.3.2.1/24" - # } - # } - # tunnels = { - # "tunnel1" = { - # remote_address = "1.2.3.4" - # } - # } - # } - # } - # } -} -# gateway_vpn_psks = {} # Should be loaded as an environment variable -static_routes = { - # "route": { - # route: "1.2.3.4/24" - # nexthop: "4.3.2.1" - # } -} -network_peerings = { - # "peering": { - # remote_network: "uuid" - # } -} diff --git a/contrib/terraform/upcloud/main.tf b/contrib/terraform/upcloud/main.tf deleted file mode 100644 index 9ea73b7bbf2..00000000000 --- a/contrib/terraform/upcloud/main.tf +++ /dev/null @@ -1,65 +0,0 @@ - -terraform { - required_version = ">= 0.13.0" -} -provider "upcloud" { - # Your UpCloud credentials are read from environment variables: - username = var.UPCLOUD_USERNAME - password = var.UPCLOUD_PASSWORD -} - -module "kubernetes" { - source = "./modules/kubernetes-cluster" - - prefix = var.prefix - zone = var.zone - private_cloud = var.private_cloud - public_zone = var.public_zone - - template_name = var.template_name - username = var.username - - private_network_cidr = var.private_network_cidr - dns_servers = var.dns_servers - use_public_ips = var.use_public_ips - - machines = var.machines - - ssh_public_keys = var.ssh_public_keys - - firewall_enabled = var.firewall_enabled - firewall_default_deny_in = var.firewall_default_deny_in - firewall_default_deny_out = var.firewall_default_deny_out - master_allowed_remote_ips = var.master_allowed_remote_ips - k8s_allowed_remote_ips = var.k8s_allowed_remote_ips - bastion_allowed_remote_ips = var.bastion_allowed_remote_ips - master_allowed_ports = var.master_allowed_ports - worker_allowed_ports = var.worker_allowed_ports - - loadbalancer_enabled = var.loadbalancer_enabled - loadbalancer_plan = var.loadbalancer_plan - loadbalancer_legacy_network = var.loadbalancer_legacy_network - loadbalancers = var.loadbalancers - - router_enable = var.router_enable - gateways = var.gateways - gateway_vpn_psks = var.gateway_vpn_psks - static_routes = 
var.static_routes - network_peerings = var.network_peerings - - server_groups = var.server_groups -} - -# -# Generate ansible inventory -# - -resource "local_file" "inventory" { - content = templatefile("${path.module}/templates/inventory.tpl", { - master_ip = module.kubernetes.master_ip - worker_ip = module.kubernetes.worker_ip - bastion_ip = module.kubernetes.bastion_ip - username = var.username - }) - filename = var.inventory_file -} diff --git a/contrib/terraform/upcloud/modules/kubernetes-cluster/main.tf b/contrib/terraform/upcloud/modules/kubernetes-cluster/main.tf deleted file mode 100644 index 37ab2357385..00000000000 --- a/contrib/terraform/upcloud/modules/kubernetes-cluster/main.tf +++ /dev/null @@ -1,904 +0,0 @@ -locals { - # Create a list of all disks to create - disks = flatten([ - for node_name, machine in var.machines : [ - for disk_name, disk in machine.additional_disks : { - disk = disk - disk_name = disk_name - node_name = node_name - } - ] - ]) - - lb_backend_servers = flatten([ - for lb_name, loadbalancer in var.loadbalancers : [ - for backend_server in loadbalancer.backend_servers : { - port = loadbalancer.target_port - lb_name = lb_name - server_name = backend_server - } - ] - ]) - - gateway_connections = flatten([ - for gateway_name, gateway in var.gateways : [ - for connection_name, connection in gateway.connections : { - "gateway_id" = upcloud_gateway.gateway[gateway_name].id - "gateway_name" = gateway_name - "connection_name" = connection_name - "type" = connection.type - "local_routes" = connection.local_routes - "remote_routes" = connection.remote_routes - } - ] - ]) - - gateway_connection_tunnels = flatten([ - for gateway_name, gateway in var.gateways : [ - for connection_name, connection in gateway.connections : [ - for tunnel_name, tunnel in connection.tunnels : { - "gateway_id" = upcloud_gateway.gateway[gateway_name].id - "gateway_name" = gateway_name - "connection_id" = upcloud_gateway_connection.gateway_connection["${gateway_name}-${connection_name}"].id - "connection_name" = connection_name - "tunnel_name" = tunnel_name - "local_address_name" = tolist(upcloud_gateway.gateway[gateway_name].address).0.name - "remote_address" = tunnel.remote_address - "ipsec_properties" = tunnel.ipsec_properties - } - ] - ] - ]) - - # If prefix is set, all resources will be prefixed with "${var.prefix}-" - # Else don't prefix with anything - resource-prefix = "%{if var.prefix != ""}${var.prefix}-%{endif}" - - master_ip = { - for instance in upcloud_server.master : - instance.hostname => { - for nic in instance.network_interface : - nic.type => nic.ip_address - if nic.ip_address != null - } - } - worker_ip = { - for instance in upcloud_server.worker : - instance.hostname => { - for nic in instance.network_interface : - nic.type => nic.ip_address - if nic.ip_address != null - } - } - - bastion_ip = { - for instance in upcloud_server.bastion : - instance.hostname => { - for nic in instance.network_interface : - nic.type => nic.ip_address - if nic.ip_address != null - } - } - - node_user_data = { - for name, machine in var.machines : - name => < 0 ) || ( length(var.dns_servers) > 0 && machine.dns_servers == null ) ~} -#!/bin/bash -echo -e "[Resolve]\nDNS=${ join(" ", length(machine.dns_servers != null ? machine.dns_servers : []) > 0 ? 
machine.dns_servers : var.dns_servers) }" > /etc/systemd/resolved.conf - -systemctl restart systemd-resolved -%{ endif ~} -EOF - } -} - -resource "upcloud_network" "private" { - name = "${local.resource-prefix}k8s-network" - zone = var.zone - - ip_network { - address = var.private_network_cidr - dhcp_default_route = var.router_enable - # TODO: When support for dhcp_dns for private networks are in, remove the user_data and enable it here. - # See more here https://github.com/UpCloudLtd/terraform-provider-upcloud/issues/562 - # dhcp_dns = length(var.private_network_dns) > 0 ? var.private_network_dns : null - dhcp = true - family = "IPv4" - } - - router = var.router_enable ? upcloud_router.router[0].id : null -} - -resource "upcloud_storage" "additional_disks" { - for_each = { - for disk in local.disks : "${disk.node_name}_${disk.disk_name}" => disk.disk - } - - size = each.value.size - tier = each.value.tier - title = "${local.resource-prefix}${each.key}" - zone = var.zone -} - -resource "upcloud_server" "master" { - for_each = { - for name, machine in var.machines : - name => machine - if machine.node_type == "master" - } - - hostname = "${local.resource-prefix}${each.key}" - plan = each.value.plan - cpu = each.value.cpu - mem = each.value.mem - zone = var.zone - server_group = each.value.server_group == null ? null : upcloud_server_group.server_groups[each.value.server_group].id - - template { - storage = var.template_name - size = each.value.disk_size - } - - dynamic "network_interface" { - for_each = each.value.force_public_ip || var.use_public_ips ? [1] : [] - - content { - type = "public" - } - } - - # Private network interface - network_interface { - type = "private" - network = upcloud_network.private.id - } - - # Ignore volumes created by csi-driver - lifecycle { - ignore_changes = [storage_devices] - } - - firewall = var.firewall_enabled - - dynamic "storage_devices" { - for_each = { - for disk_key_name, disk in upcloud_storage.additional_disks : - disk_key_name => disk - # Only add the disk if it matches the node name in the start of its name - if length(regexall("^${each.key}_.+", disk_key_name)) > 0 - } - - content { - storage = storage_devices.value.id - } - } - - # Include at least one public SSH key - login { - user = var.username - keys = var.ssh_public_keys - create_password = false - } - - metadata = local.node_user_data[each.key] != "" ? true : null - user_data = local.node_user_data[each.key] != "" ? local.node_user_data[each.key] : null -} - -resource "upcloud_server" "worker" { - for_each = { - for name, machine in var.machines : - name => machine - if machine.node_type == "worker" - } - - hostname = "${local.resource-prefix}${each.key}" - plan = each.value.plan - cpu = each.value.cpu - mem = each.value.mem - zone = var.zone - server_group = each.value.server_group == null ? null : upcloud_server_group.server_groups[each.value.server_group].id - - - template { - storage = var.template_name - size = each.value.disk_size - } - - dynamic "network_interface" { - for_each = each.value.force_public_ip || var.use_public_ips ? 
[1] : [] - - content { - type = "public" - } - } - - # Private network interface - network_interface { - type = "private" - network = upcloud_network.private.id - } - - # Ignore volumes created by csi-driver - lifecycle { - ignore_changes = [storage_devices] - } - - firewall = var.firewall_enabled - - dynamic "storage_devices" { - for_each = { - for disk_key_name, disk in upcloud_storage.additional_disks : - disk_key_name => disk - # Only add the disk if it matches the node name in the start of its name - if length(regexall("^${each.key}_.+", disk_key_name)) > 0 - } - - content { - storage = storage_devices.value.id - } - } - - # Include at least one public SSH key - login { - user = var.username - keys = var.ssh_public_keys - create_password = false - } - - metadata = local.node_user_data[each.key] != "" ? true : null - user_data = local.node_user_data[each.key] != "" ? local.node_user_data[each.key] : null -} - -resource "upcloud_server" "bastion" { - for_each = { - for name, machine in var.machines : - name => machine - if machine.node_type == "bastion" - } - - hostname = "${local.resource-prefix}${each.key}" - plan = each.value.plan - cpu = each.value.cpu - mem = each.value.mem - zone = var.zone - server_group = each.value.server_group == null ? null : upcloud_server_group.server_groups[each.value.server_group].id - - - template { - storage = var.template_name - size = each.value.disk_size - } - - # Private network interface - network_interface { - type = "private" - network = upcloud_network.private.id - } - - # Private network interface - network_interface { - type = "public" - } - - firewall = var.firewall_enabled - - dynamic "storage_devices" { - for_each = { - for disk_key_name, disk in upcloud_storage.additional_disks : - disk_key_name => disk - # Only add the disk if it matches the node name in the start of its name - if length(regexall("^${each.key}_.+", disk_key_name)) > 0 - } - - content { - storage = storage_devices.value.id - } - } - - # Include at least one public SSH key - login { - user = var.username - keys = var.ssh_public_keys - create_password = false - } -} - -resource "upcloud_firewall_rules" "master" { - for_each = upcloud_server.master - server_id = each.value.id - - dynamic "firewall_rule" { - for_each = var.master_allowed_remote_ips - - content { - action = "accept" - comment = "Allow master API access from this network" - destination_port_end = "6443" - destination_port_start = "6443" - direction = "in" - family = "IPv4" - protocol = "tcp" - source_address_end = firewall_rule.value.end_address - source_address_start = firewall_rule.value.start_address - } - } - - dynamic "firewall_rule" { - for_each = length(var.master_allowed_remote_ips) > 0 ? [1] : [] - - content { - action = "drop" - comment = "Deny master API access from other networks" - destination_port_end = "6443" - destination_port_start = "6443" - direction = "in" - family = "IPv4" - protocol = "tcp" - source_address_end = "255.255.255.255" - source_address_start = "0.0.0.0" - } - } - - dynamic "firewall_rule" { - for_each = var.k8s_allowed_remote_ips - - content { - action = "accept" - comment = "Allow SSH from this network" - destination_port_end = "22" - destination_port_start = "22" - direction = "in" - family = "IPv4" - protocol = "tcp" - source_address_end = firewall_rule.value.end_address - source_address_start = firewall_rule.value.start_address - } - } - - dynamic "firewall_rule" { - for_each = length(var.k8s_allowed_remote_ips) > 0 ? 
[1] : [] - - content { - action = "drop" - comment = "Deny SSH from other networks" - destination_port_end = "22" - destination_port_start = "22" - direction = "in" - family = "IPv4" - protocol = "tcp" - source_address_end = "255.255.255.255" - source_address_start = "0.0.0.0" - } - } - - dynamic "firewall_rule" { - for_each = var.master_allowed_ports - - content { - action = "accept" - comment = "Allow access on this port" - destination_port_end = firewall_rule.value.port_range_max - destination_port_start = firewall_rule.value.port_range_min - direction = "in" - family = "IPv4" - protocol = firewall_rule.value.protocol - source_address_end = firewall_rule.value.end_address - source_address_start = firewall_rule.value.start_address - } - } - - dynamic "firewall_rule" { - for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : [] - - content { - action = "accept" - comment = "UpCloud DNS" - source_port_end = "53" - source_port_start = "53" - direction = "in" - family = "IPv4" - protocol = firewall_rule.value - source_address_end = "94.237.40.9" - source_address_start = "94.237.40.9" - } - } - - dynamic "firewall_rule" { - for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : [] - - content { - action = "accept" - comment = "UpCloud DNS" - source_port_end = "53" - source_port_start = "53" - direction = "in" - family = "IPv4" - protocol = firewall_rule.value - source_address_end = "94.237.127.9" - source_address_start = "94.237.127.9" - } - } - - dynamic "firewall_rule" { - for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : [] - - content { - action = "accept" - comment = "UpCloud DNS" - source_port_end = "53" - source_port_start = "53" - direction = "in" - family = "IPv6" - protocol = firewall_rule.value - source_address_end = "2a04:3540:53::1" - source_address_start = "2a04:3540:53::1" - } - } - - dynamic "firewall_rule" { - for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : [] - - content { - action = "accept" - comment = "UpCloud DNS" - source_port_end = "53" - source_port_start = "53" - direction = "in" - family = "IPv6" - protocol = firewall_rule.value - source_address_end = "2a04:3544:53::1" - source_address_start = "2a04:3544:53::1" - } - } - - dynamic "firewall_rule" { - for_each = var.firewall_default_deny_in ? ["udp"] : [] - - content { - action = "accept" - comment = "NTP Port" - source_port_end = "123" - source_port_start = "123" - direction = "in" - family = "IPv4" - protocol = firewall_rule.value - source_address_end = "255.255.255.255" - source_address_start = "0.0.0.0" - } - } - - dynamic "firewall_rule" { - for_each = var.firewall_default_deny_in ? ["udp"] : [] - - content { - action = "accept" - comment = "NTP Port" - source_port_end = "123" - source_port_start = "123" - direction = "in" - family = "IPv6" - protocol = firewall_rule.value - } - } - - firewall_rule { - action = var.firewall_default_deny_in ? "drop" : "accept" - direction = "in" - } - - firewall_rule { - action = var.firewall_default_deny_out ? 
"drop" : "accept" - direction = "out" - } -} - -resource "upcloud_firewall_rules" "k8s" { - for_each = upcloud_server.worker - server_id = each.value.id - - dynamic "firewall_rule" { - for_each = var.k8s_allowed_remote_ips - - content { - action = "accept" - comment = "Allow SSH from this network" - destination_port_end = "22" - destination_port_start = "22" - direction = "in" - family = "IPv4" - protocol = "tcp" - source_address_end = firewall_rule.value.end_address - source_address_start = firewall_rule.value.start_address - } - } - - dynamic "firewall_rule" { - for_each = length(var.k8s_allowed_remote_ips) > 0 ? [1] : [] - - content { - action = "drop" - comment = "Deny SSH from other networks" - destination_port_end = "22" - destination_port_start = "22" - direction = "in" - family = "IPv4" - protocol = "tcp" - source_address_end = "255.255.255.255" - source_address_start = "0.0.0.0" - } - } - - dynamic "firewall_rule" { - for_each = var.worker_allowed_ports - - content { - action = "accept" - comment = "Allow access on this port" - destination_port_end = firewall_rule.value.port_range_max - destination_port_start = firewall_rule.value.port_range_min - direction = "in" - family = "IPv4" - protocol = firewall_rule.value.protocol - source_address_end = firewall_rule.value.end_address - source_address_start = firewall_rule.value.start_address - } - } - - dynamic "firewall_rule" { - for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : [] - - content { - action = "accept" - comment = "UpCloud DNS" - source_port_end = "53" - source_port_start = "53" - direction = "in" - family = "IPv4" - protocol = firewall_rule.value - source_address_end = "94.237.40.9" - source_address_start = "94.237.40.9" - } - } - - dynamic "firewall_rule" { - for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : [] - - content { - action = "accept" - comment = "UpCloud DNS" - source_port_end = "53" - source_port_start = "53" - direction = "in" - family = "IPv4" - protocol = firewall_rule.value - source_address_end = "94.237.127.9" - source_address_start = "94.237.127.9" - } - } - - dynamic "firewall_rule" { - for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : [] - - content { - action = "accept" - comment = "UpCloud DNS" - source_port_end = "53" - source_port_start = "53" - direction = "in" - family = "IPv6" - protocol = firewall_rule.value - source_address_end = "2a04:3540:53::1" - source_address_start = "2a04:3540:53::1" - } - } - - dynamic "firewall_rule" { - for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : [] - - content { - action = "accept" - comment = "UpCloud DNS" - source_port_end = "53" - source_port_start = "53" - direction = "in" - family = "IPv6" - protocol = firewall_rule.value - source_address_end = "2a04:3544:53::1" - source_address_start = "2a04:3544:53::1" - } - } - - dynamic "firewall_rule" { - for_each = var.firewall_default_deny_in ? ["udp"] : [] - - content { - action = "accept" - comment = "NTP Port" - source_port_end = "123" - source_port_start = "123" - direction = "in" - family = "IPv4" - protocol = firewall_rule.value - source_address_end = "255.255.255.255" - source_address_start = "0.0.0.0" - } - } - - dynamic "firewall_rule" { - for_each = var.firewall_default_deny_in ? ["udp"] : [] - - content { - action = "accept" - comment = "NTP Port" - source_port_end = "123" - source_port_start = "123" - direction = "in" - family = "IPv6" - protocol = firewall_rule.value - } - } - - firewall_rule { - action = var.firewall_default_deny_in ? 
"drop" : "accept" - direction = "in" - } - - firewall_rule { - action = var.firewall_default_deny_out ? "drop" : "accept" - direction = "out" - } -} - -resource "upcloud_firewall_rules" "bastion" { - for_each = upcloud_server.bastion - server_id = each.value.id - - dynamic "firewall_rule" { - for_each = var.bastion_allowed_remote_ips - - content { - action = "accept" - comment = "Allow bastion SSH access from this network" - destination_port_end = "22" - destination_port_start = "22" - direction = "in" - family = "IPv4" - protocol = "tcp" - source_address_end = firewall_rule.value.end_address - source_address_start = firewall_rule.value.start_address - } - } - - dynamic "firewall_rule" { - for_each = length(var.bastion_allowed_remote_ips) > 0 ? [1] : [] - - content { - action = "drop" - comment = "Drop bastion SSH access from other networks" - destination_port_end = "22" - destination_port_start = "22" - direction = "in" - family = "IPv4" - protocol = "tcp" - source_address_end = "255.255.255.255" - source_address_start = "0.0.0.0" - } - } - - firewall_rule { - action = var.firewall_default_deny_in ? "drop" : "accept" - direction = "in" - } - - firewall_rule { - action = var.firewall_default_deny_out ? "drop" : "accept" - direction = "out" - } -} - -resource "upcloud_loadbalancer" "lb" { - count = var.loadbalancer_enabled ? 1 : 0 - configured_status = "started" - name = "${local.resource-prefix}lb" - plan = var.loadbalancer_plan - zone = var.private_cloud ? var.public_zone : var.zone - network = var.loadbalancer_legacy_network ? upcloud_network.private.id : null - - dynamic "networks" { - for_each = var.loadbalancer_legacy_network ? [] : [1] - - content { - name = "Private-Net" - type = "private" - family = "IPv4" - network = upcloud_network.private.id - } - } - - dynamic "networks" { - for_each = var.loadbalancer_legacy_network ? [] : [1] - - content { - name = "Public-Net" - type = "public" - family = "IPv4" - } - } - - lifecycle { - ignore_changes = [ maintenance_dow, maintenance_time ] - } -} - -resource "upcloud_loadbalancer_backend" "lb_backend" { - for_each = var.loadbalancer_enabled ? var.loadbalancers : {} - - loadbalancer = upcloud_loadbalancer.lb[0].id - name = "lb-backend-${each.key}" - properties { - outbound_proxy_protocol = each.value.proxy_protocol ? "v2" : "" - } -} - -resource "upcloud_loadbalancer_frontend" "lb_frontend" { - for_each = var.loadbalancer_enabled ? var.loadbalancers : {} - - loadbalancer = upcloud_loadbalancer.lb[0].id - name = "lb-frontend-${each.key}" - mode = "tcp" - port = each.value.port - default_backend_name = upcloud_loadbalancer_backend.lb_backend[each.key].name - - dynamic "networks" { - for_each = var.loadbalancer_legacy_network ? [] : [1] - - content { - name = "Public-Net" - } - } - - dynamic "networks" { - for_each = each.value.allow_internal_frontend ? [1] : [] - - content{ - name = "Private-Net" - } - } -} - -resource "upcloud_loadbalancer_static_backend_member" "lb_backend_member" { - for_each = { - for be_server in local.lb_backend_servers : - "${be_server.server_name}-lb-backend-${be_server.lb_name}" => be_server - if var.loadbalancer_enabled - } - - backend = upcloud_loadbalancer_backend.lb_backend[each.value.lb_name].id - name = "${local.resource-prefix}${each.key}" - ip = merge(local.master_ip, local.worker_ip)["${local.resource-prefix}${each.value.server_name}"].private - port = each.value.port - weight = 100 - max_sessions = var.loadbalancer_plan == "production-small" ? 
50000 : 1000 - enabled = true -} - -resource "upcloud_server_group" "server_groups" { - for_each = var.server_groups - title = each.key - anti_affinity_policy = each.value.anti_affinity_policy - labels = {} - # Managed upstream via upcloud_server resource - members = [] - lifecycle { - ignore_changes = [members] - } -} - -resource "upcloud_router" "router" { - count = var.router_enable ? 1 : 0 - - name = "${local.resource-prefix}router" - - dynamic "static_route" { - for_each = var.static_routes - - content { - name = static_route.key - - nexthop = static_route.value["nexthop"] - route = static_route.value["route"] - } - } - -} - -resource "upcloud_gateway" "gateway" { - for_each = var.router_enable ? var.gateways : {} - name = "${local.resource-prefix}${each.key}-gateway" - zone = var.private_cloud ? var.public_zone : var.zone - - features = each.value.features - plan = each.value.plan - - router { - id = upcloud_router.router[0].id - } -} - -resource "upcloud_gateway_connection" "gateway_connection" { - for_each = { - for gc in local.gateway_connections : "${gc.gateway_name}-${gc.connection_name}" => gc - } - - gateway = each.value.gateway_id - name = "${local.resource-prefix}${each.key}-gateway-connection" - type = each.value.type - - dynamic "local_route" { - for_each = each.value.local_routes - - content { - name = local_route.key - type = local_route.value["type"] - static_network = local_route.value["static_network"] - } - } - - dynamic "remote_route" { - for_each = each.value.remote_routes - - content { - name = remote_route.key - type = remote_route.value["type"] - static_network = remote_route.value["static_network"] - } - } -} - -resource "upcloud_gateway_connection_tunnel" "gateway_connection_tunnel" { - for_each = { - for gct in local.gateway_connection_tunnels : "${gct.gateway_name}-${gct.connection_name}-${gct.tunnel_name}-tunnel" => gct - } - - connection_id = each.value.connection_id - name = each.key - local_address_name = each.value.local_address_name - remote_address = each.value.remote_address - - ipsec_auth_psk { - psk = var.gateway_vpn_psks[each.key].psk - } - - dynamic "ipsec_properties" { - for_each = each.value.ipsec_properties != null ? 
{ "ip": each.value.ipsec_properties } : {} - - content { - child_rekey_time = ipsec_properties.value["child_rekey_time"] - dpd_delay = ipsec_properties.value["dpd_delay"] - dpd_timeout = ipsec_properties.value["dpd_timeout"] - ike_lifetime = ipsec_properties.value["ike_lifetime"] - rekey_time = ipsec_properties.value["rekey_time"] - phase1_algorithms = ipsec_properties.value["phase1_algorithms"] - phase1_dh_group_numbers = ipsec_properties.value["phase1_dh_group_numbers"] - phase1_integrity_algorithms = ipsec_properties.value["phase1_integrity_algorithms"] - phase2_algorithms = ipsec_properties.value["phase2_algorithms"] - phase2_dh_group_numbers = ipsec_properties.value["phase2_dh_group_numbers"] - phase2_integrity_algorithms = ipsec_properties.value["phase2_integrity_algorithms"] - } - } -} - -resource "upcloud_network_peering" "peering" { - for_each = var.network_peerings - - name = "${local.resource-prefix}${each.key}" - - network { - uuid = upcloud_network.private.id - } - - peer_network { - uuid = each.value.remote_network - } -} diff --git a/contrib/terraform/upcloud/modules/kubernetes-cluster/output.tf b/contrib/terraform/upcloud/modules/kubernetes-cluster/output.tf deleted file mode 100644 index e75b9faa077..00000000000 --- a/contrib/terraform/upcloud/modules/kubernetes-cluster/output.tf +++ /dev/null @@ -1,15 +0,0 @@ -output "master_ip" { - value = local.master_ip -} - -output "worker_ip" { - value = local.worker_ip -} - -output "bastion_ip" { - value = local.bastion_ip -} - -output "loadbalancer_domain" { - value = var.loadbalancer_enabled ? upcloud_loadbalancer.lb[0].dns_name : null -} diff --git a/contrib/terraform/upcloud/modules/kubernetes-cluster/variables.tf b/contrib/terraform/upcloud/modules/kubernetes-cluster/variables.tf deleted file mode 100644 index eeb1a70c4f3..00000000000 --- a/contrib/terraform/upcloud/modules/kubernetes-cluster/variables.tf +++ /dev/null @@ -1,202 +0,0 @@ -variable "prefix" { - type = string -} - -variable "zone" { - type = string -} - -variable "private_cloud" { - type = bool -} - -variable "public_zone" { - type = string -} - -variable "template_name" {} - -variable "username" {} - -variable "private_network_cidr" {} - -variable "dns_servers" {} - -variable "use_public_ips" {} - -variable "machines" { - description = "Cluster machines" - type = map(object({ - node_type = string - plan = string - cpu = optional(number) - mem = optional(number) - disk_size = number - server_group : string - force_public_ip : optional(bool, false) - dns_servers : optional(set(string)) - additional_disks = map(object({ - size = number - tier = string - })) - })) -} - -variable "ssh_public_keys" { - type = list(string) -} - -variable "firewall_enabled" { - type = bool -} - -variable "master_allowed_remote_ips" { - type = list(object({ - start_address = string - end_address = string - })) -} - -variable "k8s_allowed_remote_ips" { - type = list(object({ - start_address = string - end_address = string - })) -} - -variable "bastion_allowed_remote_ips" { - type = list(object({ - start_address = string - end_address = string - })) -} - -variable "master_allowed_ports" { - type = list(object({ - protocol = string - port_range_min = number - port_range_max = number - start_address = string - end_address = string - })) -} - -variable "worker_allowed_ports" { - type = list(object({ - protocol = string - port_range_min = number - port_range_max = number - start_address = string - end_address = string - })) -} - -variable "firewall_default_deny_in" { - type = bool -} - 
-variable "firewall_default_deny_out" { - type = bool -} - -variable "loadbalancer_enabled" { - type = bool -} - -variable "loadbalancer_plan" { - type = string -} - -variable "loadbalancer_legacy_network" { - type = bool - default = false -} - -variable "loadbalancers" { - description = "Load balancers" - - type = map(object({ - proxy_protocol = bool - port = number - target_port = number - allow_internal_frontend = optional(bool) - backend_servers = list(string) - })) -} - -variable "server_groups" { - description = "Server groups" - - type = map(object({ - anti_affinity_policy = string - })) -} - -variable "router_enable" { - description = "If a router should be enabled and connected to the private network or not" - - type = bool -} - -variable "gateways" { - description = "Gateways that should be connected to the router, requires router_enable is set to true" - - type = map(object({ - features = list(string) - plan = optional(string) - connections = optional(map(object({ - type = string - local_routes = optional(map(object({ - type = string - static_network = string - }))) - remote_routes = optional(map(object({ - type = string - static_network = string - }))) - tunnels = optional(map(object({ - remote_address = string - ipsec_properties = optional(object({ - child_rekey_time = number - dpd_delay = number - dpd_timeout = number - ike_lifetime = number - rekey_time = number - phase1_algorithms = set(string) - phase1_dh_group_numbers = set(string) - phase1_integrity_algorithms = set(string) - phase2_algorithms = set(string) - phase2_dh_group_numbers = set(string) - phase2_integrity_algorithms = set(string) - })) - }))) - }))) - })) -} - -variable "gateway_vpn_psks" { - description = "Separate variable for providing psks for connection tunnels" - - type = map(object({ - psk = string - })) - default = {} - sensitive = true -} - -variable "static_routes" { - description = "Static routes to apply to the router, requires router_enable is set to true" - - type = map(object({ - nexthop = string - route = string - })) -} - -variable "network_peerings" { - description = "Other UpCloud private networks to peer with, requires router_enable is set to true" - - type = map(object({ - remote_network = string - })) -} diff --git a/contrib/terraform/upcloud/modules/kubernetes-cluster/versions.tf b/contrib/terraform/upcloud/modules/kubernetes-cluster/versions.tf deleted file mode 100644 index 4db5980d212..00000000000 --- a/contrib/terraform/upcloud/modules/kubernetes-cluster/versions.tf +++ /dev/null @@ -1,10 +0,0 @@ - -terraform { - required_providers { - upcloud = { - source = "UpCloudLtd/upcloud" - version = "~>5.9.0" - } - } - required_version = ">= 0.13" -} diff --git a/contrib/terraform/upcloud/output.tf b/contrib/terraform/upcloud/output.tf deleted file mode 100644 index d56d6e44619..00000000000 --- a/contrib/terraform/upcloud/output.tf +++ /dev/null @@ -1,16 +0,0 @@ - -output "master_ip" { - value = module.kubernetes.master_ip -} - -output "worker_ip" { - value = module.kubernetes.worker_ip -} - -output "bastion_ip" { - value = module.kubernetes.bastion_ip -} - -output "loadbalancer_domain" { - value = module.kubernetes.loadbalancer_domain -} diff --git a/contrib/terraform/upcloud/sample-inventory/cluster.tfvars b/contrib/terraform/upcloud/sample-inventory/cluster.tfvars deleted file mode 100644 index d1546004bcc..00000000000 --- a/contrib/terraform/upcloud/sample-inventory/cluster.tfvars +++ /dev/null @@ -1,149 +0,0 @@ -# See: https://developers.upcloud.com/1.3/5-zones/ -zone = "fi-hel1" 
-username = "ubuntu" - -# Prefix to use for all resources to separate them from other resources -prefix = "kubespray" - -inventory_file = "inventory.ini" - -# Set the operating system using UUID or exact name -template_name = "Ubuntu Server 20.04 LTS (Focal Fossa)" - -ssh_public_keys = [ - # Put your public SSH key here - "ssh-rsa I-did-not-read-the-docs", - "ssh-rsa I-did-not-read-the-docs 2", -] - -# check list of available plan https://developers.upcloud.com/1.3/7-plans/ -machines = { - "control-plane-0" : { - "node_type" : "master", - # plan to use instead of custom cpu/mem - "plan" : null, - #number of cpu cores - "cpu" : "2", - #memory size in MB - "mem" : "4096" - # The size of the storage in GB - "disk_size" : 250 - "additional_disks" : {} - }, - "worker-0" : { - "node_type" : "worker", - # plan to use instead of custom cpu/mem - "plan" : null, - #number of cpu cores - "cpu" : "2", - #memory size in MB - "mem" : "4096" - # The size of the storage in GB - "disk_size" : 250 - "additional_disks" : { - # "some-disk-name-1": { - # "size": 100, - # "tier": "maxiops", - # }, - # "some-disk-name-2": { - # "size": 100, - # "tier": "maxiops", - # } - } - }, - "worker-1" : { - "node_type" : "worker", - # plan to use instead of custom cpu/mem - "plan" : null, - #number of cpu cores - "cpu" : "2", - #memory size in MB - "mem" : "4096" - # The size of the storage in GB - "disk_size" : 250 - "additional_disks" : { - # "some-disk-name-1": { - # "size": 100, - # "tier": "maxiops", - # }, - # "some-disk-name-2": { - # "size": 100, - # "tier": "maxiops", - # } - } - }, - "worker-2" : { - "node_type" : "worker", - # plan to use instead of custom cpu/mem - "plan" : null, - #number of cpu cores - "cpu" : "2", - #memory size in MB - "mem" : "4096" - # The size of the storage in GB - "disk_size" : 250 - "additional_disks" : { - # "some-disk-name-1": { - # "size": 100, - # "tier": "maxiops", - # }, - # "some-disk-name-2": { - # "size": 100, - # "tier": "maxiops", - # } - } - } -} - -firewall_enabled = false -firewall_default_deny_in = false -firewall_default_deny_out = false - - -master_allowed_remote_ips = [ - { - "start_address" : "0.0.0.0" - "end_address" : "255.255.255.255" - } -] - -k8s_allowed_remote_ips = [ - { - "start_address" : "0.0.0.0" - "end_address" : "255.255.255.255" - } -] - -master_allowed_ports = [] -worker_allowed_ports = [] - -loadbalancer_enabled = false -loadbalancer_plan = "development" -loadbalancers = { - # "http" : { - # "port" : 80, - # "target_port" : 80, - # "backend_servers" : [ - # "worker-0", - # "worker-1", - # "worker-2" - # ] - # } -} - -server_groups = { - # "control-plane" = { - # servers = [ - # "control-plane-0" - # ] - # anti_affinity_policy = "strict" - # }, - # "workers" = { - # servers = [ - # "worker-0", - # "worker-1", - # "worker-2" - # ] - # anti_affinity_policy = "yes" - # } -} diff --git a/contrib/terraform/upcloud/sample-inventory/group_vars b/contrib/terraform/upcloud/sample-inventory/group_vars deleted file mode 120000 index 0d510620513..00000000000 --- a/contrib/terraform/upcloud/sample-inventory/group_vars +++ /dev/null @@ -1 +0,0 @@ -../../../../inventory/sample/group_vars/ \ No newline at end of file diff --git a/contrib/terraform/upcloud/templates/inventory.tpl b/contrib/terraform/upcloud/templates/inventory.tpl deleted file mode 100644 index 02e4def3410..00000000000 --- a/contrib/terraform/upcloud/templates/inventory.tpl +++ /dev/null @@ -1,33 +0,0 @@ -[all] -%{ for name, ips in master_ip ~} -${name} ansible_user=${username} 
ansible_host=${lookup(ips, "public", ips.private)} ip=${ips.private} -%{ endfor ~} -%{ for name, ips in worker_ip ~} -${name} ansible_user=${username} ansible_host=${lookup(ips, "public", ips.private)} ip=${ips.private} -%{ endfor ~} - -[kube_control_plane] -%{ for name, ips in master_ip ~} -${name} -%{ endfor ~} - -[etcd] -%{ for name, ips in master_ip ~} -${name} -%{ endfor ~} - -[kube_node] -%{ for name, ips in worker_ip ~} -${name} -%{ endfor ~} - -[k8s_cluster:children] -kube_control_plane -kube_node - -%{ if length(bastion_ip) > 0 ~} -[bastion] -%{ for name, ips in bastion_ip ~} -bastion ansible_user=${username} ansible_host=${ips.public} -%{ endfor ~} -%{ endif ~} diff --git a/contrib/terraform/upcloud/variables.tf b/contrib/terraform/upcloud/variables.tf deleted file mode 100644 index a4ec44efc77..00000000000 --- a/contrib/terraform/upcloud/variables.tf +++ /dev/null @@ -1,259 +0,0 @@ -variable "prefix" { - type = string - default = "kubespray" - - description = "Prefix that is used to distinguish these resources from others" -} - -variable "zone" { - description = "The zone where to run the cluster" -} - -variable "private_cloud" { - description = "Whether the environment is in the private cloud region" - default = false -} - -variable "public_zone" { - description = "The public zone equivalent if the cluster is running in a private cloud zone" -} - -variable "template_name" { - description = "Block describing the preconfigured operating system" -} - -variable "username" { - description = "The username to use for the nodes" - default = "ubuntu" -} - -variable "private_network_cidr" { - description = "CIDR to use for the private network" - default = "172.16.0.0/24" -} - -variable "dns_servers" { - description = "DNS servers that will be used by the nodes. Until [this is solved](https://github.com/UpCloudLtd/terraform-provider-upcloud/issues/562) this is done using user_data to reconfigure resolved" - - type = set(string) - default = [] -} - -variable "use_public_ips" { - description = "If all nodes should get a public IP" - type = bool - default = true -} - -variable "machines" { - description = "Cluster machines" - - type = map(object({ - node_type = string - plan = string - cpu = optional(number) - mem = optional(number) - disk_size = number - server_group : string - force_public_ip : optional(bool, false) - dns_servers : optional(set(string)) - additional_disks = map(object({ - size = number - tier = string - })) - })) -} - -variable "ssh_public_keys" { - description = "List of public SSH keys which are injected into the VMs." 
- type = list(string) -} - -variable "inventory_file" { - description = "Where to store the generated inventory file" -} - -variable "UPCLOUD_USERNAME" { - description = "UpCloud username with API access" -} - -variable "UPCLOUD_PASSWORD" { - description = "Password for UpCloud API user" -} - -variable "firewall_enabled" { - description = "Enable firewall rules" - default = false -} - -variable "master_allowed_remote_ips" { - description = "List of IP start/end addresses allowed to access API of masters" - type = list(object({ - start_address = string - end_address = string - })) - default = [] -} - -variable "k8s_allowed_remote_ips" { - description = "List of IP start/end addresses allowed to SSH to hosts" - type = list(object({ - start_address = string - end_address = string - })) - default = [] -} - -variable "bastion_allowed_remote_ips" { - description = "List of IP start/end addresses allowed to SSH to bastion" - type = list(object({ - start_address = string - end_address = string - })) - default = [] -} - -variable "master_allowed_ports" { - description = "List of ports to allow on masters" - type = list(object({ - protocol = string - port_range_min = number - port_range_max = number - start_address = string - end_address = string - })) -} - -variable "worker_allowed_ports" { - description = "List of ports to allow on workers" - type = list(object({ - protocol = string - port_range_min = number - port_range_max = number - start_address = string - end_address = string - })) -} - -variable "firewall_default_deny_in" { - description = "Add firewall policies that deny all inbound traffic by default" - default = false -} - -variable "firewall_default_deny_out" { - description = "Add firewall policies that deny all outbound traffic by default" - default = false -} - -variable "loadbalancer_enabled" { - description = "Enable load balancer" - default = false -} - -variable "loadbalancer_plan" { - description = "Load balancer plan (development/production-small)" - default = "development" -} - -variable "loadbalancer_legacy_network" { - description = "If the loadbalancer should use the deprecated network field instead of networks blocks. 
You probably want to have this set to false" - - type = bool - default = false -} - -variable "loadbalancers" { - description = "Load balancers" - - type = map(object({ - proxy_protocol = bool - port = number - target_port = number - allow_internal_frontend = optional(bool, false) - backend_servers = list(string) - })) - default = {} -} - -variable "server_groups" { - description = "Server groups" - - type = map(object({ - anti_affinity_policy = string - })) - - default = {} -} - -variable "router_enable" { - description = "If a router should be enabled and connected to the private network or not" - - type = bool - default = false -} - -variable "gateways" { - description = "Gateways that should be connected to the router, requires router_enable is set to true" - - type = map(object({ - features = list(string) - plan = optional(string) - connections = optional(map(object({ - type = string - local_routes = optional(map(object({ - type = string - static_network = string - })), {}) - remote_routes = optional(map(object({ - type = string - static_network = string - })), {}) - tunnels = optional(map(object({ - remote_address = string - ipsec_properties = optional(object({ - child_rekey_time = number - dpd_delay = number - dpd_timeout = number - ike_lifetime = number - rekey_time = number - phase1_algorithms = set(string) - phase1_dh_group_numbers = set(string) - phase1_integrity_algorithms = set(string) - phase2_algorithms = set(string) - phase2_dh_group_numbers = set(string) - phase2_integrity_algorithms = set(string) - })) - })), {}) - })), {}) - })) - default = {} -} - -variable "gateway_vpn_psks" { - description = "Separate variable for providing psks for connection tunnels" - - type = map(object({ - psk = string - })) - default = {} - sensitive = true -} - -variable "static_routes" { - description = "Static routes to apply to the router, requires router_enable is set to true" - - type = map(object({ - nexthop = string - route = string - })) - default = {} -} - -variable "network_peerings" { - description = "Other UpCloud private networks to peer with, requires router_enable is set to true" - - type = map(object({ - remote_network = string - })) - default = {} -} diff --git a/contrib/terraform/upcloud/versions.tf b/contrib/terraform/upcloud/versions.tf deleted file mode 100644 index 4db5980d212..00000000000 --- a/contrib/terraform/upcloud/versions.tf +++ /dev/null @@ -1,10 +0,0 @@ - -terraform { - required_providers { - upcloud = { - source = "UpCloudLtd/upcloud" - version = "~>5.9.0" - } - } - required_version = ">= 0.13" -} diff --git a/contrib/terraform/vsphere/README.md b/contrib/terraform/vsphere/README.md deleted file mode 100644 index 7aa50d899ea..00000000000 --- a/contrib/terraform/vsphere/README.md +++ /dev/null @@ -1,128 +0,0 @@ -# Kubernetes on vSphere with Terraform - -Provision a Kubernetes cluster on [vSphere](https://www.vmware.com/products/vsphere.html) using Terraform and Kubespray. - -## Overview - -The setup looks like following. - -```text - Kubernetes cluster -+-----------------------+ -| +--------------+ | -| | +--------------+ | -| | | | | -| | | Master/etcd | | -| | | node(s) | | -| +-+ | | -| +--------------+ | -| ^ | -| | | -| v | -| +--------------+ | -| | +--------------+ | -| | | | | -| | | Worker | | -| | | node(s) | | -| +-+ | | -| +--------------+ | -+-----------------------+ -``` - -## Warning - -This setup assumes that the DHCP is disabled in the vSphere cluster and IP addresses have to be provided in the configuration file. 
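Because the addresses are assigned statically, it can be worth verifying that none of the planned node IPs are already in use before applying. A minimal sketch, using the hypothetical example addresses from `default.tfvars` (replace them with your own):

```bash
# Hypothetical example addresses; substitute the IPs you plan to put into default.tfvars
for ip in 192.168.0.10 192.168.0.20 192.168.0.21; do
  ping -c 1 -W 1 "$ip" > /dev/null && echo "$ip already responds, choose a different address"
done
```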
- -## Requirements - -* Terraform 0.13.0 or newer (0.12 also works if you modify the provider block to include version and remove all `versions.tf` files) - -## Quickstart - -NOTE: *Assumes you are at the root of the kubespray repo* - -Copy the sample inventory for your cluster and copy the default terraform variables. - -```bash -CLUSTER=my-vsphere-cluster -cp -r inventory/sample inventory/$CLUSTER -cp contrib/terraform/vsphere/default.tfvars inventory/$CLUSTER/ -cd inventory/$CLUSTER -``` - -Edit `default.tfvars` to match your setup. You MUST set values specific for you network and vSphere cluster. - -```bash -# Ensure $EDITOR points to your favorite editor, e.g., vim, emacs, VS Code, etc. -$EDITOR default.tfvars -``` - -For authentication in your vSphere cluster you can use the environment variables. - -```bash -export TF_VAR_vsphere_user=username -export TF_VAR_vsphere_password=password -``` - -Run Terraform to create the infrastructure. - -```bash -terraform init ../../contrib/terraform/vsphere -terraform apply \ - -var-file default.tfvars \ - -state=tfstate-$CLUSTER.tfstate \ - ../../contrib/terraform/vsphere -``` - -You should now have a inventory file named `inventory.ini` that you can use with kubespray. -You can now copy your inventory file and use it with kubespray to set up a cluster. -You can type `terraform output` to find out the IP addresses of the nodes. - -It is a good idea to check that you have basic SSH connectivity to the nodes. You can do that by: - -```bash -ansible -i inventory.ini -m ping all -``` - -Example to use this with the default sample inventory: - -```bash -ansible-playbook -i inventory.ini ../../cluster.yml -b -v -``` - -## Variables - -### Required - -* `machines`: Machines to provision. Key of this object will be used as the name of the machine - * `node_type`: The role of this node *(master|worker)* - * `ip`: The IP address of the machine - * `netmask`: The netmask to use (to be used on the right hand side in CIDR notation, e.g., `24`) -* `network`: The name of the network to attach the machines to -* `gateway`: The IP address of the network gateway -* `vsphere_datacenter`: The identifier of vSphere data center -* `vsphere_compute_cluster`: The identifier of vSphere compute cluster -* `vsphere_datastore`: The identifier of vSphere data store -* `vsphere_server`: This is the vCenter server name or address for vSphere API operations. 
-* `ssh_public_keys`: List of public SSH keys to install on all machines -* `template_name`: The name of a base image (the OVF template be defined in vSphere beforehand) - -### Optional - -* `folder`: Name of the folder to put all machines in (default: `""`) -* `prefix`: Prefix to use for all resources, required to be unique for all clusters in the same project (default: `"k8s"`) -* `inventory_file`: Name of the generated inventory file for Kubespray to use in the Ansible step (default: `inventory.ini`) -* `dns_primary`: The IP address of primary DNS server (default: `8.8.4.4`) -* `dns_secondary`: The IP address of secondary DNS server (default: `8.8.8.8`) -* `firmware`: Firmware to use (default: `bios`) -* `hardware_version`: The version of the hardware (default: `15`) -* `master_cores`: The number of CPU cores for the master nodes (default: 4) -* `master_memory`: The amount of RAM for the master nodes in MB (default: 4096) -* `master_disk_size`: The amount of disk space for the master nodes in GB (default: 20) -* `worker_cores`: The number of CPU cores for the worker nodes (default: 16) -* `worker_memory`: The amount of RAM for the worker nodes in MB (default: 8192) -* `worker_disk_size`: The amount of disk space for the worker nodes in GB (default: 100) -* `vapp`: Boolean to set the template type to vapp. (Default: false) -* `interface_name`: Name of the interface to configure. (Default: ens192) - -An example variables file can be found `default.tfvars` diff --git a/contrib/terraform/vsphere/default.tfvars b/contrib/terraform/vsphere/default.tfvars deleted file mode 100644 index fa169364114..00000000000 --- a/contrib/terraform/vsphere/default.tfvars +++ /dev/null @@ -1,38 +0,0 @@ -prefix = "k8s" - -inventory_file = "inventory.ini" - -network = "VM Network" - -machines = { - "master-0" : { - "node_type" : "master", - "ip" : "i-did-not-read-the-docs", # e.g. 192.168.0.10 - "netmask" : "24" - }, - "worker-0" : { - "node_type" : "worker", - "ip" : "i-did-not-read-the-docs", # e.g. 192.168.0.20 - "netmask" : "24" - }, - "worker-1" : { - "node_type" : "worker", - "ip" : "i-did-not-read-the-docs", # e.g. 192.168.0.21 - "netmask" : "24" - } -} - -gateway = "i-did-not-read-the-docs" # e.g. 192.168.0.1 - -ssh_public_keys = [ - # Put your public SSH key here - "ssh-rsa I-did-not-read-the-docs", - "ssh-rsa I-did-not-read-the-docs 2", -] - -vsphere_datacenter = "i-did-not-read-the-docs" -vsphere_compute_cluster = "i-did-not-read-the-docs" # e.g. Cluster -vsphere_datastore = "i-did-not-read-the-docs" # e.g. ssd-000000 -vsphere_server = "i-did-not-read-the-docs" # e.g. vsphere.server.com - -template_name = "i-did-not-read-the-docs" # e.g. 
ubuntu-bionic-18.04-cloudimg diff --git a/contrib/terraform/vsphere/main.tf b/contrib/terraform/vsphere/main.tf deleted file mode 100644 index fb2d8c8327e..00000000000 --- a/contrib/terraform/vsphere/main.tf +++ /dev/null @@ -1,100 +0,0 @@ -provider "vsphere" { - # Username and password set through env vars VSPHERE_USER and VSPHERE_PASSWORD - user = var.vsphere_user - password = var.vsphere_password - - vsphere_server = var.vsphere_server - - # If you have a self-signed cert - allow_unverified_ssl = true -} - -data "vsphere_datacenter" "dc" { - name = var.vsphere_datacenter -} - -data "vsphere_datastore" "datastore" { - name = var.vsphere_datastore - datacenter_id = data.vsphere_datacenter.dc.id -} - -data "vsphere_network" "network" { - name = var.network - datacenter_id = data.vsphere_datacenter.dc.id -} - -data "vsphere_virtual_machine" "template" { - name = var.template_name - datacenter_id = data.vsphere_datacenter.dc.id -} - -data "vsphere_compute_cluster" "compute_cluster" { - name = var.vsphere_compute_cluster - datacenter_id = data.vsphere_datacenter.dc.id -} - -resource "vsphere_resource_pool" "pool" { - name = "${var.prefix}-cluster-pool" - parent_resource_pool_id = data.vsphere_compute_cluster.compute_cluster.resource_pool_id -} - -module "kubernetes" { - source = "./modules/kubernetes-cluster" - - prefix = var.prefix - - machines = var.machines - - ## Master ## - master_cores = var.master_cores - master_memory = var.master_memory - master_disk_size = var.master_disk_size - - ## Worker ## - worker_cores = var.worker_cores - worker_memory = var.worker_memory - worker_disk_size = var.worker_disk_size - - ## Global ## - - gateway = var.gateway - dns_primary = var.dns_primary - dns_secondary = var.dns_secondary - - pool_id = vsphere_resource_pool.pool.id - datastore_id = data.vsphere_datastore.datastore.id - - folder = var.folder - guest_id = data.vsphere_virtual_machine.template.guest_id - scsi_type = data.vsphere_virtual_machine.template.scsi_type - network_id = data.vsphere_network.network.id - adapter_type = data.vsphere_virtual_machine.template.network_interface_types[0] - interface_name = var.interface_name - firmware = var.firmware - hardware_version = var.hardware_version - disk_thin_provisioned = data.vsphere_virtual_machine.template.disks.0.thin_provisioned - - template_id = data.vsphere_virtual_machine.template.id - vapp = var.vapp - - ssh_public_keys = var.ssh_public_keys -} - -# -# Generate ansible inventory -# - -resource "local_file" "inventory" { - content = templatefile("${path.module}/templates/inventory.tpl", { - connection_strings_master = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s etcd_member_name=etcd%d", - keys(module.kubernetes.master_ip), - values(module.kubernetes.master_ip), - range(1, length(module.kubernetes.master_ip) + 1))), - connection_strings_worker = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s", - keys(module.kubernetes.worker_ip), - values(module.kubernetes.worker_ip))), - list_master = join("\n", formatlist("%s", keys(module.kubernetes.master_ip))), - list_worker = join("\n", formatlist("%s", keys(module.kubernetes.worker_ip))) - }) - filename = var.inventory_file -} diff --git a/contrib/terraform/vsphere/modules/kubernetes-cluster/main.tf b/contrib/terraform/vsphere/modules/kubernetes-cluster/main.tf deleted file mode 100644 index a44c2cfb0a4..00000000000 --- a/contrib/terraform/vsphere/modules/kubernetes-cluster/main.tf +++ /dev/null @@ -1,149 +0,0 @@ -resource "vsphere_virtual_machine" "worker" { - 
for_each = { - for name, machine in var.machines : - name => machine - if machine.node_type == "worker" - } - - name = "${var.prefix}-${each.key}" - - resource_pool_id = var.pool_id - datastore_id = var.datastore_id - - num_cpus = var.worker_cores - memory = var.worker_memory - memory_reservation = var.worker_memory - guest_id = var.guest_id - enable_disk_uuid = "true" # needed for CSI provider - scsi_type = var.scsi_type - folder = var.folder - firmware = var.firmware - hardware_version = var.hardware_version - - wait_for_guest_net_routable = false - wait_for_guest_net_timeout = 0 - - network_interface { - network_id = var.network_id - adapter_type = var.adapter_type - } - - disk { - label = "disk0" - size = var.worker_disk_size - thin_provisioned = var.disk_thin_provisioned - } - - lifecycle { - ignore_changes = [disk] - } - - clone { - template_uuid = var.template_id - } - - cdrom { - client_device = true - } - - dynamic "vapp" { - for_each = var.vapp ? [1] : [] - - content { - properties = { - "user-data" = base64encode(templatefile("${path.module}/templates/vapp-cloud-init.tpl", { ssh_public_keys = var.ssh_public_keys })) - } - } - } - - extra_config = { - "isolation.tools.copy.disable" = "FALSE" - "isolation.tools.paste.disable" = "FALSE" - "isolation.tools.setGUIOptions.enable" = "TRUE" - "guestinfo.userdata" = base64encode(templatefile("${path.module}/templates/cloud-init.tpl", { ssh_public_keys = var.ssh_public_keys })) - "guestinfo.userdata.encoding" = "base64" - "guestinfo.metadata" = base64encode(templatefile("${path.module}/templates/metadata.tpl", { hostname = "${var.prefix}-${each.key}", - interface_name = var.interface_name - ip = each.value.ip, - netmask = each.value.netmask, - gw = var.gateway, - dns = var.dns_primary, - ssh_public_keys = var.ssh_public_keys })) - "guestinfo.metadata.encoding" = "base64" - } -} - -resource "vsphere_virtual_machine" "master" { - for_each = { - for name, machine in var.machines : - name => machine - if machine.node_type == "master" - } - - name = "${var.prefix}-${each.key}" - - resource_pool_id = var.pool_id - datastore_id = var.datastore_id - - num_cpus = var.master_cores - memory = var.master_memory - memory_reservation = var.master_memory - guest_id = var.guest_id - enable_disk_uuid = "true" # needed for CSI provider - scsi_type = var.scsi_type - folder = var.folder - firmware = var.firmware - hardware_version = var.hardware_version - - wait_for_guest_net_routable = false - wait_for_guest_net_timeout = 0 - - network_interface { - network_id = var.network_id - adapter_type = var.adapter_type - } - - disk { - label = "disk0" - size = var.master_disk_size - thin_provisioned = var.disk_thin_provisioned - } - - lifecycle { - ignore_changes = [disk] - } - - clone { - template_uuid = var.template_id - } - - cdrom { - client_device = true - } - - dynamic "vapp" { - for_each = var.vapp ? 
[1] : [] - - content { - properties = { - "user-data" = base64encode(templatefile("${path.module}/templates/vapp-cloud-init.tpl", { ssh_public_keys = var.ssh_public_keys })) - } - } - } - - extra_config = { - "isolation.tools.copy.disable" = "FALSE" - "isolation.tools.paste.disable" = "FALSE" - "isolation.tools.setGUIOptions.enable" = "TRUE" - "guestinfo.userdata" = base64encode(templatefile("${path.module}/templates/cloud-init.tpl", { ssh_public_keys = var.ssh_public_keys })) - "guestinfo.userdata.encoding" = "base64" - "guestinfo.metadata" = base64encode(templatefile("${path.module}/templates/metadata.tpl", { hostname = "${var.prefix}-${each.key}", - interface_name = var.interface_name - ip = each.value.ip, - netmask = each.value.netmask, - gw = var.gateway, - dns = var.dns_primary, - ssh_public_keys = var.ssh_public_keys })) - "guestinfo.metadata.encoding" = "base64" - } -} diff --git a/contrib/terraform/vsphere/modules/kubernetes-cluster/output.tf b/contrib/terraform/vsphere/modules/kubernetes-cluster/output.tf deleted file mode 100644 index 93752ab1e31..00000000000 --- a/contrib/terraform/vsphere/modules/kubernetes-cluster/output.tf +++ /dev/null @@ -1,15 +0,0 @@ -output "master_ip" { - value = { - for name, machine in var.machines : - "${var.prefix}-${name}" => machine.ip - if machine.node_type == "master" - } -} - -output "worker_ip" { - value = { - for name, machine in var.machines : - "${var.prefix}-${name}" => machine.ip - if machine.node_type == "worker" - } -} diff --git a/contrib/terraform/vsphere/modules/kubernetes-cluster/templates/cloud-init.tpl b/contrib/terraform/vsphere/modules/kubernetes-cluster/templates/cloud-init.tpl deleted file mode 100644 index 5f809af6a92..00000000000 --- a/contrib/terraform/vsphere/modules/kubernetes-cluster/templates/cloud-init.tpl +++ /dev/null @@ -1,6 +0,0 @@ -#cloud-config - -ssh_authorized_keys: -%{ for ssh_public_key in ssh_public_keys ~} - - ${ssh_public_key} -%{ endfor ~} diff --git a/contrib/terraform/vsphere/modules/kubernetes-cluster/templates/metadata.tpl b/contrib/terraform/vsphere/modules/kubernetes-cluster/templates/metadata.tpl deleted file mode 100644 index 1553f08fe0a..00000000000 --- a/contrib/terraform/vsphere/modules/kubernetes-cluster/templates/metadata.tpl +++ /dev/null @@ -1,14 +0,0 @@ -instance-id: ${hostname} -local-hostname: ${hostname} -network: - version: 2 - ethernets: - ${interface_name}: - match: - name: ${interface_name} - dhcp4: false - addresses: - - ${ip}/${netmask} - gateway4: ${gw} - nameservers: - addresses: [${dns}] diff --git a/contrib/terraform/vsphere/modules/kubernetes-cluster/templates/vapp-cloud-init.tpl b/contrib/terraform/vsphere/modules/kubernetes-cluster/templates/vapp-cloud-init.tpl deleted file mode 100644 index 07d0778aa65..00000000000 --- a/contrib/terraform/vsphere/modules/kubernetes-cluster/templates/vapp-cloud-init.tpl +++ /dev/null @@ -1,24 +0,0 @@ -#cloud-config - -ssh_authorized_keys: -%{ for ssh_public_key in ssh_public_keys ~} - - ${ssh_public_key} -%{ endfor ~} - -write_files: - - path: /etc/netplan/10-user-network.yaml - content: |. 
- network: - version: 2 - ethernets: - ${interface_name}: - dhcp4: false #true to use dhcp - addresses: - - ${ip}/${netmask} - gateway4: ${gw} # Set gw here - nameservers: - addresses: - - ${dns} # Set DNS ip address here - -runcmd: - - netplan apply diff --git a/contrib/terraform/vsphere/modules/kubernetes-cluster/variables.tf b/contrib/terraform/vsphere/modules/kubernetes-cluster/variables.tf deleted file mode 100644 index cb99142321c..00000000000 --- a/contrib/terraform/vsphere/modules/kubernetes-cluster/variables.tf +++ /dev/null @@ -1,43 +0,0 @@ -## Global ## -variable "prefix" {} - -variable "machines" { - description = "Cluster machines" - type = map(object({ - node_type = string - ip = string - netmask = string - })) -} - -variable "gateway" {} -variable "dns_primary" {} -variable "dns_secondary" {} -variable "pool_id" {} -variable "datastore_id" {} -variable "guest_id" {} -variable "scsi_type" {} -variable "network_id" {} -variable "interface_name" {} -variable "adapter_type" {} -variable "disk_thin_provisioned" {} -variable "template_id" {} -variable "vapp" { - type = bool -} -variable "firmware" {} -variable "folder" {} -variable "ssh_public_keys" { - type = list(string) -} -variable "hardware_version" {} - -## Master ## -variable "master_cores" {} -variable "master_memory" {} -variable "master_disk_size" {} - -## Worker ## -variable "worker_cores" {} -variable "worker_memory" {} -variable "worker_disk_size" {} diff --git a/contrib/terraform/vsphere/modules/kubernetes-cluster/versions.tf b/contrib/terraform/vsphere/modules/kubernetes-cluster/versions.tf deleted file mode 100644 index 8c622fdfc14..00000000000 --- a/contrib/terraform/vsphere/modules/kubernetes-cluster/versions.tf +++ /dev/null @@ -1,9 +0,0 @@ -terraform { - required_providers { - vsphere = { - source = "hashicorp/vsphere" - version = ">= 1.24.3" - } - } - required_version = ">= 0.13" -} diff --git a/contrib/terraform/vsphere/output.tf b/contrib/terraform/vsphere/output.tf deleted file mode 100644 index a4338d9be3e..00000000000 --- a/contrib/terraform/vsphere/output.tf +++ /dev/null @@ -1,31 +0,0 @@ -output "master_ip_addresses" { - value = module.kubernetes.master_ip -} - -output "worker_ip_addresses" { - value = module.kubernetes.worker_ip -} - -output "vsphere_datacenter" { - value = var.vsphere_datacenter -} - -output "vsphere_server" { - value = var.vsphere_server -} - -output "vsphere_datastore" { - value = var.vsphere_datastore -} - -output "vsphere_network" { - value = var.network -} - -output "vsphere_folder" { - value = var.folder -} - -output "vsphere_pool" { - value = "${terraform.workspace}-cluster-pool" -} diff --git a/contrib/terraform/vsphere/sample-inventory/cluster.tfvars b/contrib/terraform/vsphere/sample-inventory/cluster.tfvars deleted file mode 100644 index dfa0a3d4fdd..00000000000 --- a/contrib/terraform/vsphere/sample-inventory/cluster.tfvars +++ /dev/null @@ -1,33 +0,0 @@ -prefix = "default" - -inventory_file = "inventory.ini" - -machines = { - "master-0" : { - "node_type" : "master", - "ip" : "i-did-not-read-the-docs" # e.g. 192.168.0.2/24 - }, - "worker-0" : { - "node_type" : "worker", - "ip" : "i-did-not-read-the-docs" # e.g. 192.168.0.2/24 - }, - "worker-1" : { - "node_type" : "worker", - "ip" : "i-did-not-read-the-docs" # e.g. 192.168.0.2/24 - } -} - -gateway = "i-did-not-read-the-docs" # e.g. 
192.168.0.2 - -ssh_public_keys = [ - # Put your public SSH key here - "ssh-rsa I-did-not-read-the-docs", - "ssh-rsa I-did-not-read-the-docs 2", -] - -vsphere_datacenter = "i-did-not-read-the-docs" -vsphere_compute_cluster = "i-did-not-read-the-docs" # e.g. Cluster -vsphere_datastore = "i-did-not-read-the-docs" # e.g. ssd-000000 -vsphere_server = "i-did-not-read-the-docs" # e.g. vsphere.server.com - -template_name = "i-did-not-read-the-docs" # e.g. ubuntu-bionic-18.04-cloudimg diff --git a/contrib/terraform/vsphere/sample-inventory/group_vars b/contrib/terraform/vsphere/sample-inventory/group_vars deleted file mode 120000 index 37359582379..00000000000 --- a/contrib/terraform/vsphere/sample-inventory/group_vars +++ /dev/null @@ -1 +0,0 @@ -../../../../inventory/sample/group_vars \ No newline at end of file diff --git a/contrib/terraform/vsphere/templates/inventory.tpl b/contrib/terraform/vsphere/templates/inventory.tpl deleted file mode 100644 index 28ff28ac223..00000000000 --- a/contrib/terraform/vsphere/templates/inventory.tpl +++ /dev/null @@ -1,17 +0,0 @@ - -[all] -${connection_strings_master} -${connection_strings_worker} - -[kube_control_plane] -${list_master} - -[etcd] -${list_master} - -[kube_node] -${list_worker} - -[k8s_cluster:children] -kube_control_plane -kube_node diff --git a/contrib/terraform/vsphere/variables.tf b/contrib/terraform/vsphere/variables.tf deleted file mode 100644 index 03f9007e11d..00000000000 --- a/contrib/terraform/vsphere/variables.tf +++ /dev/null @@ -1,101 +0,0 @@ -## Global ## - -# Required variables - -variable "machines" { - description = "Cluster machines" - type = map(object({ - node_type = string - ip = string - netmask = string - })) -} - -variable "network" {} - -variable "gateway" {} - -variable "vsphere_datacenter" {} - -variable "vsphere_compute_cluster" {} - -variable "vsphere_datastore" {} - -variable "vsphere_user" {} - -variable "vsphere_password" { - sensitive = true -} - -variable "vsphere_server" {} - -variable "ssh_public_keys" { - description = "List of public SSH keys which are injected into the VMs." 
- type = list(string) -} - -variable "template_name" {} - -# Optional variables (ones where reasonable defaults exist) -variable "vapp" { - default = false -} - -variable "interface_name" { - default = "ens192" -} - -variable "folder" { - default = "" -} - -variable "prefix" { - default = "k8s" -} - -variable "inventory_file" { - default = "inventory.ini" -} - -variable "dns_primary" { - default = "8.8.4.4" -} - -variable "dns_secondary" { - default = "8.8.8.8" -} - -variable "firmware" { - default = "bios" -} - -variable "hardware_version" { - default = "15" -} - -## Master ## - -variable "master_cores" { - default = 4 -} - -variable "master_memory" { - default = 4096 -} - -variable "master_disk_size" { - default = "20" -} - -## Worker ## - -variable "worker_cores" { - default = 16 -} - -variable "worker_memory" { - default = 8192 -} -variable "worker_disk_size" { - default = "100" -} diff --git a/contrib/terraform/vsphere/versions.tf b/contrib/terraform/vsphere/versions.tf deleted file mode 100644 index 8c622fdfc14..00000000000 --- a/contrib/terraform/vsphere/versions.tf +++ /dev/null @@ -1,9 +0,0 @@ -terraform { - required_providers { - vsphere = { - source = "hashicorp/vsphere" - version = ">= 1.24.3" - } - } - required_version = ">= 0.13" -} diff --git a/docs/cloud_controllers/openstack.md b/docs/cloud_controllers/openstack.md deleted file mode 100644 index 7a80ff713bd..00000000000 --- a/docs/cloud_controllers/openstack.md +++ /dev/null @@ -1,134 +0,0 @@ -# OpenStack - -## Known compatible public clouds - -Kubespray has been tested on a number of OpenStack Public Clouds including (in alphabetical order): - -- [Auro](https://auro.io/) -- [Betacloud](https://www.betacloud.io/) -- [CityCloud](https://www.citycloud.com/) -- [DreamHost](https://www.dreamhost.com/cloud/computing/) -- [ELASTX](https://elastx.se/) -- [EnterCloudSuite](https://www.entercloudsuite.com/) -- [FugaCloud](https://fuga.cloud/) -- [Infomaniak](https://infomaniak.com) -- [Open Telekom Cloud](https://cloud.telekom.de/) : requires to set the variable `wait_for_floatingip = "true"` in your cluster.tfvars -- [OVHcloud](https://www.ovhcloud.com/) -- [Rackspace](https://www.rackspace.com/) -- [Ultimum](https://ultimum.io/) -- [VexxHost](https://vexxhost.com/) -- [Zetta](https://www.zetta.io/) - -## The OpenStack cloud provider - -The cloud provider is configured to have Octavia by default in Kubespray. - -- Enable the external OpenStack cloud provider in `group_vars/all/all.yml`: - - ```yaml - cloud_provider: external - external_cloud_provider: openstack - ``` - -- Enable Cinder CSI in `group_vars/all/openstack.yml`: - - ```yaml - cinder_csi_enabled: true - ``` - -- Enable topology support (optional), if your openstack provider has custom Zone names you can override the default "nova" zone by setting the variable `cinder_topology_zones` - - ```yaml - cinder_topology: true - ``` - -- Enabling `cinder_csi_ignore_volume_az: true`, ignores volumeAZ and schedules on any of the available node AZ. - - ```yaml - cinder_csi_ignore_volume_az: true - ``` - -- If you are using OpenStack loadbalancer(s) replace the `openstack_lbaas_subnet_id` with the new `external_openstack_lbaas_subnet_id`. **Note** The new cloud provider is using Octavia instead of Neutron LBaaS by default! 
- -- If you are in a case of a multi-nic OpenStack VMs (see [kubernetes/cloud-provider-openstack#407](https://github.com/kubernetes/cloud-provider-openstack/issues/407) and [#6083](https://github.com/kubernetes-sigs/kubespray/issues/6083) for explanation), you should override the default OpenStack networking configuration: - - ```yaml - external_openstack_network_ipv6_disabled: false - external_openstack_network_internal_networks: [] - external_openstack_network_public_networks: [] - ``` - -- You can override the default OpenStack metadata configuration (see [#6338](https://github.com/kubernetes-sigs/kubespray/issues/6338) for explanation): - - ```yaml - external_openstack_metadata_search_order: "configDrive,metadataService" - ``` - -- Available variables for configuring lbaas: - - ```yaml - external_openstack_lbaas_enabled: true - external_openstack_lbaas_floating_network_id: "Neutron network ID to get floating IP from" - external_openstack_lbaas_floating_subnet_id: "Neutron subnet ID to get floating IP from" - external_openstack_lbaas_method: ROUND_ROBIN - external_openstack_lbaas_provider: amphora - external_openstack_lbaas_subnet_id: "Neutron subnet ID to create LBaaS VIP" - external_openstack_lbaas_member_subnet_id: "Neutron subnet ID on which to create the members of the load balancer" - external_openstack_lbaas_network_id: "Neutron network ID to create LBaaS VIP" - external_openstack_lbaas_manage_security_groups: false - external_openstack_lbaas_create_monitor: false - external_openstack_lbaas_monitor_delay: 5s - external_openstack_lbaas_monitor_max_retries: 1 - external_openstack_lbaas_monitor_timeout: 3s - external_openstack_lbaas_internal_lb: false - - ``` - -- Run `source path/to/your/openstack-rc` to read your OpenStack credentials like `OS_AUTH_URL`, `OS_USERNAME`, `OS_PASSWORD`, etc. Those variables are used for accessing OpenStack from the external cloud provider. -- Run the `cluster.yml` playbook - -## Additional step needed when using calico or kube-router - -Being L3 CNI, calico and kube-router do not encapsulate all packages with the hosts' ip addresses. Instead the packets will be routed with the PODs ip addresses directly. - -OpenStack will filter and drop all packets from ips it does not know to prevent spoofing. - -In order to make L3 CNIs work on OpenStack you will need to tell OpenStack to allow pods packets by allowing the network they use. 
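-
-The commands below show how to set this up manually with the OpenStack CLI. If the Neutron ports for the nodes are instead managed with Terraform, the same effect can be sketched with `allowed_address_pairs` on the port resource (illustrative only; neither the resource name nor the variable is part of this repository):
-
-```hcl
-resource "openstack_networking_port_v2" "k8s_node" {
-  name       = "k8s-node-port"
-  network_id = var.network_id # hypothetical variable holding the Neutron network ID
-
-  # Allow packets sourced from the Kubernetes service and pod networks
-  allowed_address_pairs {
-    ip_address = "10.233.0.0/18" # kube_service_addresses (default)
-  }
-  allowed_address_pairs {
-    ip_address = "10.233.64.0/18" # kube_pods_subnet (default)
-  }
-}
-```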
- -First you will need the ids of your OpenStack instances that will run kubernetes: - - ```bash - openstack server list --project YOUR_PROJECT - +--------------------------------------+--------+----------------------------------+--------+-------------+ - | ID | Name | Tenant ID | Status | Power State | - +--------------------------------------+--------+----------------------------------+--------+-------------+ - | e1f48aad-df96-4bce-bf61-62ae12bf3f95 | k8s-1 | fba478440cb2444a9e5cf03717eb5d6f | ACTIVE | Running | - | 725cd548-6ea3-426b-baaa-e7306d3c8052 | k8s-2 | fba478440cb2444a9e5cf03717eb5d6f | ACTIVE | Running | - ``` - -Then you can use the instance ids to find the connected [neutron](https://wiki.openstack.org/wiki/Neutron) ports (though they are now configured through using OpenStack): - - ```bash - openstack port list -c id -c device_id --project YOUR_PROJECT - +--------------------------------------+--------------------------------------+ - | id | device_id | - +--------------------------------------+--------------------------------------+ - | 5662a4e0-e646-47f0-bf88-d80fbd2d99ef | e1f48aad-df96-4bce-bf61-62ae12bf3f95 | - | e5ae2045-a1e1-4e99-9aac-4353889449a7 | 725cd548-6ea3-426b-baaa-e7306d3c8052 | - ``` - -Given the port ids on the left, you can set the two `allowed-address`(es) in OpenStack. Note that you have to allow both `kube_service_addresses` (default `10.233.0.0/18`) and `kube_pods_subnet` (default `10.233.64.0/18`.) - - ```bash - # allow kube_service_addresses and kube_pods_subnet network - openstack port set 5662a4e0-e646-47f0-bf88-d80fbd2d99ef --allowed-address ip-address=10.233.0.0/18 --allowed-address ip-address=10.233.64.0/18 - openstack port set e5ae2045-a1e1-4e99-9aac-4353889449a7 --allowed-address ip-address=10.233.0.0/18 --allowed-address ip-address=10.233.64.0/18 - ``` - -If all the VMs in the tenant correspond to Kubespray deployment, you can "sweep run" above with: - - ```bash - openstack port list --device-owner=compute:nova -c ID -f value | xargs -tI@ openstack port set @ --allowed-address ip-address=10.233.0.0/18 --allowed-address ip-address=10.233.64.0/18 - ``` - -Now you can finally run the playbook. diff --git a/docs/cloud_controllers/vsphere.md b/docs/cloud_controllers/vsphere.md deleted file mode 100644 index 72a2c1dbad0..00000000000 --- a/docs/cloud_controllers/vsphere.md +++ /dev/null @@ -1,134 +0,0 @@ -# vSphere - -Kubespray can be deployed with vSphere as Cloud provider. This feature supports: - -- Volumes -- Persistent Volumes -- Storage Classes and provisioning of volumes -- vSphere Storage Policy Based Management for Containers orchestrated by Kubernetes - -## Out-of-tree vSphere cloud provider - -### Prerequisites - -You need at first to configure your vSphere environment by following the [official documentation](https://github.com/kubernetes/cloud-provider-vsphere/blob/master/docs/book/tutorials/kubernetes-on-vsphere-with-kubeadm.md#prerequisites). - -After this step you should have: - -- vSphere upgraded to 6.7 U3 or later -- VM hardware upgraded to version 15 or higher -- UUID activated for each VM where Kubernetes will be deployed - -### Kubespray configuration - -First in `inventory/sample/group_vars/all/all.yml` you must set the `cloud_provider` to `external` and `external_cloud_provider` to `vsphere`. 
- -```yml -cloud_provider: "external" -external_cloud_provider: "vsphere" -``` - -Then, `inventory/sample/group_vars/all/vsphere.yml`, you need to declare your vCenter credentials and enable the vSphere CSI following the description below. - -| Variable | Required | Type | Choices | Default | Comment | -|----------------------------------------|----------|---------|----------------------------|---------------------------|---------------------------------------------------------------------------------------------------------------------| -| external_vsphere_vcenter_ip | TRUE | string | | | IP/URL of the vCenter | -| external_vsphere_vcenter_port | TRUE | string | | "443" | Port of the vCenter API | -| external_vsphere_insecure | TRUE | string | "true", "false" | "true" | set to "true" if the host above uses a self-signed cert | -| external_vsphere_user | TRUE | string | | | User name for vCenter with required privileges (Can also be specified with the `VSPHERE_USER` environment variable) | -| external_vsphere_password | TRUE | string | | | Password for vCenter (Can also be specified with the `VSPHERE_PASSWORD` environment variable) | -| external_vsphere_datacenter | TRUE | string | | | Datacenter name to use | -| external_vsphere_kubernetes_cluster_id | TRUE | string | | "kubernetes-cluster-id" | Kubernetes cluster ID to use | -| vsphere_csi_enabled | TRUE | boolean | | false | Enable vSphere CSI | - -Example configuration: - -```yml -external_vsphere_vcenter_ip: "myvcenter.domain.com" -external_vsphere_vcenter_port: "443" -external_vsphere_insecure: "true" -external_vsphere_user: "administrator@vsphere.local" -external_vsphere_password: "K8s_admin" -external_vsphere_datacenter: "DATACENTER_name" -external_vsphere_kubernetes_cluster_id: "kubernetes-cluster-id" -vsphere_csi_enabled: true -``` - -For a more fine-grained CSI setup, refer to the [vsphere-csi](/docs/CSI/vsphere-csi.md) documentation. - -### Deployment - -Once the configuration is set, you can execute the playbook again to apply the new configuration: - -```ShellSession -cd kubespray -ansible-playbook -i inventory/sample/hosts.ini -b -v cluster.yml -``` - -You'll find some useful examples [here](https://github.com/kubernetes/cloud-provider-vsphere/blob/master/docs/book/tutorials/kubernetes-on-vsphere-with-kubeadm.md#sample-manifests-to-test-csi-driver-functionality) to test your configuration. - -## In-tree vSphere cloud provider ([deprecated](https://cloud-provider-vsphere.sigs.k8s.io/concepts/in_tree_vs_out_of_tree.html)) - -### Prerequisites (deprecated) - -You need at first to configure your vSphere environment by following the [official documentation](https://kubernetes.io/docs/getting-started-guides/vsphere/#vsphere-cloud-provider). - -After this step you should have: - -- UUID activated for each VM where Kubernetes will be deployed -- A vSphere account with required privileges - -If you intend to leverage the [zone and region node labeling](https://kubernetes.io/docs/reference/kubernetes-api/labels-annotations-taints/#failure-domain-beta-kubernetes-io-region), create a tag category for both the zone and region in vCenter. The tags can then be applied at the host, cluster, datacenter, or folder level, and the cloud provider will walk the hierarchy to extract and apply the labels to the Kubernetes nodes. - -### Kubespray configuration (deprecated) - -First you must define the cloud provider in `inventory/sample/group_vars/all.yml` and set it to `vsphere`. 
- -```yml -cloud_provider: vsphere -``` - -Then, in the same file, you need to declare your vCenter credentials following the description below. - -| Variable | Required | Type | Choices | Default | Comment | -|------------------------------|----------|---------|----------------------------|---------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| vsphere_vcenter_ip | TRUE | string | | | IP/URL of the vCenter | -| vsphere_vcenter_port | TRUE | integer | | | Port of the vCenter API. Commonly 443 | -| vsphere_insecure | TRUE | integer | 1, 0 | | set to 1 if the host above uses a self-signed cert | -| vsphere_user | TRUE | string | | | User name for vCenter with required privileges | -| vsphere_password | TRUE | string | | | Password for vCenter | -| vsphere_datacenter | TRUE | string | | | Datacenter name to use | -| vsphere_datastore | TRUE | string | | | Datastore name to use | -| vsphere_working_dir | TRUE | string | | | Working directory from the view "VMs and template" in the vCenter where VM are placed | -| vsphere_scsi_controller_type | TRUE | string | buslogic, pvscsi, parallel | pvscsi | SCSI controller name. Commonly "pvscsi". | -| vsphere_vm_uuid | FALSE | string | | | VM Instance UUID of virtual machine that host K8s master. Can be retrieved from instanceUuid property in VmConfigInfo, or as vc.uuid in VMX file or in `/sys/class/dmi/id/product_serial` (Optional, only used for Kubernetes <= 1.9.2) | -| vsphere_public_network | FALSE | string | | Blank | Name of the network the VMs are joined to | -| vsphere_resource_pool | FALSE | string | | Blank | Name of the Resource pool where the VMs are located (Optional, only used for Kubernetes >= 1.9.2) | -| vsphere_zone_category | FALSE | string | | | Name of the tag category used to set the `failure-domain.beta.kubernetes.io/zone` label on nodes (Optional, only used for Kubernetes >= 1.12.0) | -| vsphere_region_category | FALSE | string | | | Name of the tag category used to set the `failure-domain.beta.kubernetes.io/region` label on nodes (Optional, only used for Kubernetes >= 1.12.0) | - -Example configuration: - -```yml -vsphere_vcenter_ip: "myvcenter.domain.com" -vsphere_vcenter_port: 443 -vsphere_insecure: 1 -vsphere_user: "k8s@vsphere.local" -vsphere_password: "K8s_admin" -vsphere_datacenter: "DATACENTER_name" -vsphere_datastore: "DATASTORE_name" -vsphere_working_dir: "Docker_hosts" -vsphere_scsi_controller_type: "pvscsi" -vsphere_resource_pool: "K8s-Pool" -``` - -### Deployment (deprecated) - -Once the configuration is set, you can execute the playbook again to apply the new configuration: - -```ShellSession -cd kubespray -ansible-playbook -i inventory/sample/hosts.ini -b -v cluster.yml -``` - -You'll find some useful examples [here](https://github.com/kubernetes/examples/tree/master/staging/volumes/vsphere) to test your configuration. diff --git a/docs/cloud_providers/aws.md b/docs/cloud_providers/aws.md deleted file mode 100644 index 41706fdd568..00000000000 --- a/docs/cloud_providers/aws.md +++ /dev/null @@ -1,95 +0,0 @@ -# AWS - -> **Removed**: Since v1.31 (the Kubespray counterpart is v2.27), Kubernetes no longer supports `cloud_provider`. (except external cloud provider) - -To deploy kubespray on [AWS](https://aws.amazon.com/) uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'aws'`. 
Refer to the [Kubespray Configuration](#kubespray-configuration) for customizing the provider. - -Prior to creating your instances, you **must** ensure that you have created IAM roles and policies for both "kubernetes-master" and "kubernetes-node". You can find the IAM policies [here](https://github.com/kubernetes-sigs/kubespray/tree/master/contrib/aws_iam/). See the [IAM Documentation](https://aws.amazon.com/documentation/iam/) if guidance is needed on how to set these up. When you bring your instances online, associate them with the respective IAM role. Nodes that are only to be used for Etcd do not need a role. - -You would also need to tag the resources in your VPC accordingly for the aws provider to utilize them. Tag the subnets, route tables and all instances that kubernetes will be run on with key `kubernetes.io/cluster/$cluster_name` (`$cluster_name` must be a unique identifier for the cluster). Tag the subnets that must be targeted by external ELBs with the key `kubernetes.io/role/elb` and internal ELBs with the key `kubernetes.io/role/internal-elb`. - -Make sure your VPC has both DNS Hostnames support and Private DNS enabled. - -The next step is to make sure the hostnames in your `inventory` file are identical to your internal hostnames in AWS. This may look something like `ip-111-222-333-444.us-west-2.compute.internal`. You can then specify how Ansible connects to these instances with `ansible_ssh_host` and `ansible_ssh_user`. - -You can now create your cluster! - -## Dynamic Inventory - -There is also a dynamic inventory script for AWS that can be used if desired. However, be aware that it makes some certain assumptions about how you'll create your inventory. It also does not handle all use cases and groups that we may use as part of more advanced deployments. Additions welcome. - -This will produce an inventory that is passed into Ansible that looks like the following: - -```json -{ - "_meta": { - "hostvars": { - "ip-172-31-3-xxx.us-east-2.compute.internal": { - "ansible_ssh_host": "172.31.3.xxx" - }, - "ip-172-31-8-xxx.us-east-2.compute.internal": { - "ansible_ssh_host": "172.31.8.xxx" - } - } - }, - "etcd": [ - "ip-172-31-3-xxx.us-east-2.compute.internal" - ], - "k8s_cluster": { - "children": [ - "kube_control_plane", - "kube_node" - ] - }, - "kube_control_plane": [ - "ip-172-31-3-xxx.us-east-2.compute.internal" - ], - "kube_node": [ - "ip-172-31-8-xxx.us-east-2.compute.internal" - ] -} -``` - -Guide: - -- Create instances in AWS as needed. -- Either during or after creation, add tags to the instances with a key of `kubespray-role` and a value of `kube_control_plane`, `etcd`, or `kube_node`. You can also share roles like `kube_control_plane, etcd` -- Copy the `kubespray-aws-inventory.py` script from `kubespray/contrib/aws_inventory` to the `kubespray/inventory` directory. -- Set the following AWS credentials and info as environment variables in your terminal: - -```ShellSession -export AWS_ACCESS_KEY_ID="xxxxx" -export AWS_SECRET_ACCESS_KEY="yyyyy" -export AWS_REGION="us-east-2" -``` - -- We will now create our cluster. There will be either one or two small changes. The first is that we will specify `-i inventory/kubespray-aws-inventory.py` as our inventory script. The other is conditional. If your AWS instances are public facing, you can set the `VPC_VISIBILITY` variable to `public` and that will result in public IP and DNS names being passed into the inventory. This causes your cluster.yml command to look like `VPC_VISIBILITY="public" ansible-playbook ... 
cluster.yml` - -**Optional** Using labels and taints - -To add labels to your kubernetes node, add the following tag to your instance: - -- Key: `kubespray-node-labels` -- Value: `node-role.kubernetes.io/ingress=` - -To add taints to your kubernetes node, add the following tag to your instance: - -- Key: `kubespray-node-taints` -- Value: `node-role.kubernetes.io/ingress=:NoSchedule` - -## Kubespray configuration - -Declare the cloud config variables for the `aws` provider as follows. Setting these variables are optional and depend on your use case. - -| Variable | Type | Comment | -|------------------------------------|--------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| aws_zone | string | Force set the AWS zone. Recommended to leave blank. | -| aws_vpc | string | The AWS VPC flag enables the possibility to run the master components on a different aws account, on a different cloud provider or on-premise. If the flag is set also the KubernetesClusterTag must be provided | -| aws_subnet_id | string | SubnetID enables using a specific subnet to use for ELB's | -| aws_route_table_id | string | RouteTableID enables using a specific RouteTable | -| aws_role_arn | string | RoleARN is the IAM role to assume when interaction with AWS APIs | -| aws_kubernetes_cluster_tag | string | KubernetesClusterTag is the legacy cluster id we'll use to identify our cluster resources | -| aws_kubernetes_cluster_id | string | KubernetesClusterID is the cluster id we'll use to identify our cluster resources | -| aws_disable_security_group_ingress | bool | The aws provider creates an inbound rule per load balancer on the node security group. However, this can run into the AWS security group rule limit of 50 if many LoadBalancers are created. This flag disables the automatic ingress creation. It requires that the user has setup a rule that allows inbound traffic on kubelet ports from the local VPC subnet (so load balancers can access it). E.g. 10.82.0.0/16 30000-32000. | -| aws_elb_security_group | string | Only in Kubelet version >= 1.7 : AWS has a hard limit of 500 security groups. For large clusters creating a security group for each ELB can cause the max number of security groups to be reached. If this is set instead of creating a new Security group for each ELB this security group will be used instead. | -| aws_disable_strict_zone_check | bool | During the instantiation of an new AWS cloud provider, the detected region is validated against a known set of regions. In a non-standard, AWS like environment (e.g. Eucalyptus), this check may be undesirable. Setting this to true will disable the check and provide a warning that the check was skipped. Please note that this is an experimental feature and work-in-progress for the moment. | diff --git a/docs/cloud_providers/azure.md b/docs/cloud_providers/azure.md deleted file mode 100644 index 50d2f1d1c2b..00000000000 --- a/docs/cloud_providers/azure.md +++ /dev/null @@ -1,125 +0,0 @@ -# Azure - -> **Removed**: Since v1.31 (the Kubespray counterpart is v2.27), Kubernetes no longer supports `cloud_provider`. 
(except external cloud provider) - -To deploy Kubernetes on [Azure](https://azure.microsoft.com) uncomment the `cloud_provider` option in `group_vars/all/all.yml` and set it to `'azure'`. - -All your instances are required to run in a resource group and a routing table has to be attached to the subnet your instances are in. - -Not all features are supported yet though, for a list of the current status have a look [here](https://github.com/Azure/AKS) - -## Parameters - -Before creating the instances you must first set the `azure_` variables in the `group_vars/all/all.yml` file. - -All values can be retrieved using the Azure CLI tool which can be downloaded here: -After installation you have to run `az login` to get access to your account. - -### azure_cloud - -Azure Stack has different API endpoints, depending on the Azure Stack deployment. These need to be provided to the Azure SDK. -Possible values are: `AzureChinaCloud`, `AzureGermanCloud`, `AzurePublicCloud` and `AzureUSGovernmentCloud`. -The full list of existing settings for the AzureChinaCloud, AzureGermanCloud, AzurePublicCloud and AzureUSGovernmentCloud -is available in the source code [here](https://github.com/kubernetes-sigs/cloud-provider-azure/blob/master/docs/cloud-provider-config.md) - -### azure\_tenant\_id + azure\_subscription\_id - -run `az account show` to retrieve your subscription id and tenant id: -`azure_tenant_id` -> Tenant ID field -`azure_subscription_id` -> ID field - -### azure\_location - -The region your instances are located, can be something like `westeurope` or `westcentralus`. A full list of region names can be retrieved via `az account list-locations` - -### azure\_resource\_group - -The name of the resource group your instances are in, can be retrieved via `az group list` - -### azure\_vmtype - -The type of the vm. Supported values are `standard` or `vmss`. If vm is type of `Virtual Machines` then value is `standard`. If vm is part of `Virtual Machine Scale Sets` then value is `vmss` - -### azure\_vnet\_name - -The name of the virtual network your instances are in, can be retrieved via `az network vnet list` - -### azure\_vnet\_resource\_group - -The name of the resource group that contains the vnet. - -### azure\_subnet\_name - -The name of the subnet your instances are in, can be retrieved via `az network vnet subnet list --resource-group RESOURCE_GROUP --vnet-name VNET_NAME` - -### azure\_security\_group\_name - -The name of the network security group your instances are in, can be retrieved via `az network nsg list` - -### azure\_security\_group\_resource\_group - -The name of the resource group that contains the network security group. Defaults to `azure_vnet_resource_group` - -### azure\_route\_table\_name - -The name of the route table used with your instances. - -### azure\_route\_table\_resource\_group - -The name of the resource group that contains the route table. Defaults to `azure_vnet_resource_group` - -### azure\_aad\_client\_id + azure\_aad\_client\_secret - -These will have to be generated first: - -- Create an Azure AD Application with: - - ```ShellSession - az ad app create --display-name kubernetes --identifier-uris http://kubernetes --homepage http://example.com --password CLIENT_SECRET - ``` - -display name, identifier-uri, homepage and the password can be chosen -Note the AppId in the output. 
- -- Create Service principal for the application with: - - ```ShellSession - az ad sp create --id AppId - ``` - -This is the AppId from the last command - -- Create the role assignment with: - - ```ShellSession - az role assignment create --role "Owner" --assignee http://kubernetes --subscription SUBSCRIPTION_ID - ``` - -azure\_aad\_client\_id must be set to the AppId, azure\_aad\_client\_secret is your chosen secret. - -### azure\_loadbalancer\_sku - -Sku of Load Balancer and Public IP. Candidate values are: basic and standard. - -### azure\_exclude\_master\_from\_standard\_lb - -azure\_exclude\_master\_from\_standard\_lb excludes master nodes from `standard` load balancer. - -### azure\_disable\_outbound\_snat - -azure\_disable\_outbound\_snat disables the outbound SNAT for public load balancer rules. It should only be set when azure\_exclude\_master\_from\_standard\_lb is `standard`. - -### azure\_primary\_availability\_set\_name - -(Optional) The name of the availability set that should be used as the load balancer backend .If this is set, the Azure -cloudprovider will only add nodes from that availability set to the load balancer backend pool. If this is not set, and -multiple agent pools (availability sets) are used, then the cloudprovider will try to add all nodes to a single backend -pool which is forbidden. In other words, if you use multiple agent pools (availability sets), you MUST set this field. - -### azure\_use\_instance\_metadata - -Use instance metadata service where possible - -## Provisioning Azure with Resource Group Templates - -You'll find Resource Group Templates and scripts to provision the required infrastructure to Azure in [*contrib/azurerm*](../contrib/azurerm/README.md) diff --git a/docs/cloud_providers/cloud.md b/docs/cloud_providers/cloud.md deleted file mode 100644 index d88a3aeccc6..00000000000 --- a/docs/cloud_providers/cloud.md +++ /dev/null @@ -1,15 +0,0 @@ -# Cloud providers - -> **Removed**: Since v1.31 (the Kubespray counterpart is v2.27), Kubernetes no longer supports `cloud_provider`. (except external cloud provider) - -## Provisioning - -You can deploy instances in your cloud environment in several ways. Examples include Terraform, Ansible (ec2 and gce modules), and manual creation. - -## Deploy kubernetes - -With ansible-playbook command - -```ShellSession -ansible-playbook -u smana -e ansible_ssh_user=admin -e cloud_provider=[aws|gce] -b --become-user=root -i inventory/single.cfg cluster.yml -``` diff --git a/index.html b/index.html deleted file mode 100644 index 31d21576803..00000000000 --- a/index.html +++ /dev/null @@ -1,48 +0,0 @@ - - - - - Kubespray - Deploy a Production Ready Kubernetes Cluster - - - - - - - -
- - - - - - - diff --git a/inventory/2SpeedLab/group_vars/all/all.yml b/inventory/2SpeedLab/group_vars/all/all.yml new file mode 100644 index 00000000000..0d624bfcc64 --- /dev/null +++ b/inventory/2SpeedLab/group_vars/all/all.yml @@ -0,0 +1,139 @@ +--- +## Directory where the binaries will be installed +bin_dir: /usr/local/bin + +## The access_ip variable is used to define how other nodes should access +## the node. This is used in flannel to allow other flannel nodes to see +## this node for example. The access_ip is really useful AWS and Google +## environments where the nodes are accessed remotely by the "public" ip, +## but don't know about that address themselves. +# access_ip: 1.1.1.1 + + +## External LB example config +## apiserver_loadbalancer_domain_name: "elb.some.domain" +# loadbalancer_apiserver: +# address: 1.2.3.4 +# port: 1234 + +## Internal loadbalancers for apiservers +# loadbalancer_apiserver_localhost: true +# valid options are "nginx" or "haproxy" +# loadbalancer_apiserver_type: nginx # valid values "nginx" or "haproxy" + +## Local loadbalancer should use this port +## And must be set port 6443 +loadbalancer_apiserver_port: 6443 + +## If loadbalancer_apiserver_healthcheck_port variable defined, enables proxy liveness check for nginx. +loadbalancer_apiserver_healthcheck_port: 8081 + +### OTHER OPTIONAL VARIABLES + +## By default, Kubespray collects nameservers on the host. It then adds the previously collected nameservers in nameserverentries. +## If true, Kubespray does not include host nameservers in nameserverentries in dns_late stage. However, It uses the nameserver to make sure cluster installed safely in dns_early stage. +## Use this option with caution, you may need to define your dns servers. Otherwise, the outbound queries such as www.google.com may fail. +# disable_host_nameservers: false + +## Upstream dns servers +# upstream_dns_servers: +# - 8.8.8.8 +# - 8.8.4.4 + +## There are some changes specific to the cloud providers +## for instance we need to encapsulate packets with some network plugins +## If set the possible values only 'external' after K8s v1.31. +# cloud_provider: + +# External Cloud Controller Manager (Formerly known as cloud provider) +# cloud_provider must be "external", otherwise this setting is invalid. +# Supported external cloud controllers are: 'openstack', 'vsphere', 'oci', 'huaweicloud', 'hcloud' and 'manual' +# 'manual' does not install the cloud controller manager used by Kubespray. +# If you fill in a value other than the above, the check will fail. +# external_cloud_provider: + +## Set these proxy values in order to update package manager and docker daemon to use proxies and custom CA for https_proxy if needed +# http_proxy: "" +# https_proxy: "" +# https_proxy_cert_file: "" + +## Refer to roles/kubespray_defaults/defaults/main/main.yml before modifying no_proxy +# no_proxy: "" + +## Some problems may occur when downloading files over https proxy due to ansible bug +## https://github.com/ansible/ansible/issues/32750. Set this variable to False to disable +## SSL validation of get_url module. Note that kubespray will still be performing checksum validation. +# download_validate_certs: False + +## If you need exclude all cluster nodes from proxy and other resources, add other resources here. 
+# additional_no_proxy: "" + +## If you need to disable proxying of os package repositories but are still behind an http_proxy set +## skip_http_proxy_on_os_packages to true +## This will cause kubespray not to set proxy environment in /etc/yum.conf for centos and in /etc/apt/apt.conf for debian/ubuntu +## Special information for debian/ubuntu - you have to set the no_proxy variable, then apt package will install from your source of wish +# skip_http_proxy_on_os_packages: false + +## Since workers are included in the no_proxy variable by default, docker engine will be restarted on all nodes (all +## pods will restart) when adding or removing workers. To override this behaviour by only including control plane nodes +## in the no_proxy variable, set below to true: +no_proxy_exclude_workers: false + +## Certificate Management +## This setting determines whether certs are generated via scripts. +## Chose 'none' if you provide your own certificates. +## Option is "script", "none" +# cert_management: script + +## Set to true to allow pre-checks to fail and continue deployment +# ignore_assert_errors: false + +## The read-only port for the Kubelet to serve on with no authentication/authorization. Uncomment to enable. +# kube_read_only_port: 10255 + +## Set true to download and cache container +# download_container: true + +## Deploy container engine +# Set false if you want to deploy container engine manually. +# deploy_container_engine: true + +## Red Hat Enterprise Linux subscription registration +## Add either RHEL subscription Username/Password or Organization ID/Activation Key combination +## Update RHEL subscription purpose usage, role and SLA if necessary +# rh_subscription_username: "" +# rh_subscription_password: "" +# rh_subscription_org_id: "" +# rh_subscription_activation_key: "" +# rh_subscription_usage: "Development" +# rh_subscription_role: "Red Hat Enterprise Server" +# rh_subscription_sla: "Self-Support" + +## Check if access_ip responds to ping. Set false if your firewall blocks ICMP. +# ping_access_ip: true + +# sysctl_file_path to add sysctl conf to +# sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf" + +## Variables for webhook token auth https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication +kube_webhook_token_auth: false +kube_webhook_token_auth_url_skip_tls_verify: false +# kube_webhook_token_auth_url: https://... +## base64-encoded string of the webhook's CA certificate +# kube_webhook_token_auth_ca_data: "LS0t..." + +## NTP Settings +# Start the ntpd or chrony service and enable it at system boot. +ntp_enabled: false +ntp_manage_config: false +ntp_servers: + - "0.pool.ntp.org iburst" + - "1.pool.ntp.org iburst" + - "2.pool.ntp.org iburst" + - "3.pool.ntp.org iburst" + +## Used to control no_log attribute +unsafe_show_logs: false + +## If enabled it will allow kubespray to attempt setup even if the distribution is not supported. For unsupported distributions this can lead to unexpected failures in some cases. 
+allow_unsupported_distribution_setup: false diff --git a/inventory/2SpeedLab/group_vars/all/containerd.yml b/inventory/2SpeedLab/group_vars/all/containerd.yml new file mode 100644 index 00000000000..871a222edcd --- /dev/null +++ b/inventory/2SpeedLab/group_vars/all/containerd.yml @@ -0,0 +1,61 @@ +--- +# Please see roles/container-engine/containerd/defaults/main.yml for more configuration options + +containerd_storage_dir: "/var/lib/containerd" +containerd_state_dir: "/run/containerd" +containerd_oom_score: 0 + +containerd_default_runtime: "runc" +containerd_snapshotter: "native" + +# containerd_runc_runtime: +# name: runc +# type: "io.containerd.runc.v2" +# engine: "" +# root: "" + +# containerd_additional_runtimes: +# Example for Kata Containers as additional runtime: +# - name: kata +# type: "io.containerd.kata.v2" +# engine: "" +# root: "" + +# containerd_grpc_max_recv_message_size: 16777216 +# containerd_grpc_max_send_message_size: 16777216 + +# Containerd debug socket location: unix or tcp format +# containerd_debug_address: "" + +# Containerd log level +# containerd_debug_level: "info" + +# Containerd logs format, supported values: text, json +# containerd_debug_format: "" + +# Containerd debug socket UID +# containerd_debug_uid: 0 + +# Containerd debug socket GID +# containerd_debug_gid: 0 + +# containerd_metrics_address: "" + +# containerd_metrics_grpc_histogram: false + +# Registries defined within containerd. +# containerd_registries_mirrors: +# - prefix: docker.io +# mirrors: +# - host: https://registry-1.docker.io +# capabilities: ["pull", "resolve"] +# skip_verify: false +# header: +# Authorization: "Basic XXX" + +# containerd_max_container_log_line_size: 16384 + +# containerd_registry_auth: +# - registry: 10.0.0.2:5000 +# username: user +# password: pass diff --git a/inventory/2SpeedLab/group_vars/all/coreos.yml b/inventory/2SpeedLab/group_vars/all/coreos.yml new file mode 100644 index 00000000000..22c21666304 --- /dev/null +++ b/inventory/2SpeedLab/group_vars/all/coreos.yml @@ -0,0 +1,2 @@ +## Does coreos need auto upgrade, default is true +# coreos_auto_upgrade: true diff --git a/inventory/2SpeedLab/group_vars/all/cri-o.yml b/inventory/2SpeedLab/group_vars/all/cri-o.yml new file mode 100644 index 00000000000..0bf5d6d1d54 --- /dev/null +++ b/inventory/2SpeedLab/group_vars/all/cri-o.yml @@ -0,0 +1,25 @@ +# Registries defined within cri-o. +# crio_insecure_registries: +# - 10.0.0.2:5000 + +# Auth config for the registries +# crio_registry_auth: +# - registry: 10.0.0.2:5000 +# username: user +# password: pass +crio_registries: + - prefix: docker.io + insecure: false + blocked: false + location: docker.io + unqualified: true + - prefix: quay.io + insecure: false + blocked: false + location: quay.io + unqualified: true + +crio_unqualified_search_registries: + - docker.io + - quay.io + - gcr.io diff --git a/inventory/2SpeedLab/group_vars/all/etcd.yml b/inventory/2SpeedLab/group_vars/all/etcd.yml new file mode 100644 index 00000000000..39600c35fbe --- /dev/null +++ b/inventory/2SpeedLab/group_vars/all/etcd.yml @@ -0,0 +1,16 @@ +--- +## Directory where etcd data stored +etcd_data_dir: /var/lib/etcd + +## Container runtime +## docker for docker, crio for cri-o and containerd for containerd. 
+## Additionally you can set this to kubeadm if you want to install etcd using kubeadm
+## Kubeadm etcd deployment is experimental and only available for new deployments
+## If this is not set, container manager will be inherited from the Kubespray defaults
+## and not from k8s_cluster/k8s-cluster.yml, which might not be what you want.
+## This also makes it possible to use a different container manager for the etcd nodes.
+# container_manager: containerd
+
+## Settings for etcd deployment type
+# Set this to docker if you are using container_manager: docker
+etcd_deployment_type: host
diff --git a/inventory/2SpeedLab/group_vars/all/offline.yml b/inventory/2SpeedLab/group_vars/all/offline.yml
new file mode 100644
index 00000000000..07bd5fc8010
--- /dev/null
+++ b/inventory/2SpeedLab/group_vars/all/offline.yml
@@ -0,0 +1,114 @@
+---
+## Global Offline settings
+### Private Container Image Registry
+# registry_host: "myprivateregistry.com"
+# files_repo: "http://myprivatehttpd"
+### If using CentOS, RedHat, AlmaLinux or Fedora
+# yum_repo: "http://myinternalyumrepo"
+### If using Debian
+# debian_repo: "http://myinternaldebianrepo"
+### If using Ubuntu
+# ubuntu_repo: "http://myinternalubunturepo"
+
+## Container Registry overrides
+# kube_image_repo: "{{ registry_host }}"
+# gcr_image_repo: "{{ registry_host }}"
+# github_image_repo: "{{ registry_host }}"
+# docker_image_repo: "{{ registry_host }}"
+# quay_image_repo: "{{ registry_host }}"
+
+## Kubernetes components
+# kubeadm_download_url: "{{ files_repo }}/dl.k8s.io/release/v{{ kube_version }}/bin/linux/{{ image_arch }}/kubeadm"
+# kubectl_download_url: "{{ files_repo }}/dl.k8s.io/release/v{{ kube_version }}/bin/linux/{{ image_arch }}/kubectl"
+# kubelet_download_url: "{{ files_repo }}/dl.k8s.io/release/v{{ kube_version }}/bin/linux/{{ image_arch }}/kubelet"
+
+
+## Two options: override the entire binary repository, or override only a single binary.
+ +## [Optional] 1 - Override entire binary repository +# github_url: "https://my_github_proxy" +# dl_k8s_io_url: "https://my_dl_k8s_io_proxy" +# storage_googleapis_url: "https://my_storage_googleapi_proxy" +# get_helm_url: "https://my_helm_sh_proxy" + +## [Optional] 2 - Override a specific binary +## CNI Plugins +# cni_download_url: "{{ files_repo }}/github.com/containernetworking/plugins/releases/download/v{{ cni_version }}/cni-plugins-linux-{{ image_arch }}-v{{ cni_version }}.tgz" + +## cri-tools +# crictl_download_url: "{{ files_repo }}/github.com/kubernetes-sigs/cri-tools/releases/download/v{{ crictl_version }}/crictl-v{{ crictl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz" + +## [Optional] etcd: only if you use etcd_deployment=host +# etcd_download_url: "{{ files_repo }}/github.com/etcd-io/etcd/releases/download/v{{ etcd_version }}/etcd-v{{ etcd_version }}-linux-{{ image_arch }}.tar.gz" + +# [Optional] Calico: If using Calico network plugin +# calicoctl_download_url: "{{ files_repo }}/github.com/projectcalico/calico/releases/download/v{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}" +# [Optional] Calico with kdd: If using Calico network plugin with kdd datastore +# calico_crds_download_url: "{{ files_repo }}/github.com/projectcalico/calico/archive/v{{ calico_version }}.tar.gz" + +# [Optional] Cilium: If using Cilium network plugin +# ciliumcli_download_url: "{{ files_repo }}/github.com/cilium/cilium-cli/releases/download/v{{ cilium_cli_version }}/cilium-linux-{{ image_arch }}.tar.gz" + +# [Optional] helm: only if you set helm_enabled: true +# helm_download_url: "{{ files_repo }}/get.helm.sh/helm-v{{ helm_version }}-linux-{{ image_arch }}.tar.gz" + +# [Optional] crun: only if you set crun_enabled: true +# crun_download_url: "{{ files_repo }}/github.com/containers/crun/releases/download/{{ crun_version }}/crun-{{ crun_version }}-linux-{{ image_arch }}" + +# [Optional] kata: only if you set kata_containers_enabled: true +# kata_containers_download_url: "{{ files_repo }}/github.com/kata-containers/kata-containers/releases/download/{{ kata_containers_version }}/kata-static-{{ kata_containers_version }}-{{ image_arch }}.tar.xz" + +# [Optional] cri-dockerd: only if you set container_manager: docker +# cri_dockerd_download_url: "{{ files_repo }}/github.com/Mirantis/cri-dockerd/releases/download/v{{ cri_dockerd_version }}/cri-dockerd-{{ cri_dockerd_version }}.{{ image_arch }}.tgz" + +# [Optional] runc: if you set container_manager to containerd or crio +# runc_download_url: "{{ files_repo }}/github.com/opencontainers/runc/releases/download/v{{ runc_version }}/runc.{{ image_arch }}" + +# [Optional] cri-o: only if you set container_manager: crio +# crio_download_base: "download.opensuse.org/repositories/devel:kubic:libcontainers:stable" +# crio_download_crio: "http://{{ crio_download_base }}:/cri-o:/" +# crio_download_url: "{{ files_repo }}/storage.googleapis.com/cri-o/artifacts/cri-o.{{ image_arch }}.v{{ crio_version }}.tar.gz" +# skopeo_download_url: "{{ files_repo }}/github.com/lework/skopeo-binary/releases/download/v{{ skopeo_version }}/skopeo-linux-{{ image_arch }}" + +# [Optional] containerd: only if you set container_runtime: containerd +# containerd_download_url: "{{ files_repo }}/github.com/containerd/containerd/releases/download/v{{ containerd_version }}/containerd-{{ containerd_version }}-linux-{{ image_arch }}.tar.gz" +# nerdctl_download_url: "{{ files_repo }}/github.com/containerd/nerdctl/releases/download/v{{ nerdctl_version }}/nerdctl-{{ 
nerdctl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz" + +# [Optional] runsc,containerd-shim-runsc: only if you set gvisor_enabled: true +# gvisor_runsc_download_url: "{{ files_repo }}/storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/runsc" +# gvisor_containerd_shim_runsc_download_url: "{{ files_repo }}/storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/containerd-shim-runsc-v1" + + +## CentOS/Redhat/AlmaLinux +### For EL8, baseos and appstream must be available, +### By default we enable those repo automatically +# rhel_enable_repos: false +### Docker / Containerd +# docker_rh_repo_base_url: "{{ yum_repo }}/docker-ce/$releasever/$basearch" +# docker_rh_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg" + +## Fedora +### Docker +# docker_fedora_repo_base_url: "{{ yum_repo }}/docker-ce/{{ ansible_distribution_major_version }}/{{ ansible_architecture }}" +# docker_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg" +### Containerd +# containerd_fedora_repo_base_url: "{{ yum_repo }}/containerd" +# containerd_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg" + +## Debian +### Docker +# docker_debian_repo_base_url: "{{ debian_repo }}/docker-ce" +# docker_debian_repo_gpgkey: "{{ debian_repo }}/docker-ce/gpg" +### Containerd +# containerd_debian_repo_base_url: "{{ debian_repo }}/containerd" +# containerd_debian_repo_gpgkey: "{{ debian_repo }}/containerd/gpg" +# containerd_debian_repo_repokey: 'YOURREPOKEY' + +## Ubuntu +### Docker +# docker_ubuntu_repo_base_url: "{{ ubuntu_repo }}/docker-ce" +# docker_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/docker-ce/gpg" +### Containerd +# containerd_ubuntu_repo_base_url: "{{ ubuntu_repo }}/containerd" +# containerd_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/containerd/gpg" +# containerd_ubuntu_repo_repokey: 'YOURREPOKEY' diff --git a/inventory/2SpeedLab/group_vars/k8s_cluster/addons.yml b/inventory/2SpeedLab/group_vars/k8s_cluster/addons.yml new file mode 100644 index 00000000000..80dab1af991 --- /dev/null +++ b/inventory/2SpeedLab/group_vars/k8s_cluster/addons.yml @@ -0,0 +1,248 @@ +--- +# Kubernetes dashboard +# RBAC required. see docs/getting-started.md for access details. 
+# dashboard_enabled: false + +# Helm deployment +helm_enabled: true + +# Registry deployment +registry_enabled: false +# registry_namespace: kube-system +# registry_storage_class: "" +# registry_disk_size: "10Gi" + +# Metrics Server deployment +metrics_server_enabled: true +metrics_server_container_port: 10250 +metrics_server_kubelet_insecure_tls: true +metrics_server_metric_resolution: 15s +metrics_server_kubelet_preferred_address_types: "InternalIP,ExternalIP,Hostname" +metrics_server_host_network: false +metrics_server_replicas: 1 + +# Rancher Local Path Provisioner +local_path_provisioner_enabled: false +# local_path_provisioner_namespace: "local-path-storage" +# local_path_provisioner_storage_class: "local-path" +# local_path_provisioner_reclaim_policy: Delete +# local_path_provisioner_claim_root: /opt/local-path-provisioner/ +# local_path_provisioner_debug: false +# local_path_provisioner_image_repo: "{{ docker_image_repo }}/rancher/local-path-provisioner" +# local_path_provisioner_image_tag: "v0.0.24" +# local_path_provisioner_helper_image_repo: "busybox" +# local_path_provisioner_helper_image_tag: "latest" + +# Local volume provisioner deployment +local_volume_provisioner_enabled: false +# local_volume_provisioner_namespace: kube-system +# local_volume_provisioner_nodelabels: +# - kubernetes.io/hostname +# - topology.kubernetes.io/region +# - topology.kubernetes.io/zone +# local_volume_provisioner_storage_classes: +# local-storage: +# host_dir: /mnt/disks +# mount_dir: /mnt/disks +# volume_mode: Filesystem +# fs_type: ext4 +# fast-disks: +# host_dir: /mnt/fast-disks +# mount_dir: /mnt/fast-disks +# block_cleaner_command: +# - "/scripts/shred.sh" +# - "2" +# volume_mode: Filesystem +# fs_type: ext4 +# local_volume_provisioner_tolerations: +# - effect: NoSchedule +# operator: Exists + +# CSI Volume Snapshot Controller deployment, set this to true if your CSI is able to manage snapshots +# currently, setting cinder_csi_enabled=true would automatically enable the snapshot controller +# Longhorn is an external CSI that would also require setting this to true but it is not included in kubespray +# csi_snapshot_controller_enabled: false +# csi snapshot namespace +# snapshot_controller_namespace: kube-system + +# Gateway API CRDs +gateway_api_enabled: false + +# Nginx ingress controller deployment +ingress_nginx_enabled: false +# ingress_nginx_host_network: false +# ingress_nginx_service_type: LoadBalancer +# ingress_nginx_service_annotations: +# example.io/loadbalancerIPs: 1.2.3.4 +# ingress_nginx_service_nodeport_http: 30080 +# ingress_nginx_service_nodeport_https: 30081 +ingress_publish_status_address: "" +# ingress_nginx_nodeselector: +# kubernetes.io/os: "linux" +# ingress_nginx_tolerations: +# - key: "node-role.kubernetes.io/control-plane" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# ingress_nginx_namespace: "ingress-nginx" +# ingress_nginx_insecure_port: 80 +# ingress_nginx_secure_port: 443 +# ingress_nginx_configmap: +# map-hash-bucket-size: "128" +# ssl-protocols: "TLSv1.2 TLSv1.3" +# ingress_nginx_configmap_tcp_services: +# 9000: "default/example-go:8080" +# ingress_nginx_configmap_udp_services: +# 53: "kube-system/coredns:53" +# ingress_nginx_extra_args: +# - --default-ssl-certificate=default/foo-tls +# ingress_nginx_termination_grace_period_seconds: 300 +# ingress_nginx_class: nginx +# ingress_nginx_without_class: true +# ingress_nginx_default: false + +# ALB ingress controller deployment +ingress_alb_enabled: false +# alb_ingress_aws_region: "us-east-1" +# 
alb_ingress_restrict_scheme: "false" +# Enables logging on all outbound requests sent to the AWS API. +# If logging is desired, set to true. +# alb_ingress_aws_debug: "false" + +# Cert manager deployment +cert_manager_enabled: false +# cert_manager_namespace: "cert-manager" +# cert_manager_tolerations: +# - key: node-role.kubernetes.io/control-plane +# effect: NoSchedule +# cert_manager_affinity: +# nodeAffinity: +# preferredDuringSchedulingIgnoredDuringExecution: +# - weight: 100 +# preference: +# matchExpressions: +# - key: node-role.kubernetes.io/control-plane +# operator: In +# values: +# - "" +# cert_manager_nodeselector: +# kubernetes.io/os: "linux" + +# cert_manager_trusted_internal_ca: | +# -----BEGIN CERTIFICATE----- +# [REPLACE with your CA certificate] +# -----END CERTIFICATE----- +# cert_manager_leader_election_namespace: kube-system + +# cert_manager_dns_policy: "ClusterFirst" +# cert_manager_dns_config: +# nameservers: +# - "1.1.1.1" +# - "8.8.8.8" + +# cert_manager_controller_extra_args: +# - "--dns01-recursive-nameservers-only=true" +# - "--dns01-recursive-nameservers=1.1.1.1:53,8.8.8.8:53" + +# MetalLB deployment +metallb_enabled: false +metallb_speaker_enabled: "{{ metallb_enabled }}" +metallb_namespace: "metallb-system" +# metallb_protocol: "layer2" +# metallb_port: "7472" +# metallb_memberlist_port: "7946" +# metallb_config: +# speaker: +# nodeselector: +# kubernetes.io/os: "linux" +# tolerations: +# - key: "node-role.kubernetes.io/control-plane" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# controller: +# nodeselector: +# kubernetes.io/os: "linux" +# tolerations: +# - key: "node-role.kubernetes.io/control-plane" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# address_pools: +# primary: +# ip_range: +# - 10.5.0.0/16 +# auto_assign: true +# pool1: +# ip_range: +# - 10.6.0.0/16 +# auto_assign: true +# pool2: +# ip_range: +# - 10.10.0.0/16 +# auto_assign: true +# layer2: +# - primary +# layer3: +# defaults: +# peer_port: 179 +# hold_time: 120s +# communities: +# vpn-only: "1234:1" +# NO_ADVERTISE: "65535:65282" +# metallb_peers: +# peer1: +# peer_address: 10.6.0.1 +# peer_asn: 64512 +# my_asn: 4200000000 +# communities: +# - vpn-only +# address_pool: +# - pool1 +# peer2: +# peer_address: 10.10.0.1 +# peer_asn: 64513 +# my_asn: 4200000000 +# communities: +# - NO_ADVERTISE +# address_pool: +# - pool2 + +argocd_enabled: false +# argocd_namespace: argocd +# Default password: +# - https://argo-cd.readthedocs.io/en/stable/getting_started/#4-login-using-the-cli +# --- +# The initial password is autogenerated and stored in `argocd-initial-admin-secret` in the argocd namespace defined above. 
+# Using the argocd CLI the generated password can be automatically be fetched from the current kubectl context with the command: +# argocd admin initial-password -n argocd +# --- +# Use the following var to set admin password +# argocd_admin_password: "password" + +# The plugin manager for kubectl + +# Kube VIP +kube_vip_enabled: false +#kube_vip_arp_enabled: true +#kube_vip_controlplane_enabled: true +#kube_vip_address: 10.10.88.21 +#loadbalancer_apiserver: +# address: "{{ kube_vip_address }}" +# port: 6443 +#kube_vip_interface: eth0 +#kube_vip_services_enabled: true +#kube_vip_dns_mode: first +#kube_vip_cp_detect: false +#kube_vip_leasename: plndr-cp-lock +#kube_vip_enable_node_labeling: false +#kube_vip_lb_fwdmethod: local + +# Node Feature Discovery +node_feature_discovery_enabled: false +# node_feature_discovery_gc_sa_name: node-feature-discovery +# node_feature_discovery_gc_sa_create: false +# node_feature_discovery_worker_sa_name: node-feature-discovery +# node_feature_discovery_worker_sa_create: false +# node_feature_discovery_master_config: +# extraLabelNs: ["nvidia.com"] diff --git a/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-cluster.yml b/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-cluster.yml new file mode 100644 index 00000000000..84663ee1369 --- /dev/null +++ b/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-cluster.yml @@ -0,0 +1,374 @@ +--- +# Kubernetes configuration dirs and system namespace. +# Those are where all the additional config stuff goes +# the kubernetes normally puts in /srv/kubernetes. +# This puts them in a sane location and namespace. +# Editing those values will almost surely break something. +kube_config_dir: /etc/kubernetes +kube_script_dir: "{{ bin_dir }}/kubernetes-scripts" +kube_manifest_dir: "{{ kube_config_dir }}/manifests" + +# This is where all the cert scripts and certs will be located +kube_cert_dir: "{{ kube_config_dir }}/ssl" + +# This is where all of the bearer tokens will be stored +kube_token_dir: "{{ kube_config_dir }}/tokens" + +kube_api_anonymous_auth: true + +# Where the binaries will be downloaded. +# Note: ensure that you've enough disk space (about 1G) +local_release_dir: "/tmp/releases" +# Random shifts for retrying failed ops like pushing/downloading +retry_stagger: 5 + +# This is the user that owns tha cluster installation. +kube_owner: root + +# This is the group that the cert creation scripts chgrp the +# cert files to. Not really changeable... +kube_cert_group: kube-cert + +# Cluster Loglevel configuration +kube_log_level: 2 + +# Directory where credentials will be stored +credentials_dir: "{{ inventory_dir }}/credentials" + +## It is possible to activate / deactivate selected authentication methods (oidc, static token auth) +# kube_oidc_auth: false +# kube_token_auth: false + +# disable kubeproxy +kube_proxy_remove: true + +## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/ +## To use OpenID you have to deploy additional an OpenID Provider (e.g Dex, Keycloak, ...) + +# kube_oidc_url: https:// ... +# kube_oidc_client_id: kubernetes +## Optional settings for OIDC +# kube_oidc_ca_file: "{{ kube_cert_dir }}/ca.pem" +# kube_oidc_username_claim: sub +# kube_oidc_username_prefix: 'oidc:' +# kube_oidc_groups_claim: groups +# kube_oidc_groups_prefix: 'oidc:' + +## Variables to control webhook authn/authz +# kube_webhook_token_auth: false +# kube_webhook_token_auth_url: https://... 
+# kube_webhook_token_auth_url_skip_tls_verify: false + +## For webhook authorization, authorization_modes must include Webhook or kube_apiserver_authorization_config_authorizers must configure a type: Webhook +# kube_webhook_authorization: false +# kube_webhook_authorization_url: https://... +# kube_webhook_authorization_url_skip_tls_verify: false + +# Choose network plugin (cilium, calico, kube-ovn or flannel. Use cni for generic cni plugin) +# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing +kube_network_plugin: cilium + +# Setting multi_networking to true will install Multus: https://github.com/k8snetworkplumbingwg/multus-cni +kube_network_plugin_multus: false + +# Kubernetes internal network for services, unused block of space. +kube_service_addresses: 10.233.0.0/18 + +# internal network. When used, it will assign IP +# addresses from this range to individual pods. +# This network must be unused in your network infrastructure! +kube_pods_subnet: 10.233.64.0/18 + +# internal network node size allocation (optional). This is the size allocated +# to each node for pod IP address allocation. Note that the number of pods per node is +# also limited by the kubelet_max_pods variable which defaults to 110. +# +# Example: +# Up to 64 nodes and up to 254 or kubelet_max_pods (the lowest of the two) pods per node: +# - kube_pods_subnet: 10.233.64.0/18 +# - kube_network_node_prefix: 24 +# - kubelet_max_pods: 110 +# +# Example: +# Up to 128 nodes and up to 126 or kubelet_max_pods (the lowest of the two) pods per node: +# - kube_pods_subnet: 10.233.64.0/18 +# - kube_network_node_prefix: 25 +# - kubelet_max_pods: 110 +kube_network_node_prefix: 24 + +# Kubernetes internal network for IPv6 services, unused block of space. +# This is only used if ipv6_stack is set to true +# This provides 4096 IPv6 IPs +kube_service_addresses_ipv6: fd85:ee78:d8a6:8607::1000/116 + +# Internal network. When used, it will assign IPv6 addresses from this range to individual pods. +# This network must not already be in your network infrastructure! +# This is only used if ipv6_stack is set to true. +# This provides room for 256 nodes with 254 pods per node. +kube_pods_subnet_ipv6: fd85:ee78:d8a6:8607::1:0000/112 + +# IPv6 subnet size allocated to each for pods. +# This is only used if ipv6_stack is set to true +# This provides room for 254 pods per node. +kube_network_node_prefix_ipv6: 120 + +# The port the API Server will be listening on. +kube_apiserver_ip: "{{ kube_service_subnets.split(',') | first | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(1) | ansible.utils.ipaddr('address') }}" +kube_apiserver_port: 6443 # (https) + +# Kube-proxy proxyMode configuration. +# Can be ipvs, iptables, nftables +# TODO: it needs to be changed to nftables when the upstream use nftables as default +kube_proxy_mode: ipvs + +# configure arp_ignore and arp_announce to avoid answering ARP queries from kube-ipvs0 interface +# must be set to true for MetalLB, kube-vip(ARP enabled) to work +kube_proxy_strict_arp: true + +# A string slice of values which specify the addresses to use for NodePorts. +# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). +# The default empty string slice ([]) means to use all local addresses. 
+# kube_proxy_nodeport_addresses_cidr is retained for legacy config +kube_proxy_nodeport_addresses: >- + {%- if kube_proxy_nodeport_addresses_cidr is defined -%} + [{{ kube_proxy_nodeport_addresses_cidr }}] + {%- else -%} + [] + {%- endif -%} + +# If non-empty, will use this string as identification instead of the actual hostname +# kube_override_hostname: {{ inventory_hostname }} + +## Encrypting Secret Data at Rest +kube_encrypt_secret_data: false + +# Graceful Node Shutdown (Kubernetes >= 1.21.0), see https://kubernetes.io/blog/2021/04/21/graceful-node-shutdown-beta/ +# kubelet_shutdown_grace_period had to be greater than kubelet_shutdown_grace_period_critical_pods to allow +# non-critical podsa to also terminate gracefully +kubelet_shutdown_grace_period: 60s +kubelet_shutdown_grace_period_critical_pods: 20s + +# DNS configuration. +# Kubernetes cluster name, also will be used as DNS domain +cluster_name: cluster.2speedlab.dev +# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods +ndots: 2 +# dns_timeout: 2 +# dns_attempts: 2 +# Custom search domains to be added in addition to the default cluster search domains +# searchdomains: +# - svc.{{ cluster_name }} +# - default.svc.{{ cluster_name }} +# Remove default cluster search domains (``default.svc.{{ dns_domain }}, svc.{{ dns_domain }}``). +# remove_default_searchdomains: false +# Can be coredns, coredns_dual, manual or none +dns_mode: coredns +# Set manual server if using a custom cluster DNS server +# manual_dns_server: 10.x.x.x +# Enable nodelocal dns cache +enable_nodelocaldns: true +enable_nodelocaldns_secondary: false +#nodelocaldns_ip: 169.254.25.10 +nodelocaldns_health_port: 9254 +nodelocaldns_second_health_port: 9256 +nodelocaldns_bind_metrics_host_ip: false +nodelocaldns_secondary_skew_seconds: 5 +# nodelocaldns_external_zones: +# - zones: +# - example.com +# - example.io:1053 +# nameservers: +# - 1.1.1.1 +# - 2.2.2.2 +# cache: 5 +# - zones: +# - https://mycompany.local:4453 +# nameservers: +# - 192.168.0.53 +# cache: 0 +# - zones: +# - mydomain.tld +# nameservers: +# - 10.233.0.3 +# cache: 5 +# rewrite: +# - name website.tld website.namespace.svc.cluster.local +# Enable k8s_external plugin for CoreDNS +enable_coredns_k8s_external: false +coredns_k8s_external_zone: k8s_external.local +# Enable endpoint_pod_names option for kubernetes plugin +enable_coredns_k8s_endpoint_pod_names: false +# Set forward options for upstream DNS servers in coredns (and nodelocaldns) config +# dns_upstream_forward_extra_opts: +# policy: sequential +# Apply extra options to coredns kubernetes plugin +# coredns_kubernetes_extra_opts: +# - 'fallthrough example.local' +# Forward extra domains to the coredns kubernetes plugin +# coredns_kubernetes_extra_domains: '' + +# Can be docker_dns, host_resolvconf or none +resolvconf_mode: host_resolvconf +# Deploy netchecker app to verify DNS resolve as an HTTP service +deploy_netchecker: false +# Ip address of the kubernetes skydns service +skydns_server: "{{ kube_service_subnets.split(',') | first | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(3) | ansible.utils.ipaddr('address') }}" +skydns_server_secondary: "{{ kube_service_subnets.split(',') | first | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(4) | ansible.utils.ipaddr('address') }}" +dns_domain: "{{ cluster_name }}" + +## Container runtime +## docker for docker, crio for cri-o and containerd for containerd. 
+## Default: containerd +container_manager: crio + +# Additional container runtimes +kata_containers_enabled: false + +kubeadm_certificate_key: "{{ lookup('password', credentials_dir + '/kubeadm_certificate_key.creds length=64 chars=hexdigits') | lower }}" + +# K8s image pull policy (imagePullPolicy) +k8s_image_pull_policy: IfNotPresent + +# audit log for kubernetes +kubernetes_audit: false + +# define kubelet config dir for dynamic kubelet +# kubelet_config_dir: +default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir" + +# Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts +# kubeconfig_localhost: false +# Use ansible_host as external api ip when copying over kubeconfig. +# kubeconfig_localhost_ansible_host: false +# Download kubectl onto the host that runs Ansible in {{ bin_dir }} +# kubectl_localhost: false + +# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet. +# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "". +# kubelet_enforce_node_allocatable: pods + +## Set runtime and kubelet cgroups when using systemd as cgroup driver (default) +# kubelet_runtime_cgroups: "/{{ kube_service_cgroups }}/{{ container_manager }}.service" +# kubelet_kubelet_cgroups: "/{{ kube_service_cgroups }}/kubelet.service" + +## Set runtime and kubelet cgroups when using cgroupfs as cgroup driver +# kubelet_runtime_cgroups_cgroupfs: "/system.slice/{{ container_manager }}.service" +# kubelet_kubelet_cgroups_cgroupfs: "/system.slice/kubelet.service" + +# Whether to run kubelet and container-engine daemons in a dedicated cgroup. +# kube_reserved: false +## Uncomment to override default values +## The following two items need to be set when kube_reserved is true +# kube_reserved_cgroups_for_service_slice: kube.slice +# kube_reserved_cgroups: "/{{ kube_reserved_cgroups_for_service_slice }}" +# kube_memory_reserved: 256Mi +# kube_cpu_reserved: 100m +# kube_ephemeral_storage_reserved: 2Gi +# kube_pid_reserved: "1000" + +## Optionally reserve resources for OS system daemons. +# system_reserved: true +## Uncomment to override default values +## The following two items need to be set when system_reserved is true +# system_reserved_cgroups_for_service_slice: system.slice +# system_reserved_cgroups: "/{{ system_reserved_cgroups_for_service_slice }}" +# system_memory_reserved: 512Mi +# system_cpu_reserved: 500m +# system_ephemeral_storage_reserved: 2Gi + +## Eviction Thresholds to avoid system OOMs +# https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#eviction-thresholds +# eviction_hard: {} +# eviction_hard_control_plane: {} + +# An alternative flexvolume plugin directory +# kubelet_flexvolumes_plugins_dir: /usr/libexec/kubernetes/kubelet-plugins/volume/exec + +## Supplementary addresses that can be added in kubernetes ssl keys. +## That can be useful for example to setup a keepalived virtual IP +supplementary_addresses_in_ssl_keys: [10.10.24.105, 10.10.24.109, 10.10.25.27, 10.10.25.74, rancher1.tabbycatlab.dev] + +## Running on top of openstack vms with cinder enabled may lead to unschedulable pods due to NoVolumeZoneConflict restriction in kube-scheduler. 
+## See https://github.com/kubernetes-sigs/kubespray/issues/2141 +## Set this variable to true to get rid of this issue +volume_cross_zone_attachment: false +## Add Persistent Volumes Storage Class for corresponding cloud provider (supported: in-tree OpenStack, Cinder CSI, +## AWS EBS CSI, Azure Disk CSI, GCP Persistent Disk CSI) +persistent_volumes_enabled: false + +## Container Engine Acceleration +## Enable container acceleration feature, for example use gpu acceleration in containers +# nvidia_accelerator_enabled: true +## Nvidia GPU driver install. Install will by done by a (init) pod running as a daemonset. +## Important: if you use Ubuntu then you should set in all.yml 'docker_storage_options: -s overlay2' +## Array with nvida_gpu_nodes, leave empty or comment if you don't want to install drivers. +## Labels and taints won't be set to nodes if they are not in the array. +# nvidia_gpu_nodes: +# - kube-gpu-001 +# nvidia_driver_version: "384.111" +## flavor can be tesla or gtx +# nvidia_gpu_flavor: gtx +## NVIDIA driver installer images. Change them if you have trouble accessing gcr.io. +# nvidia_driver_install_centos_container: atzedevries/nvidia-centos-driver-installer:2 +# nvidia_driver_install_ubuntu_container: gcr.io/google-containers/ubuntu-nvidia-driver-installer@sha256:7df76a0f0a17294e86f691c81de6bbb7c04a1b4b3d4ea4e7e2cccdc42e1f6d63 +## NVIDIA GPU device plugin image. +# nvidia_gpu_device_plugin_container: "registry.k8s.io/nvidia-gpu-device-plugin@sha256:0842734032018be107fa2490c98156992911e3e1f2a21e059ff0105b07dd8e9e" + +## Support tls min version, Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13. +# tls_min_version: "" + +## Support tls cipher suites. +# tls_cipher_suites: {} +# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA +# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 +# - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 +# - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA +# - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 +# - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 +# - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA +# - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 +# - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 +# - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 +# - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 +# - TLS_ECDHE_RSA_WITH_RC4_128_SHA +# - TLS_RSA_WITH_3DES_EDE_CBC_SHA +# - TLS_RSA_WITH_AES_128_CBC_SHA +# - TLS_RSA_WITH_AES_128_CBC_SHA256 +# - TLS_RSA_WITH_AES_128_GCM_SHA256 +# - TLS_RSA_WITH_AES_256_CBC_SHA +# - TLS_RSA_WITH_AES_256_GCM_SHA384 +# - TLS_RSA_WITH_RC4_128_SHA + +## Amount of time to retain events. (default 1h0m0s) +event_ttl_duration: "1h0m0s" + +## Automatically renew K8S control plane certificates on first Monday of each month +auto_renew_certificates: false +# First Monday of each month +# auto_renew_certificates_systemd_calendar: "Mon *-*-1,2,3,4,5,6,7 03:00:00" + +kubeadm_patches_dir: "{{ kube_config_dir }}/patches" +kubeadm_patches: [] +# See https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/control-plane-flags/#patches +# Correspondance with this link +# patchtype = type +# target = target +# suffix -> managed automatically +# extension -> always "yaml" +# kubeadm_patches: +# - target: kube-apiserver|kube-controller-manager|kube-scheduler|etcd|kubeletconfiguration +# type: strategic(default)|json|merge +# patch: +# metadata: +# annotations: +# example.com/test: "true" +# labels: +# example.com/prod_level: "{{ prod_level }}" +# - ... 
+# Patches are applied in the order they are specified. + +# Set to true to remove the role binding to anonymous users created by kubeadm +remove_anonymous_access: false diff --git a/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-cilium.yml b/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-cilium.yml new file mode 100644 index 00000000000..f1487287230 --- /dev/null +++ b/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-cilium.yml @@ -0,0 +1,395 @@ +--- +# Log-level +# cilium_debug: false + +# cilium_mtu: "" +# cilium_enable_ipv4: true +# cilium_enable_ipv6: false + +# Enable l2 announcement from cilium to replace Metallb Ref: https://docs.cilium.io/en/v1.14/network/l2-announcements/ +cilium_l2announcements: false + +# Cilium agent health port +cilium_agent_health_port: "9879" + +# Identity allocation mode selects how identities are shared between cilium +# nodes by setting how they are stored. The options are "crd" or "kvstore". +# - "crd" stores identities in kubernetes as CRDs (custom resource definition). +# These can be queried with: +# `kubectl get ciliumid` +# - "kvstore" stores identities in an etcd kvstore. +# - In order to support External Workloads, "crd" is required +# - Ref: https://docs.cilium.io/en/stable/gettingstarted/external-workloads/#setting-up-support-for-external-workloads-beta +# - KVStore operations are only required when cilium-operator is running with any of the below options: +# - --synchronize-k8s-services +# - --synchronize-k8s-nodes +# - --identity-allocation-mode=kvstore +# - Ref: https://docs.cilium.io/en/stable/internals/cilium_operator/#kvstore-operations +cilium_identity_allocation_mode: crd + +# Etcd SSL dirs +# cilium_cert_dir: /etc/cilium/certs +# kube_etcd_cacert_file: ca.pem +# kube_etcd_cert_file: cert.pem +# kube_etcd_key_file: cert-key.pem + +# Limits for apps +cilium_memory_limit: 1000M +cilium_cpu_limit: 1000m +cilium_memory_requests: 64M +cilium_cpu_requests: 100m + +# Overlay Network Mode +cilium_tunnel_mode: vxlan + +cilium_ingress_enabled: true +cilium_ingress_default: true +cilium_ingress_load_balancer_mode: "dedicated" + +# LoadBalancer Mode (snat/dsr/hybrid) Ref: https://docs.cilium.io/en/stable/network/kubernetes/kubeproxy-free/#dsr-mode +# cilium_loadbalancer_mode: snat + +# Optional features +# cilium_enable_prometheus: false +# Enable if you want to make use of hostPort mappings +# cilium_enable_portmap: false +# Monitor aggregation level (none/low/medium/maximum) +# cilium_monitor_aggregation: medium +# The monitor aggregation flags determine which TCP flags which, upon the +# first observation, cause monitor notifications to be generated. +# +# Only effective when monitor aggregation is set to "medium" or higher. +# cilium_monitor_aggregation_flags: "all" +# Kube Proxy Replacement mode (strict/partial) +cilium_kube_proxy_replacement: true + +# If upgrading from Cilium < 1.5, you may want to override some of these options +# to prevent service disruptions. See also: +# http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action +# cilium_preallocate_bpf_maps: false + +# `cilium_tofqdns_enable_poller` is deprecated in 1.8, removed in 1.9 +# cilium_tofqdns_enable_poller: false + +# `cilium_enable_legacy_services` is deprecated in 1.6, removed in 1.9 +# cilium_enable_legacy_services: false + +# Unique ID of the cluster. Must be unique across all connected clusters and +# in the range of 1 and 255. Only relevant when building a mesh of clusters. 
+# This value is not defined by default +# cilium_cluster_id: + +# Deploy cilium even if kube_network_plugin is not cilium. +# This enables to deploy cilium alongside another CNI to replace kube-proxy. +# cilium_deploy_additionally: false + +# Auto direct nodes routes can be used to advertise pods routes in your cluster +# without any tunneling (with `cilium_tunnel_mode` sets to `disabled`). +# This works only if you have a L2 connectivity between all your nodes. +# You wil also have to specify the variable `cilium_native_routing_cidr` to +# make this work. Please refer to the cilium documentation for more +# information about this kind of setups. +# cilium_auto_direct_node_routes: false + +# Allows to explicitly specify the IPv4 CIDR for native routing. +# When specified, Cilium assumes networking for this CIDR is preconfigured and +# hands traffic destined for that range to the Linux network stack without +# applying any SNAT. +# Generally speaking, specifying a native routing CIDR implies that Cilium can +# depend on the underlying networking stack to route packets to their +# destination. To offer a concrete example, if Cilium is configured to use +# direct routing and the Kubernetes CIDR is included in the native routing CIDR, +# the user must configure the routes to reach pods, either manually or by +# setting the auto-direct-node-routes flag. +# cilium_native_routing_cidr: "" + +# Allows to explicitly specify the IPv6 CIDR for native routing. +# cilium_native_routing_cidr_ipv6: "" + +# Enable transparent network encryption. +cilium_encryption_enabled: false + +# Encryption method. Can be either ipsec or wireguard. +# Only effective when `cilium_encryption_enabled` is set to true. +#cilium_encryption_type: "wireguard" + +# Enable encryption for pure node to node traffic. +# This option is only effective when `cilium_encryption_type` is set to `ipsec`. +# cilium_ipsec_node_encryption: false + +# If your kernel or distribution does not support WireGuard, Cilium agent can be configured to fall back on the user-space implementation. +# When this flag is enabled and Cilium detects that the kernel has no native support for WireGuard, +# it will fallback on the wireguard-go user-space implementation of WireGuard. +# This option is only effective when `cilium_encryption_type` is set to `wireguard`. +# cilium_wireguard_userspace_fallback: false + +# IP Masquerade Agent +# https://docs.cilium.io/en/stable/concepts/networking/masquerading/ +# By default, all packets from a pod destined to an IP address outside of the cilium_native_routing_cidr range are masqueraded +# cilium_ip_masq_agent_enable: false + +### A packet sent from a pod to a destination which belongs to any CIDR from the nonMasqueradeCIDRs is not going to be masqueraded +# cilium_non_masquerade_cidrs: +# - 10.0.0.0/8 +# - 172.16.0.0/12 +# - 192.168.0.0/16 +# - 100.64.0.0/10 +# - 192.0.0.0/24 +# - 192.0.2.0/24 +# - 192.88.99.0/24 +# - 198.18.0.0/15 +# - 198.51.100.0/24 +# - 203.0.113.0/24 +# - 240.0.0.0/4 +### Indicates whether to masquerade traffic to the link local prefix. +### If the masqLinkLocal is not set or set to false, then 169.254.0.0/16 is appended to the non-masquerade CIDRs list. 
+# cilium_masq_link_local: false +### A time interval at which the agent attempts to reload config from disk +# cilium_ip_masq_resync_interval: 60s + +### Host Firewall and Policy Audit Mode +# cilium_enable_host_firewall: false +# cilium_policy_audit_mode: false + +# Hubble +### Enable Hubble without install +cilium_enable_hubble: true +### Enable Hubble-ui +### Installed by default when hubble is enabled. To disable set to false +# cilium_enable_hubble_ui: "{{ cilium_enable_hubble }}" +### Enable Hubble Metrics +cilium_enable_hubble_metrics: true +### if cilium_enable_hubble_metrics: true +cilium_hubble_metrics: + - dns + - drop + - tcp + - flow + - icmp + - http +### Enable Hubble install +# cilium_hubble_install: false +### Enable auto generate certs if cilium_hubble_install: true +# cilium_hubble_tls_generate: false + +### Tune cilium_hubble_event_buffer_capacity & cilium_hubble_event_queue_size values to avoid dropping events when hubble is under heavy load +### Capacity of Hubble events buffer. The provided value must be one less than an integer power of two and no larger than 65535 +### (ie: 1, 3, ..., 2047, 4095, ..., 65535) (default 4095) +# cilium_hubble_event_buffer_capacity: 4095 +### Buffer size of the channel to receive monitor events. +# cilium_hubble_event_queue_size: 50 + +# Override the DNS suffix that Hubble-Relay uses to resolve its peer service. +# It defaults to the inventory's `dns_domain`. +# cilium_hubble_peer_service_cluster_domain: "{{ dns_domain }}" + +# IP address management mode for v1.9+. +# https://docs.cilium.io/en/v1.9/concepts/networking/ipam/ +cilium_ipam_mode: kubernetes + +# Extra arguments for the Cilium agent +cilium_agent_custom_args: + - --write-cni-conf-when-ready=/host/etc/cni/net.d/05-cilium.conflist + +# For adding and mounting extra volumes to the cilium agent +# cilium_agent_extra_volumes: [] +# cilium_agent_extra_volume_mounts: [] + +# cilium_agent_extra_env_vars: [] + +# cilium_operator_replicas: 2 + +# The address at which the cillium operator bind health check api +cilium_operator_api_serve_addr: "0.0.0.0:9234" + +## A dictionary of extra config variables to add to cilium-config, formatted like: +## cilium_config_extra_vars: +## var1: "value1" +## var2: "value2" +# cilium_config_extra_vars: {} + +# For adding and mounting extra volumes to the cilium operator +# cilium_operator_extra_volumes: [] +# cilium_operator_extra_volume_mounts: [] + +# Extra arguments for the Cilium Operator +# cilium_operator_custom_args: [] + +# Name of the cluster. Only relevant when building a mesh of clusters. +# cilium_cluster_name: default + +# Make Cilium take ownership over the `/etc/cni/net.d` directory on the node, renaming all non-Cilium CNI configurations to `*.cilium_bak`. +# This ensures no Pods can be scheduled using other CNI plugins during Cilium agent downtime. +# Available for Cilium v1.10 and up. +# cilium_cni_exclusive: true + +# Configure the log file for CNI logging with retention policy of 7 days. +# Disable CNI file logging by setting this field to empty explicitly. +# Available for Cilium v1.12 and up. +# cilium_cni_log_file: "/var/run/cilium/cilium-cni.log" + +# -- Configure cgroup related configuration +# -- Enable auto mount of cgroup2 filesystem. +# When `cilium_cgroup_auto_mount` is enabled, cgroup2 filesystem is mounted at +# `cilium_cgroup_host_root` path on the underlying host and inside the cilium agent pod. 
+# If users disable `cilium_cgroup_auto_mount`, it's expected that users have mounted +# cgroup2 filesystem at the specified `cilium_cgroup_auto_mount` volume, and then the +# volume will be mounted inside the cilium agent pod at the same path. +# Available for Cilium v1.11 and up +# cilium_cgroup_auto_mount: true +# -- Configure cgroup root where cgroup2 filesystem is mounted on the host +# cilium_cgroup_host_root: "/run/cilium/cgroupv2" + +# Specifies the ratio (0.0-1.0) of total system memory to use for dynamic +# sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps. +# cilium_bpf_map_dynamic_size_ratio: "0.0" + +# -- Enables masquerading of IPv4 traffic leaving the node from endpoints. +# Available for Cilium v1.10 and up +cilium_enable_ipv4_masquerade: true +# -- Enables masquerading of IPv6 traffic leaving the node from endpoints. +# Available for Cilium v1.10 and up +# cilium_enable_ipv6_masquerade: true + +# -- Enable native IP masquerade support in eBPF +# cilium_enable_bpf_masquerade: false + +# -- Enable BGP Control Plane +# cilium_enable_bgp_control_plane: false + +# -- Configure Loadbalancer IP Pools +# cilium_loadbalancer_ip_pools: +# - name: "blue-pool" +# cidrs: +# - "10.0.10.0/24" +# ranges: +# - start: "20.0.20.100" +# stop: "20.0.20.200" +# - start: "1.2.3.4" + +# -- Configure BGP Instances (New bgpv2 API v1.16+) +# cilium_bgp_cluster_configs: +# - name: "cilium-bgp" +# spec: +# bgpInstances: +# - name: "instance-64512" +# localASN: 64512 +# peers: +# - name: "peer-64512-tor1" +# peerASN: 64512 +# peerAddress: '10.47.1.1' +# peerConfigRef: +# name: "cilium-peer" +# nodeSelector: +# matchExpressions: +# - {key: somekey, operator: NotIn, values: ['never-used-value']} + +# -- Configure BGP Peers (New bgpv2 API v1.16+) +# cilium_bgp_peer_configs: +# - name: cilium-peer +# spec: +# # authSecretRef: bgp-auth-secret +# gracefulRestart: +# enabled: true +# restartTimeSeconds: 15 +# families: +# - afi: ipv4 +# safi: unicast +# advertisements: +# matchLabels: +# advertise: "bgp" +# - afi: ipv6 +# safi: unicast +# advertisements: +# matchLabels: +# advertise: "bgp" + +# -- Configure BGP Advertisements (New bgpv2 API v1.16+) +# cilium_bgp_advertisements: +# - name: bgp-advertisements +# labels: +# advertise: bgp +# spec: +# advertisements: +# # - advertisementType: "PodCIDR" +# # attributes: +# # communities: +# # standard: [ "64512:99" ] +# - advertisementType: "Service" +# service: +# addresses: +# - ClusterIP +# - ExternalIP +# - LoadBalancerIP +# selector: +# matchExpressions: +# - {key: somekey, operator: NotIn, values: ['never-used-value']} + +# -- Configure BGP Node Config Overrides (New bgpv2 API v1.16+) +# cilium_bgp_node_config_overrides: +# - name: bgp-node-config-override +# spec: +# bgpInstances: +# - name: "instance-65000" +# routerID: "192.168.10.1" +# localPort: 1790 +# peers: +# - name: "peer-65000-tor1" +# localAddress: fd00:10:0:2::2 +# - name: "peer-65000-tor2" +# localAddress: fd00:11:0:2::2 + +# -- Configure BGP Peers (Legacy v1.16+) +# cilium_bgp_peering_policies: +# - name: "01-bgp-peering-policy" +# spec: +# virtualRouters: +# - localASN: 64512 +# exportPodCIDR: false +# neighbors: +# - peerAddress: '10.47.1.1/24' +# peerASN: 64512 +# serviceSelector: +# matchExpressions: +# - {key: somekey, operator: NotIn, values: ['never-used-value']} + +# -- Configure whether direct routing mode should route traffic via +# host stack (true) or directly and more efficiently out of BPF (false) if +# the kernel supports it. 
The latter has the implication that it will also +# bypass netfilter in the host namespace. +# cilium_enable_host_legacy_routing: true + +# -- Enable use of the remote node identity. +# ref: https://docs.cilium.io/en/v1.7/install/upgrade/#configmap-remote-node-identity +# cilium_enable_remote_node_identity: true + +# -- Enable the use of well-known identities. +# cilium_enable_well_known_identities: false + +# cilium_enable_bpf_clock_probe: true + +# -- Whether to enable CNP status updates. +# cilium_disable_cnp_status_updates: true + +# A list of extra rules variables to add to clusterrole for cilium operator, formatted like: +# cilium_clusterrole_rules_operator_extra_vars: +# - apiGroups: +# - '""' +# resources: +# - pods +# verbs: +# - delete +# - apiGroups: +# - '""' +# resources: +# - nodes +# verbs: +# - list +# - watch +# resourceNames: +# - toto +# cilium_clusterrole_rules_operator_extra_vars: [] + +# Cilium extra values, use any values from cilium Helm Chart +# ref: https://docs.cilium.io/en/stable/helm-reference/ +# cilium_extra_values: {} diff --git a/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-flannel.yml b/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-flannel.yml new file mode 100644 index 00000000000..64d20a825bb --- /dev/null +++ b/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-flannel.yml @@ -0,0 +1,18 @@ +# see roles/network_plugin/flannel/defaults/main.yml + +## interface that should be used for flannel operations +## This is actually an inventory cluster-level item +# flannel_interface: + +## Select interface that should be used for flannel operations by regexp on Name or IP +## This is actually an inventory cluster-level item +## example: select interface with ip from net 10.0.0.0/23 +## single quote and escape backslashes +# flannel_interface_regexp: '10\\.0\\.[0-2]\\.\\d{1,3}' + +# You can choose what type of flannel backend to use: 'vxlan', 'host-gw' or 'wireguard' +# please refer to flannel's docs : https://github.com/coreos/flannel/blob/master/README.md +# flannel_backend_type: "vxlan" +# flannel_vxlan_vni: 1 +# flannel_vxlan_port: 8472 +# flannel_vxlan_direct_routing: false diff --git a/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-kube-router.yml b/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-kube-router.yml new file mode 100644 index 00000000000..8008b98a132 --- /dev/null +++ b/inventory/2SpeedLab/group_vars/k8s_cluster/k8s-net-kube-router.yml @@ -0,0 +1,67 @@ +# See roles/network_plugin/kube-router/defaults/main.yml + +# Enables Pod Networking -- Advertises and learns the routes to Pods via iBGP +# kube_router_run_router: true + +# Enables Network Policy -- sets up iptables to provide ingress firewall for pods +# kube_router_run_firewall: true + +# Enables Service Proxy -- sets up IPVS for Kubernetes Services +# see docs/kube-router.md "Caveats" section +# kube_router_run_service_proxy: false + +# Add Cluster IP of the service to the RIB so that it gets advertises to the BGP peers. +# kube_router_advertise_cluster_ip: false + +# Add External IP of service to the RIB so that it gets advertised to the BGP peers. +# kube_router_advertise_external_ip: false + +# Add LoadBalancer IP of service status as set by the LB provider to the RIB so that it gets advertised to the BGP peers. 
+# kube_router_advertise_loadbalancer_ip: false
+
+# Enables BGP graceful restarts
+# kube_router_bgp_graceful_restart: true
+
+# Adjust the kube-router daemonset template with the changes needed for DSR
+# kube_router_enable_dsr: false
+
+# Array of arbitrary extra arguments to kube-router, see
+# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md
+# kube_router_extra_args: []
+
+# ASN number of the cluster, used when communicating with external BGP routers
+# kube_router_cluster_asn: ~
+
+# ASN numbers of the BGP peer to which cluster nodes will advertise cluster ip and node's pod cidr.
+# kube_router_peer_router_asns: ~
+
+# The ip address of the external router to which all nodes will peer and advertise the cluster ip and pod cidrs.
+# kube_router_peer_router_ips: ~
+
+# The remote port of the external BGP router to which all nodes will peer. If not set, default BGP port (179) will be used.
+# kube_router_peer_router_ports: ~
+
+# Sets up node CNI to allow hairpin mode, requires node reboots, see
+# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md#hairpin-mode
+# kube_router_support_hairpin_mode: false
+
+# Select DNS Policy ClusterFirstWithHostNet, ClusterFirst, etc.
+# kube_router_dns_policy: ClusterFirstWithHostNet
+
+# Array of annotations for master
+# kube_router_annotations_master: []
+
+# Array of annotations for every node
+# kube_router_annotations_node: []
+
+# Array of common annotations for every node
+# kube_router_annotations_all: []
+
+# Enables scraping kube-router metrics with Prometheus
+# kube_router_enable_metrics: false
+
+# Path to serve Prometheus metrics on
+# kube_router_metrics_path: /metrics
+
+# Prometheus metrics port to use
+# kube_router_metrics_port: 9255
diff --git a/inventory/2SpeedLab/group_vars/k8s_cluster/kube_control_plane.yml b/inventory/2SpeedLab/group_vars/k8s_cluster/kube_control_plane.yml
new file mode 100644
index 00000000000..c90f8f2ab0f
--- /dev/null
+++ b/inventory/2SpeedLab/group_vars/k8s_cluster/kube_control_plane.yml
@@ -0,0 +1,11 @@
+# Reservation for control plane kubernetes components
+# kube_memory_reserved: 512Mi
+# kube_cpu_reserved: 200m
+# kube_ephemeral_storage_reserved: 2Gi
+# kube_pid_reserved: "1000"
+
+# Reservation for control plane host system
+# system_memory_reserved: 256Mi
+# system_cpu_reserved: 250m
+# system_ephemeral_storage_reserved: 2Gi
+# system_pid_reserved: "1000"
diff --git a/inventory/2SpeedLab/inventory.ini b/inventory/2SpeedLab/inventory.ini
new file mode 100644
index 00000000000..605372e2d14
--- /dev/null
+++ b/inventory/2SpeedLab/inventory.ini
@@ -0,0 +1,28 @@
+# This inventory describes an HA topology with stacked etcd (== same nodes as control plane)
+# and 6 worker nodes
+# See https://docs.ansible.com/ansible/latest/inventory_guide/intro_inventory.html
+# for tips on building your inventory
+
+# Configure the 'ip' variable to bind kubernetes services on a different ip than the default iface
+# We should set etcd_member_name for the etcd cluster. Nodes that are not etcd members do not need to set the value,
+# or can set it to an empty string.
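+# Example host entry syntax (illustrative values only, not hosts in this cluster):
+# nodeX ansible_host=95.54.0.12 ip=10.3.0.1 etcd_member_name=etcd1
+# A node that is not an etcd member can simply omit etcd_member_name:
+# nodeY ansible_host=95.54.0.13 ip=10.3.0.2
+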
+[kube_control_plane] +node1 ansible_host=10.10.24.109 ip=10.10.24.109 etcd_member_name=etcd1 +node2 ansible_host=10.10.25.114 ip=10.10.25.114 etcd_member_name=etcd2 +node3 ansible_host=10.10.24.62 ip=10.10.24.62 etcd_member_name=etcd3 + +[etcd:children] +kube_control_plane + +[kube_node] +node4 ansible_host=10.10.25.27 +node5 ansible_host=10.10.24.155 +node6 ansible_host=10.10.25.35 +node7 ansible_host=10.10.25.74 +node8 ansible_host=10.10.24.161 +node9 ansible_host=10.10.24.90 + +[database] +node7 ansible_host=10.10.25.74 +node8 ansible_host=10.10.24.161 +node9 ansible_host=10.10.24.90 \ No newline at end of file diff --git a/inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml b/inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml index cb9fa2438e7..57b5884131e 100644 --- a/inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml +++ b/inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml @@ -23,7 +23,7 @@ local_release_dir: "/tmp/releases" retry_stagger: 5 # This is the user that owns tha cluster installation. -kube_owner: kube +kube_owner: root # This is the group that the cert creation scripts chgrp the # cert files to. Not really changeable... diff --git a/logo/LICENSE b/logo/LICENSE deleted file mode 100644 index 8f2aa434480..00000000000 --- a/logo/LICENSE +++ /dev/null @@ -1 +0,0 @@ -# The Kubespray logo files are licensed under a choice of either Apache-2.0 or CC-BY-4.0 (Creative Commons Attribution 4.0 International). diff --git a/logo/logo-clear.png b/logo/logo-clear.png deleted file mode 100644 index 3ce32f6e33f..00000000000 Binary files a/logo/logo-clear.png and /dev/null differ diff --git a/logo/logo-clear.svg b/logo/logo-clear.svg deleted file mode 100644 index 7d60232126e..00000000000 --- a/logo/logo-clear.svg +++ /dev/null @@ -1,80 +0,0 @@ - - image/svg+xml - - - - - - - - - - - - - - - - background - - - - Layer 1 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/logo/logo-dark.png b/logo/logo-dark.png deleted file mode 100644 index 5fc3660668e..00000000000 Binary files a/logo/logo-dark.png and /dev/null differ diff --git a/logo/logo-dark.svg b/logo/logo-dark.svg deleted file mode 100644 index 3f6f0a66f46..00000000000 --- a/logo/logo-dark.svg +++ /dev/null @@ -1,83 +0,0 @@ - - image/svg+xml - - - - - - - - - - - - - - - - background - - - - Layer 1 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/logo/logo-text-clear.png b/logo/logo-text-clear.png deleted file mode 100644 index b8412407d3c..00000000000 Binary files a/logo/logo-text-clear.png and /dev/null differ diff --git a/logo/logo-text-clear.svg b/logo/logo-text-clear.svg deleted file mode 100644 index b1029ded911..00000000000 --- a/logo/logo-text-clear.svg +++ /dev/null @@ -1,107 +0,0 @@ - - image/svg+xml - - - - - - - - - - - - - - - - background - - - - Layer 1 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/logo/logo-text-dark.png b/logo/logo-text-dark.png deleted file mode 100644 index 1871c0fb377..00000000000 Binary files a/logo/logo-text-dark.png and /dev/null differ diff --git a/logo/logo-text-dark.svg b/logo/logo-text-dark.svg deleted file mode 100644 index 52bdb4e1e12..00000000000 --- a/logo/logo-text-dark.svg +++ /dev/null @@ -1,110 +0,0 @@ - - image/svg+xml - - - - - - - - - - - - - - - - background - - 
- - Layer 1 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/logo/logo-text-mixed.png b/logo/logo-text-mixed.png deleted file mode 100644 index a4b3b39d8a7..00000000000 Binary files a/logo/logo-text-mixed.png and /dev/null differ diff --git a/logo/logo-text-mixed.svg b/logo/logo-text-mixed.svg deleted file mode 100644 index 44ea1feb6ac..00000000000 --- a/logo/logo-text-mixed.svg +++ /dev/null @@ -1,110 +0,0 @@ - - image/svg+xml - - - - - - - - - - - - - - - - background - - - - Layer 1 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/logo/logos.pdf b/logo/logos.pdf deleted file mode 100644 index ed7a1f5f84c..00000000000 Binary files a/logo/logos.pdf and /dev/null differ diff --git a/logo/usage_guidelines.md b/logo/usage_guidelines.md deleted file mode 100644 index 9a081235e85..00000000000 --- a/logo/usage_guidelines.md +++ /dev/null @@ -1,16 +0,0 @@ -# Kubernetes Branding Guidelines - -These guidelines provide you with guidance for using the Kubespray logo. -All artwork is made available under the Linux Foundation trademark usage -[guidelines](https://www.linuxfoundation.org/trademark-usage/). This text from -those guidelines, and the correct and incorrect usage examples, are particularly -helpful: ->Certain marks of The Linux Foundation have been created to enable you to ->communicate compatibility or interoperability of software or products. In ->addition to the requirement that any use of a mark to make an assertion of ->compatibility must, of course, be accurate, the use of these marks must ->avoid confusion regarding The Linux Foundation’s association with the ->product. The use of the mark cannot imply that The Linux Foundation or ->its projects are sponsoring or endorsing the product. - -Additionally, permission is granted to modify the Kubespray mark for non-commercial uses such as t-shirts and stickers. 
diff --git a/meta/runtime.yml b/meta/runtime.yml index b1198d77732..f3791fb8514 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -1,2 +1,2 @@ --- -requires_ansible: ">=2.17.3" +requires_ansible: ">=2.16.14" diff --git a/pipeline.Dockerfile b/pipeline.Dockerfile deleted file mode 100644 index d6d6ebcd03e..00000000000 --- a/pipeline.Dockerfile +++ /dev/null @@ -1,60 +0,0 @@ -# Use imutable image tags rather than mutable tags (like ubuntu:22.04) -FROM ubuntu:jammy-20230308 -# Some tools like yamllint need this -# Pip needs this as well at the moment to install ansible -# (and potentially other packages) -# See: https://github.com/pypa/pip/issues/10219 -ENV VAGRANT_VERSION=2.4.1 \ - VAGRANT_DEFAULT_PROVIDER=libvirt \ - VAGRANT_ANSIBLE_TAGS=facts \ - LANG=C.UTF-8 \ - DEBIAN_FRONTEND=noninteractive \ - PYTHONDONTWRITEBYTECODE=1 - -RUN apt update -q \ - && apt install -yq \ - libssl-dev \ - python3-dev \ - python3-pip \ - sshpass \ - apt-transport-https \ - jq \ - moreutils \ - libvirt-dev \ - openssh-client \ - rsync \ - git \ - ca-certificates \ - curl \ - gnupg2 \ - software-properties-common \ - unzip \ - libvirt-clients \ - qemu-utils \ - qemu-kvm \ - dnsmasq \ - && curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \ - && add-apt-repository "deb [arch=$(dpkg --print-architecture)] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" \ - && apt update -q \ - && apt install --no-install-recommends -yq docker-ce \ - && apt autoremove -yqq --purge && apt clean && rm -rf /var/lib/apt/lists/* /var/log/* - -WORKDIR /kubespray -ADD ./requirements.txt /kubespray/requirements.txt -ADD ./tests/requirements.txt /kubespray/tests/requirements.txt - -RUN update-alternatives --install /usr/bin/python python /usr/bin/python3 1 \ - && pip install --no-compile --no-cache-dir pip -U \ - && pip install --no-compile --no-cache-dir -r tests/requirements.txt \ - && pip install --no-compile --no-cache-dir -r requirements.txt \ - && curl -L https://dl.k8s.io/release/v1.33.4/bin/linux/$(dpkg --print-architecture)/kubectl -o /usr/local/bin/kubectl \ - && echo $(curl -L https://dl.k8s.io/release/v1.33.4/bin/linux/$(dpkg --print-architecture)/kubectl.sha256) /usr/local/bin/kubectl | sha256sum --check \ - && chmod a+x /usr/local/bin/kubectl \ - # Install Vagrant - && curl -LO https://releases.hashicorp.com/vagrant/${VAGRANT_VERSION}/vagrant_${VAGRANT_VERSION}-1_$(dpkg --print-architecture).deb \ - && dpkg -i vagrant_${VAGRANT_VERSION}-1_$(dpkg --print-architecture).deb \ - && rm vagrant_${VAGRANT_VERSION}-1_$(dpkg --print-architecture).deb \ - && vagrant plugin install vagrant-libvirt \ - # Install Kubernetes collections - && pip install --no-compile --no-cache-dir kubernetes \ - && ansible-galaxy collection install kubernetes.core diff --git a/playbooks/ansible_version.yml b/playbooks/ansible_version.yml index a07b401e6d1..e2db9f79363 100644 --- a/playbooks/ansible_version.yml +++ b/playbooks/ansible_version.yml @@ -5,7 +5,7 @@ become: false run_once: true vars: - minimal_ansible_version: 2.17.3 + minimal_ansible_version: 2.16.14 maximal_ansible_version: 2.18.0 tags: always tasks: diff --git a/playbooks/cluster.yml b/playbooks/cluster.yml index 12aeeee02cf..05df9121205 100644 --- a/playbooks/cluster.yml +++ b/playbooks/cluster.yml @@ -55,15 +55,6 @@ - { role: kubernetes-apps/common_crds } - { role: network_plugin, tags: network } -- name: Install Calico Route Reflector - hosts: calico_rr - gather_facts: false - any_errors_fatal: "{{ any_errors_fatal | default(true) }}" - 
environment: "{{ proxy_disable_env }}" - roles: - - { role: kubespray_defaults } - - { role: network_plugin/calico/rr, tags: ['network', 'calico_rr'] } - - name: Patch Kubernetes for Windows hosts: kube_control_plane[0] gather_facts: false diff --git a/roles/container-engine/containerd-common/defaults/main.yml b/roles/container-engine/containerd-common/defaults/main.yml index bceb5c5778b..ae1c6e05a0c 100644 --- a/roles/container-engine/containerd-common/defaults/main.yml +++ b/roles/container-engine/containerd-common/defaults/main.yml @@ -3,3 +3,15 @@ # manager controlled installs to direct download ones. containerd_package: 'containerd.io' yum_repo_dir: /etc/yum.repos.d + +# Keep minimal repo information around for cleanup +containerd_repo_info: + repos: + +# Ubuntu docker-ce repo +containerd_ubuntu_repo_base_url: "https://download.docker.com/linux/ubuntu" +containerd_ubuntu_repo_component: "stable" + +# Debian docker-ce repo +containerd_debian_repo_base_url: "https://download.docker.com/linux/debian" +containerd_debian_repo_component: "stable" diff --git a/roles/container-engine/containerd/defaults/main.yml b/roles/container-engine/containerd/defaults/main.yml index a0865bd782b..7f76ef33108 100644 --- a/roles/container-engine/containerd/defaults/main.yml +++ b/roles/container-engine/containerd/defaults/main.yml @@ -64,8 +64,7 @@ containerd_registries_mirrors: skip_verify: false # ca: ["/etc/certs/mirror.pem"] # client: [["/etc/certs/client.pem", ""],["/etc/certs/client.cert", "/etc/certs/client.key"]] -# header: -# Authorization: "Basic XXX" + containerd_max_container_log_line_size: 16384 # If enabled it will allow non root users to use port numbers <1024 diff --git a/roles/container-engine/containerd/molecule/default/converge.yml b/roles/container-engine/containerd/molecule/default/converge.yml index 2a061fcb361..7847871e28b 100644 --- a/roles/container-engine/containerd/molecule/default/converge.yml +++ b/roles/container-engine/containerd/molecule/default/converge.yml @@ -5,5 +5,5 @@ vars: container_manager: containerd roles: - - role: kubespray_defaults + - role: kubespray-defaults - role: container-engine/containerd diff --git a/roles/container-engine/containerd/molecule/default/molecule.yml b/roles/container-engine/containerd/molecule/default/molecule.yml index b62b9493222..0ad3b794656 100644 --- a/roles/container-engine/containerd/molecule/default/molecule.yml +++ b/roles/container-engine/containerd/molecule/default/molecule.yml @@ -1,16 +1,16 @@ --- role_name_check: 1 platforms: - - cloud_image: ubuntu-2404 - name: ubuntu24 + - cloud_image: ubuntu-2004 + name: ubuntu20 vm_cpu_cores: 1 vm_memory: 1024 node_groups: - kube_control_plane - kube_node - k8s_cluster - - cloud_image: debian-12 - name: debian12 + - cloud_image: debian-11 + name: debian11 vm_cpu_cores: 1 vm_memory: 1024 node_groups: @@ -35,6 +35,5 @@ provisioner: timeout: 120 playbooks: create: ../../../../../tests/cloud_playbooks/create-kubevirt.yml - prepare: ../../../molecule/prepare.yml verifier: - name: ansible + name: testinfra diff --git a/roles/container-engine/containerd/molecule/default/prepare.yml b/roles/container-engine/containerd/molecule/default/prepare.yml new file mode 100644 index 00000000000..a3d09ad8005 --- /dev/null +++ b/roles/container-engine/containerd/molecule/default/prepare.yml @@ -0,0 +1,29 @@ +--- +- name: Prepare + hosts: all + gather_facts: false + become: true + vars: + ignore_assert_errors: true + roles: + - role: kubespray-defaults + - role: bootstrap-os + - role: kubernetes/preinstall + - 
role: adduser + user: "{{ addusers.kube }}" + tasks: + - name: Download CNI + include_tasks: "../../../../download/tasks/download_file.yml" + vars: + download: "{{ download_defaults | combine(downloads.cni) }}" + +- name: Prepare CNI + hosts: all + gather_facts: false + become: true + vars: + ignore_assert_errors: true + kube_network_plugin: cni + roles: + - role: kubespray-defaults + - role: network_plugin/cni diff --git a/roles/container-engine/containerd/molecule/default/tests/test_default.py b/roles/container-engine/containerd/molecule/default/tests/test_default.py new file mode 100644 index 00000000000..e1d915179bf --- /dev/null +++ b/roles/container-engine/containerd/molecule/default/tests/test_default.py @@ -0,0 +1,55 @@ +import os +import pytest + +import testinfra.utils.ansible_runner + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') + + +def test_service(host): + svc = host.service("containerd") + assert svc.is_running + assert svc.is_enabled + + +def test_version(host): + crictl = "/usr/local/bin/crictl" + path = "unix:///var/run/containerd/containerd.sock" + with host.sudo(): + cmd = host.command(crictl + " --runtime-endpoint " + path + " version") + assert cmd.rc == 0 + assert "RuntimeName: containerd" in cmd.stdout + + +@pytest.mark.parametrize('image, dest', [ + ('quay.io/kubespray/hello-world:latest', '/tmp/hello-world.tar') +]) +def test_image_pull_save_load(host, image, dest): + nerdctl = "/usr/local/bin/nerdctl" + dest_file = host.file(dest) + + with host.sudo(): + pull_cmd = host.command(nerdctl + " pull " + image) + assert pull_cmd.rc ==0 + + with host.sudo(): + save_cmd = host.command(nerdctl + " save -o " + dest + " " + image) + assert save_cmd.rc == 0 + assert dest_file.exists + + with host.sudo(): + load_cmd = host.command(nerdctl + " load < " + dest) + assert load_cmd.rc == 0 + + +@pytest.mark.parametrize('image', [ + ('quay.io/kubespray/hello-world:latest') +]) +def test_run(host, image): + nerdctl = "/usr/local/bin/nerdctl" + + with host.sudo(): + cmd = host.command(nerdctl + " -n k8s.io run " + image) + assert cmd.rc == 0 + assert "Hello from Docker" in cmd.stdout diff --git a/roles/container-engine/containerd/molecule/default/verify.yml b/roles/container-engine/containerd/molecule/default/verify.yml deleted file mode 100644 index 96ad82d2ac5..00000000000 --- a/roles/container-engine/containerd/molecule/default/verify.yml +++ /dev/null @@ -1,39 +0,0 @@ ---- -- name: Test containerd CRI - import_playbook: ../../../molecule/test_cri.yml - vars: - container_manager: containerd - cri_socket: unix:///var/run/containerd/containerd.sock - cri_name: containerd - -- name: Test nerdctl - hosts: all - gather_facts: false - become: true - tasks: - - name: Get kubespray defaults - import_role: - name: ../../../../../kubespray_defaults - - name: Test nerdctl commands - command: "{{ bin_dir }}/nerdctl {{ item | join(' ') }}" - vars: - image: quay.io/kubespray/hello-world:latest - loop: - - - pull - - "{{ image }}" - - - save - - -o - - /tmp/hello-world.tar - - "{{ image }}" - - - load - - -i - - /tmp/hello-world.tar - - - -n - - k8s.io - - run - - "{{ image }}" - register: nerdctl - - name: Check log from running a container - assert: - that: - - ('Hello from Docker' in nerdctl.results[3].stdout) diff --git a/roles/container-engine/containerd/tasks/main.yml b/roles/container-engine/containerd/tasks/main.yml index ae726b78db5..39005b97ccf 100644 --- a/roles/container-engine/containerd/tasks/main.yml +++ 
b/roles/container-engine/containerd/tasks/main.yml @@ -1,4 +1,31 @@ --- +- name: Fail containerd setup if distribution is not supported + fail: + msg: "{{ ansible_distribution }} is not supported by containerd." + when: + - not (allow_unsupported_distribution_setup | default(false)) and (ansible_distribution not in containerd_supported_distributions) + +- name: Containerd | Remove any package manager controlled containerd package + package: + name: "{{ containerd_package }}" + state: absent + when: + - not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar")) + +- name: Containerd | Remove containerd repository + file: + path: "{{ yum_repo_dir }}/containerd.repo" + state: absent + when: + - ansible_os_family in ['RedHat'] + +- name: Containerd | Remove containerd repository + apt_repository: + repo: "{{ item }}" + state: absent + with_items: "{{ containerd_repo_info.repos }}" + when: ansible_pkg_mgr == 'apt' + - name: Containerd | Download containerd include_tasks: "../../../download/tasks/download_file.yml" vars: @@ -14,6 +41,21 @@ - --strip-components=1 notify: Restart containerd +- name: Containerd | Remove orphaned binary + file: + path: "/usr/bin/{{ item }}" + state: absent + when: + - containerd_bin_dir != "/usr/bin" + - not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar")) + ignore_errors: true # noqa ignore-errors + with_items: + - containerd + - containerd-shim + - containerd-shim-runc-v1 + - containerd-shim-runc-v2 + - ctr + - name: Containerd | Generate systemd service for containerd template: src: containerd.service.j2 @@ -73,8 +115,6 @@ notify: Restart containerd - name: Containerd | Configure containerd registries - # mirror configuration can contain sensitive information on headers configuration - no_log: "{{ not (unsafe_show_logs | bool) }}" block: - name: Containerd | Create registry directories file: diff --git a/roles/container-engine/containerd/tasks/reset.yml b/roles/container-engine/containerd/tasks/reset.yml index 0e70cded4d7..517e56da670 100644 --- a/roles/container-engine/containerd/tasks/reset.yml +++ b/roles/container-engine/containerd/tasks/reset.yml @@ -1,4 +1,22 @@ --- +- name: Containerd | Remove containerd repository for RedHat os family + file: + path: "{{ yum_repo_dir }}/containerd.repo" + state: absent + when: + - ansible_os_family in ['RedHat'] + tags: + - reset_containerd + +- name: Containerd | Remove containerd repository for Debian os family + apt_repository: + repo: "{{ item }}" + state: absent + with_items: "{{ containerd_repo_info.repos }}" + when: ansible_pkg_mgr == 'apt' + tags: + - reset_containerd + - name: Containerd | Stop containerd service service: name: containerd diff --git a/roles/container-engine/containerd/templates/hosts.toml.j2 b/roles/container-engine/containerd/templates/hosts.toml.j2 index 0f5b3d013b5..b2b16a65ffb 100644 --- a/roles/container-engine/containerd/templates/hosts.toml.j2 +++ b/roles/container-engine/containerd/templates/hosts.toml.j2 @@ -10,10 +10,4 @@ server = "{{ item.server | default("https://" + item.prefix) }}" {% if mirror.client is defined %} client = [{% for pair in mirror.client %}["{{ pair[0] }}", "{{ pair[1] }}"]{% if not loop.last %},{% endif %}{% endfor %}] {% endif %} -{% if mirror.header is defined %} - [host."{{ mirror.host }}".header] -{% for key, value in mirror.header.items() %} - {{ key }} = ["{{ ([ value ] | flatten ) | join('","') }}"] -{% endfor %} -{% endif %} {% 
endfor %} diff --git a/roles/container-engine/containerd/vars/debian.yml b/roles/container-engine/containerd/vars/debian.yml new file mode 100644 index 00000000000..8b18d9a9f4e --- /dev/null +++ b/roles/container-engine/containerd/vars/debian.yml @@ -0,0 +1,7 @@ +--- +containerd_repo_info: + repos: + - > + deb {{ containerd_debian_repo_base_url }} + {{ ansible_distribution_release | lower }} + {{ containerd_debian_repo_component }} diff --git a/roles/container-engine/containerd/vars/ubuntu.yml b/roles/container-engine/containerd/vars/ubuntu.yml new file mode 100644 index 00000000000..dd775323dde --- /dev/null +++ b/roles/container-engine/containerd/vars/ubuntu.yml @@ -0,0 +1,7 @@ +--- +containerd_repo_info: + repos: + - > + deb {{ containerd_ubuntu_repo_base_url }} + {{ ansible_distribution_release | lower }} + {{ containerd_ubuntu_repo_component }} diff --git a/roles/container-engine/cri-dockerd/molecule/default/converge.yml b/roles/container-engine/cri-dockerd/molecule/default/converge.yml index 05053734380..be6fa381225 100644 --- a/roles/container-engine/cri-dockerd/molecule/default/converge.yml +++ b/roles/container-engine/cri-dockerd/molecule/default/converge.yml @@ -5,5 +5,5 @@ vars: container_manager: docker roles: - - role: kubespray_defaults + - role: kubespray-defaults - role: container-engine/cri-dockerd diff --git a/roles/container-engine/cri-dockerd/molecule/default/molecule.yml b/roles/container-engine/cri-dockerd/molecule/default/molecule.yml index 78702376404..cff276e423e 100644 --- a/roles/container-engine/cri-dockerd/molecule/default/molecule.yml +++ b/roles/container-engine/cri-dockerd/molecule/default/molecule.yml @@ -7,8 +7,8 @@ platforms: vm_memory: 1024 node_groups: - kube_control_plane - - name: ubuntu22 - cloud_image: ubuntu-2204 + - name: ubuntu20 + cloud_image: ubuntu-2004 vm_cpu_cores: 1 vm_memory: 1024 node_groups: @@ -27,6 +27,5 @@ provisioner: become: true playbooks: create: ../../../../../tests/cloud_playbooks/create-kubevirt.yml - prepare: ../../../molecule/prepare.yml verifier: - name: ansible + name: testinfra diff --git a/roles/container-engine/cri-dockerd/molecule/default/prepare.yml b/roles/container-engine/cri-dockerd/molecule/default/prepare.yml new file mode 100644 index 00000000000..b5328422a8d --- /dev/null +++ b/roles/container-engine/cri-dockerd/molecule/default/prepare.yml @@ -0,0 +1,48 @@ +--- +- name: Prepare + hosts: all + become: true + roles: + - role: kubespray-defaults + - role: bootstrap-os + - role: adduser + user: "{{ addusers.kube }}" + tasks: + - name: Download CNI + include_tasks: "../../../../download/tasks/download_file.yml" + vars: + download: "{{ download_defaults | combine(downloads.cni) }}" + +- name: Prepare container runtime + hosts: all + become: true + vars: + container_manager: containerd + kube_network_plugin: cni + roles: + - role: kubespray-defaults + - role: network_plugin/cni + tasks: + - name: Copy test container files + copy: + src: "{{ item }}" + dest: "/tmp/{{ item }}" + owner: root + mode: "0644" + with_items: + - container.json + - sandbox.json + - name: Create /etc/cni/net.d directory + file: + path: /etc/cni/net.d + state: directory + owner: "{{ kube_owner }}" + mode: "0755" + - name: Setup CNI + copy: + src: "{{ item }}" + dest: "/etc/cni/net.d/{{ item }}" + owner: root + mode: "0644" + with_items: + - 10-mynet.conf diff --git a/roles/container-engine/cri-dockerd/molecule/default/tests/test_default.py b/roles/container-engine/cri-dockerd/molecule/default/tests/test_default.py new file mode 100644 index 
00000000000..dc99b34981b --- /dev/null +++ b/roles/container-engine/cri-dockerd/molecule/default/tests/test_default.py @@ -0,0 +1,19 @@ +import os + +import testinfra.utils.ansible_runner + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') + + +def test_run_pod(host): + run_command = "/usr/local/bin/crictl run --with-pull /tmp/container.json /tmp/sandbox.json" + with host.sudo(): + cmd = host.command(run_command) + assert cmd.rc == 0 + + with host.sudo(): + log_f = host.file("/tmp/cri-dockerd1.0.log") + + assert log_f.exists + assert b"Hello from Docker" in log_f.content diff --git a/roles/container-engine/cri-dockerd/molecule/default/verify.yml b/roles/container-engine/cri-dockerd/molecule/default/verify.yml deleted file mode 100644 index a11eb86f5fe..00000000000 --- a/roles/container-engine/cri-dockerd/molecule/default/verify.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -- name: Test cri-dockerd - import_playbook: ../../../molecule/test_cri.yml - vars: - container_manager: cri-dockerd - cri_socket: unix:///var/run/cri-dockerd.sock - cri_name: docker - -- name: Test running a container with docker - import_playbook: ../../../molecule/test_runtime.yml - vars: - container_runtime: docker - # cri-dockerd does not support multiple runtime handler before 0.4.0 - # https://github.com/Mirantis/cri-dockerd/pull/350 - # TODO: check this when we upgrade cri-dockerd diff --git a/roles/container-engine/cri-o/defaults/main.yml b/roles/container-engine/cri-o/defaults/main.yml index b7e34654bad..4de3b178119 100644 --- a/roles/container-engine/cri-o/defaults/main.yml +++ b/roles/container-engine/cri-o/defaults/main.yml @@ -44,7 +44,7 @@ crio_root: "/var/lib/containers/storage" # The crio_runtimes variable defines a list of OCI compatible runtimes. 
crio_runtimes: - name: crun - path: "{{ crio_runtime_bin_dir }}/crun" # Use crun in cri-o distributions, don't use 'crun' role + path: "{{ crio_runtime_bin_dir }}/crun" type: oci root: /run/crun diff --git a/roles/container-engine/cri-o/meta/main.yml b/roles/container-engine/cri-o/meta/main.yml index 5289208fb4a..99e803a5170 100644 --- a/roles/container-engine/cri-o/meta/main.yml +++ b/roles/container-engine/cri-o/meta/main.yml @@ -1,4 +1,5 @@ --- dependencies: + - role: container-engine/crun - role: container-engine/crictl - role: container-engine/skopeo diff --git a/roles/container-engine/cri-o/molecule/default/converge.yml b/roles/container-engine/cri-o/molecule/default/converge.yml index 85361b4397e..376f07c4582 100644 --- a/roles/container-engine/cri-o/molecule/default/converge.yml +++ b/roles/container-engine/cri-o/molecule/default/converge.yml @@ -5,5 +5,5 @@ vars: container_manager: crio roles: - - role: kubespray_defaults + - role: kubespray-defaults - role: container-engine/cri-o diff --git a/roles/container-engine/molecule/files/10-mynet.conf b/roles/container-engine/cri-o/molecule/default/files/10-mynet.conf similarity index 100% rename from roles/container-engine/molecule/files/10-mynet.conf rename to roles/container-engine/cri-o/molecule/default/files/10-mynet.conf diff --git a/roles/container-engine/molecule/templates/container.json.j2 b/roles/container-engine/cri-o/molecule/default/files/container.json similarity index 55% rename from roles/container-engine/molecule/templates/container.json.j2 rename to roles/container-engine/cri-o/molecule/default/files/container.json index fc52def81c9..bcd71e7e586 100644 --- a/roles/container-engine/molecule/templates/container.json.j2 +++ b/roles/container-engine/cri-o/molecule/default/files/container.json @@ -1,10 +1,10 @@ { "metadata": { - "name": "{{ container_runtime }}1" + "name": "runc1" }, "image": { "image": "quay.io/kubespray/hello-world:latest" }, - "log_path": "{{ container_runtime }}1.0.log", + "log_path": "runc1.0.log", "linux": {} } diff --git a/roles/container-engine/molecule/templates/sandbox.json.j2 b/roles/container-engine/cri-o/molecule/default/files/sandbox.json similarity index 79% rename from roles/container-engine/molecule/templates/sandbox.json.j2 rename to roles/container-engine/cri-o/molecule/default/files/sandbox.json index dc2894736bd..eb9dcb9d282 100644 --- a/roles/container-engine/molecule/templates/sandbox.json.j2 +++ b/roles/container-engine/cri-o/molecule/default/files/sandbox.json @@ -1,6 +1,6 @@ { "metadata": { - "name": "{{ container_runtime }}1", + "name": "runc1", "namespace": "default", "attempt": 1, "uid": "hdishd83djaidwnduwk28bcsb" diff --git a/roles/container-engine/cri-o/molecule/default/molecule.yml b/roles/container-engine/cri-o/molecule/default/molecule.yml index e5bf20e5df4..6bbaabf7af3 100644 --- a/roles/container-engine/cri-o/molecule/default/molecule.yml +++ b/roles/container-engine/cri-o/molecule/default/molecule.yml @@ -1,8 +1,8 @@ --- role_name_check: 1 platforms: - - name: ubuntu22 - cloud_image: ubuntu-2204 + - name: ubuntu20 + cloud_image: ubuntu-2004 vm_cpu_cores: 2 vm_memory: 1024 node_groups: @@ -43,6 +43,5 @@ provisioner: timeout: 120 playbooks: create: ../../../../../tests/cloud_playbooks/create-kubevirt.yml - prepare: ../../../molecule/prepare.yml verifier: - name: ansible + name: testinfra diff --git a/roles/container-engine/molecule/prepare.yml b/roles/container-engine/cri-o/molecule/default/prepare.yml similarity index 58% rename from 
roles/container-engine/molecule/prepare.yml rename to roles/container-engine/cri-o/molecule/default/prepare.yml index 9faf3a8656c..55ad5174d70 100644 --- a/roles/container-engine/molecule/prepare.yml +++ b/roles/container-engine/cri-o/molecule/default/prepare.yml @@ -6,15 +6,14 @@ vars: ignore_assert_errors: true roles: - - role: dynamic_groups - - role: bootstrap_os - - role: network_facts + - role: kubespray-defaults + - role: bootstrap-os - role: kubernetes/preinstall - role: adduser user: "{{ addusers.kube }}" tasks: - name: Download CNI - include_tasks: "../../download/tasks/download_file.yml" + include_tasks: "../../../../download/tasks/download_file.yml" vars: download: "{{ download_defaults | combine(downloads.cni) }}" @@ -26,18 +25,29 @@ ignore_assert_errors: true kube_network_plugin: cni roles: - - role: kubespray_defaults + - role: kubespray-defaults - role: network_plugin/cni tasks: + - name: Copy test container files + copy: + src: "{{ item }}" + dest: "/tmp/{{ item }}" + owner: root + mode: "0644" + with_items: + - container.json + - sandbox.json - name: Create /etc/cni/net.d directory file: path: /etc/cni/net.d state: directory - owner: root + owner: "{{ kube_owner }}" mode: "0755" - - name: Config bridge host-local CNI + - name: Setup CNI copy: - src: "10-mynet.conf" - dest: "/etc/cni/net.d/" + src: "{{ item }}" + dest: "/etc/cni/net.d/{{ item }}" owner: root mode: "0644" + with_items: + - 10-mynet.conf diff --git a/roles/container-engine/cri-o/molecule/default/tests/test_default.py b/roles/container-engine/cri-o/molecule/default/tests/test_default.py new file mode 100644 index 00000000000..3e38fa5b2ae --- /dev/null +++ b/roles/container-engine/cri-o/molecule/default/tests/test_default.py @@ -0,0 +1,35 @@ +import os + +import testinfra.utils.ansible_runner + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') + + +def test_service(host): + svc = host.service("crio") + assert svc.is_running + assert svc.is_enabled + + +def test_run(host): + crictl = "/usr/local/bin/crictl" + path = "unix:///var/run/crio/crio.sock" + with host.sudo(): + cmd = host.command(crictl + " --runtime-endpoint " + path + " version") + assert cmd.rc == 0 + assert "RuntimeName: cri-o" in cmd.stdout + +def test_run_pod(host): + runtime = "crun" + + run_command = "/usr/local/bin/crictl run --with-pull --runtime {} /tmp/container.json /tmp/sandbox.json".format(runtime) + with host.sudo(): + cmd = host.command(run_command) + assert cmd.rc == 0 + + with host.sudo(): + log_f = host.file("/tmp/runc1.0.log") + + assert log_f.exists + assert b"Hello from Docker" in log_f.content diff --git a/roles/container-engine/cri-o/molecule/default/verify.yml b/roles/container-engine/cri-o/molecule/default/verify.yml deleted file mode 100644 index a40eb34d56a..00000000000 --- a/roles/container-engine/cri-o/molecule/default/verify.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -- name: Test CRI-O cri - import_playbook: ../../../molecule/test_cri.yml - vars: - container_manager: crio - cri_socket: unix:///var/run/crio/crio.sock - cri_name: cri-o -- name: Test running a container with crun - import_playbook: ../../../molecule/test_runtime.yml - vars: - container_runtime: crun diff --git a/roles/container-engine/docker/defaults/main.yml b/roles/container-engine/docker/defaults/main.yml index 29e8904145f..543f8f294ec 100644 --- a/roles/container-engine/docker/defaults/main.yml +++ b/roles/container-engine/docker/defaults/main.yml @@ -1,5 +1,5 @@ --- -docker_version: 
'28.3' +docker_version: '28.0' docker_cli_version: "{{ docker_version }}" docker_package_info: diff --git a/roles/container-engine/docker/tasks/main.yml b/roles/container-engine/docker/tasks/main.yml index 62293264d8e..46f3bce88e5 100644 --- a/roles/container-engine/docker/tasks/main.yml +++ b/roles/container-engine/docker/tasks/main.yml @@ -60,17 +60,16 @@ environment: "{{ proxy_env }}" when: ansible_pkg_mgr == 'apt' -# ref to https://github.com/kubernetes-sigs/kubespray/issues/11086 & 12424 -- name: Convert -backports sources to archive.debian.org for bullseye and older - replace: - path: "{{ item }}" - regexp: '^(deb(?:-src)?\s+)(?:https?://)?(?:[^ ]+debian\.org)?([^ ]*/debian)(\s+{{ ansible_distribution_release }}-backports\b.*)' - replace: '\1http://archive.debian.org/debian\3' +# ref to https://github.com/kubernetes-sigs/kubespray/issues/11086 +- name: Remove the archived debian apt repository + lineinfile: + path: /etc/apt/sources.list + regexp: 'buster-backports' + state: absent backup: true - loop: "{{ query('fileglob', '/etc/apt/sources.list') }}" when: - ansible_os_family == 'Debian' - - ansible_distribution_release in ['bullseye', 'buster'] + - ansible_distribution_release == "buster" - name: Ensure docker-ce repository is enabled apt_repository: diff --git a/roles/container-engine/docker/vars/debian.yml b/roles/container-engine/docker/vars/debian.yml index ee2e932811f..f5a0fc99b9d 100644 --- a/roles/container-engine/docker/vars/debian.yml +++ b/roles/container-engine/docker/vars/debian.yml @@ -34,10 +34,8 @@ containerd_versioned_pkg: '1.7.23': "{{ containerd_package }}=1.7.23-1" '1.7.24': "{{ containerd_package }}=1.7.24-1" '1.7.25': "{{ containerd_package }}=1.7.25-1" - '1.7.26': "{{ containerd_package }}=1.7.26-1" - '1.7.27': "{{ containerd_package }}=1.7.27-1" - 'stable': "{{ containerd_package }}=1.7.27-1" - 'edge': "{{ containerd_package }}=1.7.27-1" + 'stable': "{{ containerd_package }}=1.7.25-1" + 'edge': "{{ containerd_package }}=1.7.25-1" # https://download.docker.com/linux/debian/ docker_versioned_pkg: @@ -55,13 +53,10 @@ docker_versioned_pkg: '27.2': docker-ce=5:27.2.1-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }} '27.3': docker-ce=5:27.3.1-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }} '27.4': docker-ce=5:27.4.1-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }} - '27.5': docker-ce=5:27.5.1-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }} - '28.0': docker-ce=5:28.0.4-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }} - '28.1': docker-ce=5:28.1.1-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }} - '28.2': docker-ce=5:28.2.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }} - '28.3': docker-ce=5:28.3.3-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }} - 'stable': docker-ce=5:28.3.3-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }} - 'edge': docker-ce=5:28.3.3-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }} + '27.5': docker-ce=5:27.5.4-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }} + '28.0': docker-ce=5:28.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | 
lower }} + 'stable': docker-ce=5:28.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }} + 'edge': docker-ce=5:28.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }} docker_cli_versioned_pkg: 'latest': docker-ce-cli @@ -78,13 +73,10 @@ docker_cli_versioned_pkg: '27.2': docker-ce-cli=5:27.2.1-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }} '27.3': docker-ce-cli=5:27.3.1-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }} '27.4': docker-ce-cli=5:27.4.1-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }} - '27.5': docker-ce-cli=5:27.5.1-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }} - '28.0': docker-ce-cli=5:28.0.4-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }} - '28.1': docker-ce-cli=5:28.1.1-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }} - '28.2': docker-ce-cli=5:28.2.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }} - '28.3': docker-ce-cli=5:28.3.3-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }} - 'stable': docker-ce-cli=5:28.3.3-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }} - 'edge': docker-ce-cli=5:28.3.3-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }} + '27.5': docker-ce-cli=5:27.5.4-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }} + '28.0': docker-ce-cli=5:28.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }} + 'stable': docker-ce-cli=5:28.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }} + 'edge': docker-ce-cli=5:28.0.2-1~debian.{{ ansible_distribution_major_version }}~{{ ansible_distribution_release | lower }} docker_package_info: pkgs: diff --git a/roles/container-engine/docker/vars/fedora.yml b/roles/container-engine/docker/vars/fedora.yml index 5140fee12ba..f713acc0669 100644 --- a/roles/container-engine/docker/vars/fedora.yml +++ b/roles/container-engine/docker/vars/fedora.yml @@ -34,10 +34,8 @@ containerd_versioned_pkg: '1.7.23': "{{ containerd_package }}-1.7.23-3.1.fc{{ ansible_distribution_major_version }}" '1.7.24': "{{ containerd_package }}-1.7.24-3.1.fc{{ ansible_distribution_major_version }}" '1.7.25': "{{ containerd_package }}-1.7.25-3.1.fc{{ ansible_distribution_major_version }}" - '1.7.26': "{{ containerd_package }}-1.7.26-3.1.fc{{ ansible_distribution_major_version }}" - '1.7.27': "{{ containerd_package }}-1.7.27-3.1.fc{{ ansible_distribution_major_version }}" - 'stable': "{{ containerd_package }}-1.7.27-3.1.fc{{ ansible_distribution_major_version }}" - 'edge': "{{ containerd_package }}-1.7.27-3.1.fc{{ ansible_distribution_major_version }}" + 'stable': "{{ containerd_package }}-1.7.25-3.1.fc{{ ansible_distribution_major_version }}" + 'edge': "{{ containerd_package }}-1.7.25-3.1.fc{{ ansible_distribution_major_version }}" # https://docs.docker.com/install/linux/docker-ce/fedora/ # https://download.docker.com/linux/fedora//x86_64/stable/Packages/ @@ -55,13 +53,9 @@ docker_versioned_pkg: '27.3': docker-ce-3:27.3.1-1.fc{{ ansible_distribution_major_version }} '27.4': docker-ce-3:27.4.1-1.fc{{ 
ansible_distribution_major_version }} '27.5': docker-ce-3:27.5.1-1.fc{{ ansible_distribution_major_version }} - '28.0': docker-ce-3:28.0.4-1.fc{{ ansible_distribution_major_version }} - '28.1': docker-ce-3:28.1.1-1.fc{{ ansible_distribution_major_version }} - '28.2': docker-ce-3:28.2.2-1.fc{{ ansible_distribution_major_version }} - '28.3': docker-ce-3:28.3.3-1.fc{{ ansible_distribution_major_version }} - 'stable': docker-ce-3:28.3.3-1.fc{{ ansible_distribution_major_version }} - 'edge': docker-ce-3:28.3.3-1.fc{{ ansible_distribution_major_version }} - + '28.0': docker-ce-3:28.0.2-1.fc{{ ansible_distribution_major_version }} + 'stable': docker-ce-3:28.0.2-1.fc{{ ansible_distribution_major_version }} + 'edge': docker-ce-3:28.0.2-1.fc{{ ansible_distribution_major_version }} docker_cli_versioned_pkg: 'latest': docker-ce-cli @@ -77,12 +71,9 @@ docker_cli_versioned_pkg: '27.3': docker-ce-cli-1:27.3.1-1.fc{{ ansible_distribution_major_version }} '27.4': docker-ce-cli-1:27.4.1-1.fc{{ ansible_distribution_major_version }} '27.5': docker-ce-cli-1:27.5.1-1.fc{{ ansible_distribution_major_version }} - '28.0': docker-ce-cli-1:28.0.4-1.fc{{ ansible_distribution_major_version }} - '28.1': docker-ce-cli-1:28.1.1-1.fc{{ ansible_distribution_major_version }} - '28.2': docker-ce-cli-1:28.2.2-1.fc{{ ansible_distribution_major_version }} - '28.3': docker-ce-cli-1:28.3.3-1.fc{{ ansible_distribution_major_version }} - 'stable': docker-ce-cli-1:28.3.3-1.fc{{ ansible_distribution_major_version }} - 'edge': docker-ce-cli-1:28.3.3-1.fc{{ ansible_distribution_major_version }} + '28.0': docker-ce-cli-1:28.0.2-1.fc{{ ansible_distribution_major_version }} + 'stable': docker-ce-cli-1:28.0.2-1.fc{{ ansible_distribution_major_version }} + 'edge': docker-ce-cli-1:28.0.2-1.fc{{ ansible_distribution_major_version }} docker_package_info: enablerepo: "docker-ce" diff --git a/roles/container-engine/docker/vars/redhat.yml b/roles/container-engine/docker/vars/redhat.yml index 3c832369f72..289453ab5c4 100644 --- a/roles/container-engine/docker/vars/redhat.yml +++ b/roles/container-engine/docker/vars/redhat.yml @@ -34,10 +34,8 @@ containerd_versioned_pkg: '1.7.23': "{{ containerd_package }}-1.7.23-3.1.el{{ ansible_distribution_major_version }}" '1.7.24': "{{ containerd_package }}-1.7.24-3.1.el{{ ansible_distribution_major_version }}" '1.7.25': "{{ containerd_package }}-1.7.25-3.1.el{{ ansible_distribution_major_version }}" - '1.7.26': "{{ containerd_package }}-1.7.26-3.1.el{{ ansible_distribution_major_version }}" - '1.7.27': "{{ containerd_package }}-1.7.27-3.1.el{{ ansible_distribution_major_version }}" - 'stable': "{{ containerd_package }}-1.7.27-3.1.el{{ ansible_distribution_major_version }}" - 'edge': "{{ containerd_package }}-1.7.27-3.1.el{{ ansible_distribution_major_version }}" + 'stable': "{{ containerd_package }}-1.7.25-3.1.el{{ ansible_distribution_major_version }}" + 'edge': "{{ containerd_package }}-1.7.25-3.1.el{{ ansible_distribution_major_version }}" # https://docs.docker.com/engine/installation/linux/rhel/#install-from-a-package # https://download.docker.com/linux/rhel/>/x86_64/stable/Packages/ @@ -46,45 +44,39 @@ docker_versioned_pkg: 'latest': docker-ce '18.09': docker-ce-3:18.09.9-3.el7 '19.03': docker-ce-3:19.03.15-3.el{{ ansible_distribution_major_version }} - '20.10': docker-ce-3:20.10.24-3.el{{ ansible_distribution_major_version }} + '20.10': docker-ce-3:20.10.20-3.el{{ ansible_distribution_major_version }} '23.0': docker-ce-3:23.0.6-1.el{{ ansible_distribution_major_version }} '24.0': 
docker-ce-3:24.0.9-1.el{{ ansible_distribution_major_version }} '26.0': docker-ce-3:26.0.2-1.el{{ ansible_distribution_major_version }} '26.1': docker-ce-3:26.1.4-1.el{{ ansible_distribution_major_version }} '27.0': docker-ce-3:27.0.3-1.el{{ ansible_distribution_major_version }} - '27.1': docker-ce-3:27.1.2-1.el{{ ansible_distribution_major_version }} - '27.2': docker-ce-3:27.2.1-1.el{{ ansible_distribution_major_version }} - '27.3': docker-ce-3:27.3.1-1.el{{ ansible_distribution_major_version }} - '27.4': docker-ce-3:27.4.1-1.el{{ ansible_distribution_major_version }} - '27.5': docker-ce-3:27.5.1-1.el{{ ansible_distribution_major_version }} - '28.0': docker-ce-3:28.0.4-1.el{{ ansible_distribution_major_version }} - '28.1': docker-ce-3:28.1.1-1.el{{ ansible_distribution_major_version }} - '28.2': docker-ce-3:28.2.2-1.el{{ ansible_distribution_major_version }} - '28.3': docker-ce-3:28.3.3-1.el{{ ansible_distribution_major_version }} - 'stable': docker-ce-3:28.3.3-1.el{{ ansible_distribution_major_version }} - 'edge': docker-ce-3:28.3.3-1.el{{ ansible_distribution_major_version }} + '27.1': docker-ce-3:27.1.3-1.el{{ ansible_distribution_major_version }} + '27.2': docker-ce-3:27.2.3-1.el{{ ansible_distribution_major_version }} + '27.3': docker-ce-3:27.3.3-1.el{{ ansible_distribution_major_version }} + '27.4': docker-ce-3:27.4.3-1.el{{ ansible_distribution_major_version }} + '27.5': docker-ce-3:27.5.3-1.el{{ ansible_distribution_major_version }} + '28.0': docker-ce-3:28.0.2-1.el{{ ansible_distribution_major_version }} + 'stable': docker-ce-3:28.0.2-1.el{{ ansible_distribution_major_version }} + 'edge': docker-ce-3:28.0.2-1.el{{ ansible_distribution_major_version }} docker_cli_versioned_pkg: 'latest': docker-ce-cli '18.09': docker-ce-cli-1:18.09.9-3.el7 '19.03': docker-ce-cli-1:19.03.15-3.el{{ ansible_distribution_major_version }} - '20.10': docker-ce-cli-1:20.10.24-3.el{{ ansible_distribution_major_version }} + '20.10': docker-ce-cli-1:20.10.20-3.el{{ ansible_distribution_major_version }} '23.0': docker-ce-cli-1:23.0.6-1.el{{ ansible_distribution_major_version }} '24.0': docker-ce-cli-1:24.0.9-1.el{{ ansible_distribution_major_version }} '26.0': docker-ce-cli-1:26.0.2-1.el{{ ansible_distribution_major_version }} '26.1': docker-ce-cli-1:26.1.4-1.el{{ ansible_distribution_major_version }} '27.0': docker-ce-cli-1:27.0.3-1.el{{ ansible_distribution_major_version }} - '27.1': docker-ce-cli-1:27.1.2-1.el{{ ansible_distribution_major_version }} - '27.2': docker-ce-cli-1:27.2.1-1.el{{ ansible_distribution_major_version }} - '27.3': docker-ce-cli-1:27.3.1-1.el{{ ansible_distribution_major_version }} - '27.4': docker-ce-cli-1:27.4.1-1.el{{ ansible_distribution_major_version }} - '27.5': docker-ce-cli-1:27.5.1-1.el{{ ansible_distribution_major_version }} - '28.0': docker-ce-cli-1:28.0.4-1.el{{ ansible_distribution_major_version }} - '28.1': docker-ce-cli-1:28.1.1-1.el{{ ansible_distribution_major_version }} - '28.2': docker-ce-cli-1:28.2.2-1.el{{ ansible_distribution_major_version }} - '28.3': docker-ce-cli-1:28.3.3-1.el{{ ansible_distribution_major_version }} - 'stable': docker-ce-cli-1:28.3.3-1.el{{ ansible_distribution_major_version }} - 'edge': docker-ce-cli-1:28.3.3-1.el{{ ansible_distribution_major_version }} + '27.1': docker-ce-cli-1:27.1.3-1.el{{ ansible_distribution_major_version }} + '27.2': docker-ce-cli-1:27.2.3-1.el{{ ansible_distribution_major_version }} + '27.3': docker-ce-cli-1:27.3.3-1.el{{ ansible_distribution_major_version }} + '27.4': docker-ce-cli-1:27.4.3-1.el{{ 
ansible_distribution_major_version }} + '27.5': docker-ce-cli-1:27.5.3-1.el{{ ansible_distribution_major_version }} + '28.0': docker-ce-cli-1:28.0.2-1.el{{ ansible_distribution_major_version }} + 'stable': docker-ce-cli-1:28.0.2-1.el{{ ansible_distribution_major_version }} + 'edge': docker-ce-cli-1:28.0.2-1.el{{ ansible_distribution_major_version }} docker_package_info: enablerepo: "docker-ce" diff --git a/roles/container-engine/docker/vars/ubuntu.yml b/roles/container-engine/docker/vars/ubuntu.yml index 5d9158ec229..1d4e7bb3e7c 100644 --- a/roles/container-engine/docker/vars/ubuntu.yml +++ b/roles/container-engine/docker/vars/ubuntu.yml @@ -27,10 +27,8 @@ containerd_versioned_pkg: '1.7.23': "{{ containerd_package }}=1.7.23-1" '1.7.24': "{{ containerd_package }}=1.7.24-1" '1.7.25': "{{ containerd_package }}=1.7.25-1" - '1.7.26': "{{ containerd_package }}=1.7.26-1" - '1.7.27': "{{ containerd_package }}=1.7.27-1" - 'stable': "{{ containerd_package }}=1.7.27-1" - 'edge': "{{ containerd_package }}=1.7.27-1" + 'stable': "{{ containerd_package }}=1.7.25-1" + 'edge': "{{ containerd_package }}=1.7.25-1" # https://download.docker.com/linux/ubuntu/ docker_versioned_pkg: @@ -48,10 +46,9 @@ docker_versioned_pkg: '27.3': docker-ce=5:27.3.1-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }} '27.4': docker-ce=5:27.4.1-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }} '27.5': docker-ce=5:27.5.4-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }} - '28.0': docker-ce=5:28.0.4-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }} - '28.1': docker-ce=5:28.1.1-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }} - '28.2': docker-ce=5:28.2.2-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }} - '28.3': docker-ce=5:28.3.3-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }} + '28.0': docker-ce=5:28.0.2-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }} + 'stable': docker-ce=5:28.0.2-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }} + 'edge': docker-ce=5:28.0.2-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }} docker_cli_versioned_pkg: 'latest': docker-ce-cli @@ -68,12 +65,9 @@ docker_cli_versioned_pkg: '27.3': docker-ce-cli=5:27.3.1-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }} '27.4': docker-ce-cli=5:27.4.1-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }} '27.5': docker-ce-cli=5:27.5.4-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }} - '28.0': docker-ce-cli=5:28.0.4-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }} - '28.1': docker-ce-cli=5:28.1.1-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }} - '28.2': docker-ce-cli=5:28.2.2-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }} - '28.3': docker-ce-cli=5:28.3.3-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }} - 'stable': docker-ce-cli=5:28.3.3-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }} - 'edge': docker-ce-cli=5:28.3.3-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }} + 
'28.0': docker-ce-cli=5:28.0.2-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }} + 'stable': docker-ce-cli=5:28.0.2-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }} + 'edge': docker-ce-cli=5:28.0.2-1~ubuntu.{{ ansible_distribution_version }}~{{ ansible_distribution_release | lower }} docker_package_info: pkgs: diff --git a/roles/container-engine/gvisor/molecule/default/converge.yml b/roles/container-engine/gvisor/molecule/default/converge.yml index 552a8888962..b14d078a182 100644 --- a/roles/container-engine/gvisor/molecule/default/converge.yml +++ b/roles/container-engine/gvisor/molecule/default/converge.yml @@ -6,6 +6,6 @@ gvisor_enabled: true container_manager: containerd roles: - - role: kubespray_defaults + - role: kubespray-defaults - role: container-engine/containerd - role: container-engine/gvisor diff --git a/roles/container-engine/gvisor/molecule/default/molecule.yml b/roles/container-engine/gvisor/molecule/default/molecule.yml index f73a9775cc4..9bf49633149 100644 --- a/roles/container-engine/gvisor/molecule/default/molecule.yml +++ b/roles/container-engine/gvisor/molecule/default/molecule.yml @@ -1,18 +1,28 @@ --- role_name_check: 1 +driver: + name: vagrant + provider: + name: libvirt platforms: - - cloud_image: ubuntu-2404 - name: ubuntu24 - vm_cpu_cores: 1 - vm_memory: 1024 - node_groups: + - name: ubuntu20 + box: generic/ubuntu2004 + cpus: 1 + memory: 1024 + nested: true + groups: - kube_control_plane + provider_options: + driver: kvm - name: almalinux9 - cloud_image: almalinux-9 - vm_cpu_cores: 1 - vm_memory: 1024 - node_groups: + box: almalinux/9 + cpus: 1 + memory: 1024 + nested: true + groups: - kube_control_plane + provider_options: + driver: kvm provisioner: name: ansible env: @@ -21,8 +31,9 @@ provisioner: defaults: callbacks_enabled: profile_tasks timeout: 120 - playbooks: - create: ../../../../../tests/cloud_playbooks/create-kubevirt.yml - prepare: ../../../molecule/prepare.yml + inventory: + group_vars: + all: + become: true verifier: - name: ansible + name: testinfra diff --git a/roles/container-engine/gvisor/molecule/default/prepare.yml b/roles/container-engine/gvisor/molecule/default/prepare.yml new file mode 100644 index 00000000000..57c21f2dda2 --- /dev/null +++ b/roles/container-engine/gvisor/molecule/default/prepare.yml @@ -0,0 +1,49 @@ +--- +- name: Prepare generic + hosts: all + become: true + roles: + - role: kubespray-defaults + - role: bootstrap-os + - role: adduser + user: "{{ addusers.kube }}" + tasks: + - name: Download CNI + include_tasks: "../../../../download/tasks/download_file.yml" + vars: + download: "{{ download_defaults | combine(downloads.cni) }}" + +- name: Prepare container runtime + hosts: all + become: true + vars: + container_manager: containerd + kube_network_plugin: cni + roles: + - role: kubespray-defaults + - role: network_plugin/cni + - role: container-engine/crictl + tasks: + - name: Copy test container files + copy: + src: "{{ item }}" + dest: "/tmp/{{ item }}" + owner: root + mode: "0644" + with_items: + - container.json + - sandbox.json + - name: Create /etc/cni/net.d directory + file: + path: /etc/cni/net.d + state: directory + owner: root + mode: "0755" + - name: Setup CNI + copy: + src: "{{ item }}" + dest: "/etc/cni/net.d/{{ item }}" + owner: root + mode: "0644" + with_items: + - 10-mynet.conf diff --git a/roles/container-engine/gvisor/molecule/default/tests/test_default.py b/roles/container-engine/gvisor/molecule/default/tests/test_default.py new 
file mode 100644 index 00000000000..1cb7fb0ffb1 --- /dev/null +++ b/roles/container-engine/gvisor/molecule/default/tests/test_default.py @@ -0,0 +1,29 @@ +import os + +import testinfra.utils.ansible_runner + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') + + +def test_run(host): + gvisorruntime = "/usr/local/bin/runsc" + with host.sudo(): + cmd = host.command(gvisorruntime + " --version") + assert cmd.rc == 0 + assert "runsc version" in cmd.stdout + + +def test_run_pod(host): + runtime = "runsc" + + run_command = "/usr/local/bin/crictl run --with-pull --runtime {} /tmp/container.json /tmp/sandbox.json".format(runtime) + with host.sudo(): + cmd = host.command(run_command) + assert cmd.rc == 0 + + with host.sudo(): + log_f = host.file("/tmp/gvisor1.0.log") + + assert log_f.exists + assert b"Hello from Docker" in log_f.content diff --git a/roles/container-engine/gvisor/molecule/default/verify.yml b/roles/container-engine/gvisor/molecule/default/verify.yml deleted file mode 100644 index 35e847e5323..00000000000 --- a/roles/container-engine/gvisor/molecule/default/verify.yml +++ /dev/null @@ -1,19 +0,0 @@ ---- -- name: Test gvisor - hosts: all - gather_facts: false - tasks: - - name: Get kubespray defaults - import_role: - name: ../../../../../kubespray_defaults - - name: Test version - command: "{{ bin_dir }}/runsc --version" - register: runsc_version - failed_when: > - runsc_version is failed or - 'runsc version' not in runsc_version.stdout - -- name: Test run container - import_playbook: ../../../molecule/test_runtime.yml - vars: - container_runtime: runsc diff --git a/roles/container-engine/kata-containers/molecule/default/converge.yml b/roles/container-engine/kata-containers/molecule/default/converge.yml index 9a7922e9fa1..a6fdf812a78 100644 --- a/roles/container-engine/kata-containers/molecule/default/converge.yml +++ b/roles/container-engine/kata-containers/molecule/default/converge.yml @@ -6,6 +6,6 @@ kata_containers_enabled: true container_manager: containerd roles: - - role: kubespray_defaults + - role: kubespray-defaults - role: container-engine/containerd - role: container-engine/kata-containers diff --git a/roles/container-engine/kata-containers/molecule/default/molecule.yml b/roles/container-engine/kata-containers/molecule/default/molecule.yml index 6e6e1c7cb52..8eaa5d7b87b 100644 --- a/roles/container-engine/kata-containers/molecule/default/molecule.yml +++ b/roles/container-engine/kata-containers/molecule/default/molecule.yml @@ -1,18 +1,28 @@ --- role_name_check: 1 +driver: + name: vagrant + provider: + name: libvirt platforms: - - name: ubuntu22 - cloud_image: ubuntu-2204 - vm_cpu_cores: 1 - vm_memory: 1024 - node_groups: + - name: ubuntu20 + box: generic/ubuntu2004 + cpus: 1 + memory: 1024 + nested: true + groups: - kube_control_plane - - name: ubuntu24 - cloud_image: ubuntu-2404 - vm_cpu_cores: 1 - vm_memory: 1024 - node_groups: + provider_options: + driver: kvm + - name: ubuntu22 + box: generic/ubuntu2204 + cpus: 1 + memory: 1024 + nested: true + groups: - kube_control_plane + provider_options: + driver: kvm provisioner: name: ansible env: @@ -21,8 +31,9 @@ provisioner: defaults: callbacks_enabled: profile_tasks timeout: 120 - playbooks: - create: ../../../../../tests/cloud_playbooks/create-kubevirt.yml - prepare: ../../../molecule/prepare.yml + inventory: + group_vars: + all: + become: true verifier: - name: ansible + name: testinfra diff --git 
a/roles/container-engine/kata-containers/molecule/default/prepare.yml b/roles/container-engine/kata-containers/molecule/default/prepare.yml new file mode 100644 index 00000000000..a5abd27bb45 --- /dev/null +++ b/roles/container-engine/kata-containers/molecule/default/prepare.yml @@ -0,0 +1,49 @@ +--- +- name: Prepare + hosts: all + become: true + roles: + - role: kubespray-defaults + - role: bootstrap-os + - role: adduser + user: "{{ addusers.kube }}" + tasks: + - name: Download CNI + include_tasks: "../../../../download/tasks/download_file.yml" + vars: + download: "{{ download_defaults | combine(downloads.cni) }}" + +- name: Prepare container runtime + hosts: all + become: true + vars: + container_manager: containerd + kube_network_plugin: cni + roles: + - role: kubespray-defaults + - role: network_plugin/cni + - role: container-engine/crictl + tasks: + - name: Copy test container files + copy: + src: "{{ item }}" + dest: "/tmp/{{ item }}" + owner: root + mode: "0644" + with_items: + - container.json + - sandbox.json + - name: Create /etc/cni/net.d directory + file: + path: /etc/cni/net.d + state: directory + owner: "{{ kube_owner }}" + mode: "0755" + - name: Setup CNI + copy: + src: "{{ item }}" + dest: "/etc/cni/net.d/{{ item }}" + owner: root + mode: "0644" + with_items: + - 10-mynet.conf diff --git a/roles/container-engine/kata-containers/molecule/default/tests/test_default.py b/roles/container-engine/kata-containers/molecule/default/tests/test_default.py new file mode 100644 index 00000000000..e10fff4b788 --- /dev/null +++ b/roles/container-engine/kata-containers/molecule/default/tests/test_default.py @@ -0,0 +1,37 @@ +import os + +import testinfra.utils.ansible_runner + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') + + +def test_run(host): + kataruntime = "/opt/kata/bin/kata-runtime" + with host.sudo(): + cmd = host.command(kataruntime + " version") + assert cmd.rc == 0 + assert "kata-runtime" in cmd.stdout + + +def test_run_check(host): + kataruntime = "/opt/kata/bin/kata-runtime" + with host.sudo(): + cmd = host.command(kataruntime + " check") + assert cmd.rc == 0 + assert "System is capable of running" in cmd.stdout + + +def test_run_pod(host): + runtime = "kata-qemu" + + run_command = "/usr/local/bin/crictl run --with-pull --runtime {} /tmp/container.json /tmp/sandbox.json".format(runtime) + with host.sudo(): + cmd = host.command(run_command) + assert cmd.rc == 0 + + with host.sudo(): + log_f = host.file("/tmp/kata1.0.log") + + assert log_f.exists + assert b"Hello from Docker" in log_f.content diff --git a/roles/container-engine/kata-containers/molecule/default/verify.yml b/roles/container-engine/kata-containers/molecule/default/verify.yml deleted file mode 100644 index 1bb02c32147..00000000000 --- a/roles/container-engine/kata-containers/molecule/default/verify.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -- name: Test kata-containers - hosts: all - gather_facts: false - tasks: - - name: Test version - command: "/opt/kata/bin/kata-runtime version" - register: version - failed_when: > - version is failed or - 'kata-runtime' not in version.stdout - - name: Test version - command: "/opt/kata/bin/kata-runtime check" - register: check - failed_when: > - check is failed or - 'System is capable of running' not in check.stdout - -- name: Test run container - import_playbook: ../../../molecule/test_runtime.yml - vars: - container_runtime: kata-qemu - container_manager: containerd diff --git 
a/roles/container-engine/molecule/test_cri.yml b/roles/container-engine/molecule/test_cri.yml deleted file mode 100644 index e40fe111f2e..00000000000 --- a/roles/container-engine/molecule/test_cri.yml +++ /dev/null @@ -1,24 +0,0 @@ ---- -- name: Test container manager - hosts: all - gather_facts: false - become: true - tasks: - - name: Get kubespray defaults - import_role: - name: ../../kubespray_defaults - - name: Collect services facts - ansible.builtin.service_facts: - - - name: Check container manager service is running - assert: - that: - - ansible_facts.services[container_manager + '.service'].state == 'running' - - ansible_facts.services[container_manager + '.service'].status == 'enabled' - - - name: Check runtime version - command: "{{ bin_dir }}/crictl --runtime-endpoint {{ cri_socket }} version" - register: cri_version - failed_when: > - cri_version is failed or - ("RuntimeName: " + cri_name) not in cri_version.stdout diff --git a/roles/container-engine/molecule/test_runtime.yml b/roles/container-engine/molecule/test_runtime.yml deleted file mode 100644 index e9706362944..00000000000 --- a/roles/container-engine/molecule/test_runtime.yml +++ /dev/null @@ -1,42 +0,0 @@ ---- -- name: Test container runtime - hosts: all - gather_facts: false - become: true - roles: - - role: ../../kubespray_defaults - tasks: - - name: Copy test container files - template: - src: "{{ item }}.j2" - dest: "/tmp/{{ item }}" - owner: root - mode: "0644" - loop: - - container.json - - sandbox.json - - name: Check running a container with runtime {{ container_runtime }} - block: - - name: Run container - command: - argv: - - "{{ bin_dir }}/crictl" - - run - - --with-pull - - --runtime - - "{{ container_runtime }}" - - /tmp/container.json - - /tmp/sandbox.json - - name: Check log file - slurp: - src: "/tmp/{{ container_runtime }}1.0.log" - register: log_file - failed_when: > - log_file is failed or - 'Hello from Docker' not in (log_file.content | b64decode) - rescue: - - name: Display container manager config on error - command: "{{ bin_dir }}/crictl info" - - name: Check container manager logs - command: journalctl -u {{ container_manager }} - failed_when: true diff --git a/roles/container-engine/validate-container-engine/tasks/main.yml b/roles/container-engine/validate-container-engine/tasks/main.yml index c1e9d45f838..ffb541c2480 100644 --- a/roles/container-engine/validate-container-engine/tasks/main.yml +++ b/roles/container-engine/validate-container-engine/tasks/main.yml @@ -84,7 +84,7 @@ block: - name: Drain node include_role: - name: remove_node/pre_remove + name: remove-node/pre-remove apply: tags: - pre-remove @@ -111,7 +111,7 @@ block: - name: Drain node include_role: - name: remove_node/pre_remove + name: remove-node/pre-remove apply: tags: - pre-remove @@ -137,7 +137,7 @@ block: - name: Drain node include_role: - name: remove_node/pre_remove + name: remove-node/pre-remove apply: tags: - pre-remove diff --git a/roles/container-engine/youki/molecule/default/converge.yml b/roles/container-engine/youki/molecule/default/converge.yml index caa6176559d..11ef8f6bf6c 100644 --- a/roles/container-engine/youki/molecule/default/converge.yml +++ b/roles/container-engine/youki/molecule/default/converge.yml @@ -6,6 +6,6 @@ youki_enabled: true container_manager: crio roles: - - role: kubespray_defaults + - role: kubespray-defaults - role: container-engine/cri-o - role: container-engine/youki diff --git a/roles/container-engine/youki/molecule/default/molecule.yml 
b/roles/container-engine/youki/molecule/default/molecule.yml index f73a9775cc4..9bf49633149 100644 --- a/roles/container-engine/youki/molecule/default/molecule.yml +++ b/roles/container-engine/youki/molecule/default/molecule.yml @@ -1,18 +1,28 @@ --- role_name_check: 1 +driver: + name: vagrant + provider: + name: libvirt platforms: - - cloud_image: ubuntu-2404 - name: ubuntu24 - vm_cpu_cores: 1 - vm_memory: 1024 - node_groups: + - name: ubuntu20 + box: generic/ubuntu2004 + cpus: 1 + memory: 1024 + nested: true + groups: - kube_control_plane + provider_options: + driver: kvm - name: almalinux9 - cloud_image: almalinux-9 - vm_cpu_cores: 1 - vm_memory: 1024 - node_groups: + box: almalinux/9 + cpus: 1 + memory: 1024 + nested: true + groups: - kube_control_plane + provider_options: + driver: kvm provisioner: name: ansible env: @@ -21,8 +31,9 @@ provisioner: defaults: callbacks_enabled: profile_tasks timeout: 120 - playbooks: - create: ../../../../../tests/cloud_playbooks/create-kubevirt.yml - prepare: ../../../molecule/prepare.yml + inventory: + group_vars: + all: + become: true verifier: - name: ansible + name: testinfra diff --git a/roles/container-engine/youki/molecule/default/prepare.yml b/roles/container-engine/youki/molecule/default/prepare.yml new file mode 100644 index 00000000000..a72bdad7f5d --- /dev/null +++ b/roles/container-engine/youki/molecule/default/prepare.yml @@ -0,0 +1,49 @@ +--- +- name: Prepare generic + hosts: all + become: true + roles: + - role: kubespray-defaults + - role: bootstrap-os + - role: adduser + user: "{{ addusers.kube }}" + tasks: + - name: Download CNI + include_tasks: "../../../../download/tasks/download_file.yml" + vars: + download: "{{ download_defaults | combine(downloads.cni) }}" + +- name: Prepare container runtime + hosts: all + become: true + vars: + container_manager: crio + kube_network_plugin: cni + roles: + - role: kubespray-defaults + - role: network_plugin/cni + - role: container-engine/crictl + tasks: + - name: Copy test container files + copy: + src: "{{ item }}" + dest: "/tmp/{{ item }}" + owner: root + mode: "0644" + with_items: + - container.json + - sandbox.json + - name: Create /etc/cni/net.d directory + file: + path: /etc/cni/net.d + state: directory + owner: root + mode: "0755" + - name: Setup CNI + copy: + src: "{{ item }}" + dest: "/etc/cni/net.d/{{ item }}" + owner: root + mode: "0644" + with_items: + - 10-mynet.conf diff --git a/roles/container-engine/youki/molecule/default/tests/test_default.py b/roles/container-engine/youki/molecule/default/tests/test_default.py new file mode 100644 index 00000000000..54ed5c54cbd --- /dev/null +++ b/roles/container-engine/youki/molecule/default/tests/test_default.py @@ -0,0 +1,29 @@ +import os + +import testinfra.utils.ansible_runner + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') + + +def test_run(host): + youkiruntime = "/usr/local/bin/youki" + with host.sudo(): + cmd = host.command(youkiruntime + " --version") + assert cmd.rc == 0 + assert "youki" in cmd.stdout + + +def test_run_pod(host): + runtime = "youki" + + run_command = "/usr/local/bin/crictl run --with-pull --runtime {} /tmp/container.json /tmp/sandbox.json".format(runtime) + with host.sudo(): + cmd = host.command(run_command) + assert cmd.rc == 0 + + with host.sudo(): + log_f = host.file("/tmp/youki1.0.log") + + assert log_f.exists + assert b"Hello from Docker" in log_f.content diff --git a/roles/container-engine/youki/molecule/default/verify.yml 
b/roles/container-engine/youki/molecule/default/verify.yml deleted file mode 100644 index 75adeb55933..00000000000 --- a/roles/container-engine/youki/molecule/default/verify.yml +++ /dev/null @@ -1,19 +0,0 @@ ---- -- name: Test youki - hosts: all - gather_facts: false - tasks: - - name: Get kubespray defaults - import_role: - name: ../../../../../kubespray_defaults - - name: Test version - command: "{{ bin_dir }}/youki --version" - register: youki_version - failed_when: > - youki_version is failed or - 'youki' not in youki_version.stdout - -- name: Test run container - import_playbook: ../../../molecule/test_runtime.yml - vars: - container_runtime: youki diff --git a/roles/kubernetes/control-plane/meta/main.yml b/roles/kubernetes/control-plane/meta/main.yml index ceb05687d83..9f460e51153 100644 --- a/roles/kubernetes/control-plane/meta/main.yml +++ b/roles/kubernetes/control-plane/meta/main.yml @@ -6,5 +6,4 @@ dependencies: when: - etcd_deployment_type == "kubeadm" - not (ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk", "ClearLinux"] or is_fedora_coreos) - - role: network_plugin/calico_defaults - role: etcd_defaults diff --git a/roles/kubespray_defaults/defaults/main/download.yml b/roles/kubespray_defaults/defaults/main/download.yml index 91745660e8d..82436003e1e 100644 --- a/roles/kubespray_defaults/defaults/main/download.yml +++ b/roles/kubespray_defaults/defaults/main/download.yml @@ -114,7 +114,7 @@ flannel_version: 0.26.7 flannel_cni_version: 1.7.1-flannel1 cni_version: "{{ (cni_binary_checksums['amd64'] | dict2items)[0].key }}" -cilium_version: "1.17.7" +cilium_version: "1.18.1" cilium_cli_version: "{{ (ciliumcli_binary_checksums['amd64'] | dict2items)[0].key }}" cilium_enable_hubble: false diff --git a/roles/kubespray_defaults/defaults/main/main.yml b/roles/kubespray_defaults/defaults/main/main.yml index b6795ff40ab..2e3cbadfad4 100644 --- a/roles/kubespray_defaults/defaults/main/main.yml +++ b/roles/kubespray_defaults/defaults/main/main.yml @@ -194,7 +194,7 @@ kube_cert_compat_dir: "/etc/kubernetes/pki" kube_token_dir: "{{ kube_config_dir }}/tokens" # This is the user that owns the cluster installation. -kube_owner: kube +kube_owner: root # This is the group that the cert creation scripts chgrp the # cert files to. Not really changeable... 
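Both defaults changed just above (kube_owner in defaults/main/main.yml and the pinned cilium_version in defaults/main/download.yml) are ordinary Ansible variables, so clusters that rely on the previous values can keep them by overriding the role defaults in inventory group_vars rather than editing the roles. A minimal sketch, assuming the usual inventory/&lt;cluster&gt;/group_vars layout and simply re-using the values from the removed lines:

# group_vars/k8s_cluster/k8s-cluster.yml  (path is an assumption; adjust to your inventory)
kube_owner: kube            # value removed by this diff (new default is root)
cilium_version: "1.17.7"    # previously pinned Cilium release (new default is 1.18.1)
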
diff --git a/roles/network_plugin/calico/tasks/install.yml b/roles/network_plugin/calico/tasks/install.yml index 6b5d483b55e..52bd5c452a1 100644 --- a/roles/network_plugin/calico/tasks/install.yml +++ b/roles/network_plugin/calico/tasks/install.yml @@ -353,7 +353,7 @@ {% if not calico_no_global_as_num | default(false) %}"asNumber": {{ global_as_num }},{% endif %} "nodeToNodeMeshEnabled": {{ nodeToNodeMeshEnabled | default('true') }} , {% if calico_advertise_cluster_ips | default(false) %} - "serviceClusterIPs": + "serviceClusterIPs": >- {%- if ipv4_stack and ipv6_stack-%} [{"cidr": "{{ kube_service_addresses }}", "cidr": "{{ kube_service_addresses_ipv6 }}"}], {%- elif ipv6_stack-%} diff --git a/roles/network_plugin/calico/templates/calico-node.yml.j2 b/roles/network_plugin/calico/templates/calico-node.yml.j2 index ad3eefc40b4..d5b509bbafe 100644 --- a/roles/network_plugin/calico/templates/calico-node.yml.j2 +++ b/roles/network_plugin/calico/templates/calico-node.yml.j2 @@ -275,10 +275,6 @@ spec: # Enable or disable usage report - name: FELIX_USAGEREPORTINGENABLED value: "{{ calico_usage_reporting }}" -{% if calico_version is version('3.29.0', '>=') %} - - name: FELIX_NFTABLESMODE - value: "{{ calico_nftable_mode }}" -{% endif %} # Set MTU for tunnel device used if ipip is enabled {% if calico_mtu is defined %} # Set MTU for tunnel device used if ipip is enabled diff --git a/roles/network_plugin/calico_defaults/defaults/main.yml b/roles/network_plugin/calico_defaults/defaults/main.yml index 899a9fd7180..da899546b25 100644 --- a/roles/network_plugin/calico_defaults/defaults/main.yml +++ b/roles/network_plugin/calico_defaults/defaults/main.yml @@ -14,6 +14,9 @@ calico_ipv4pool_ipip: "Off" calico_ipip_mode: Never # valid values are 'Always', 'Never' and 'CrossSubnet' calico_vxlan_mode: Always # valid values are 'Always', 'Never' and 'CrossSubnet' +calico_cni_pool: true +calico_cni_pool_ipv6: true + # add default ippool blockSize calico_pool_blocksize: 26 @@ -98,10 +101,6 @@ calico_iptables_lock_timeout_secs: 10 # Choose Calico iptables backend: "Legacy", "Auto" or "NFT" (FELIX_IPTABLESBACKEND) calico_iptables_backend: "Auto" -# Calico NFTable Mode Support (tech preview 3.29) -# Valid option: Disabled (default), Enabled -calico_nftable_mode: "Disabled" - # Calico Wireguard support calico_wireguard_enabled: false calico_wireguard_packages: [] diff --git a/roles/network_plugin/cilium/defaults/main.yml b/roles/network_plugin/cilium/defaults/main.yml index 42c54e5af6b..9b754b85616 100644 --- a/roles/network_plugin/cilium/defaults/main.yml +++ b/roles/network_plugin/cilium/defaults/main.yml @@ -1,12 +1,9 @@ --- -cilium_min_version_required: "1.15" - -# remove migrate after 2.29 released -cilium_remove_old_resources: false +cilium_min_version_required: "1.10" # Log-level cilium_debug: false -cilium_mtu: "0" +cilium_mtu: "" cilium_enable_ipv4: "{{ ipv4_stack }}" cilium_enable_ipv6: "{{ ipv6_stack }}" @@ -14,7 +11,7 @@ cilium_enable_ipv6: "{{ ipv6_stack }}" cilium_l2announcements: false # Cilium agent health port -cilium_agent_health_port: "9879" +cilium_agent_health_port: "{%- if cilium_version is version('1.11.6', '>=') -%}9879{%- else -%}9876{%- endif -%}" # Identity allocation mode selects how identities are shared between cilium # nodes by setting how they are stored. The options are "crd" or "kvstore". 
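The hunk above replaces the fixed agent health port with a default derived from cilium_version, and the same version-gated pattern is reused further down for the Prometheus scrape-port defaults. A minimal sketch of that pattern, using a hypothetical variable name and the equivalent inline Jinja conditional:

# Hypothetical illustration only: resolves to 9879 for Cilium >= 1.11.6 and to 9876 for
# older releases, mirroring the version-gated defaults used elsewhere in this role.
example_agent_health_port: "{{ '9879' if cilium_version is version('1.11.6', '>=') else '9876' }}"
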
@@ -29,7 +26,7 @@ cilium_agent_health_port: "9879" # - --synchronize-k8s-nodes # - --identity-allocation-mode=kvstore # - Ref: https://docs.cilium.io/en/stable/internals/cilium_operator/#kvstore-operations -cilium_identity_allocation_mode: crd +cilium_identity_allocation_mode: kvstore # Etcd SSL dirs cilium_cert_dir: /etc/cilium/certs @@ -58,20 +55,24 @@ cilium_enable_prometheus: false cilium_enable_portmap: false # Monitor aggregation level (none/low/medium/maximum) cilium_monitor_aggregation: medium -# Kube Proxy Replacement mode (true/false) -cilium_kube_proxy_replacement: false - -# If not defined `cilium_dns_proxy_enable_transparent_mode`, it will following the Cilium behavior. -# When Cilium is configured to replace kube-proxy, it automatically enables dnsProxy, which will conflict with nodelocaldns. -# You can set `false` avoid conflict with nodelocaldns. -# https://github.com/cilium/cilium/issues/33144 -# cilium_dns_proxy_enable_transparent_mode: +# Kube Proxy Replacement mode (strict/partial) +cilium_kube_proxy_replacement: partial # If upgrading from Cilium < 1.5, you may want to override some of these options # to prevent service disruptions. See also: # http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action cilium_preallocate_bpf_maps: false +# `cilium_tofqdns_enable_poller` is deprecated in 1.8, removed in 1.9 +cilium_tofqdns_enable_poller: false + +# `cilium_enable_legacy_services` is deprecated in 1.6, removed in 1.9 +cilium_enable_legacy_services: false + +# Deploy cilium even if kube_network_plugin is not cilium. +# This enables to deploy cilium alongside another CNI to replace kube-proxy. +cilium_deploy_additionally: false + # Auto direct nodes routes can be used to advertise pods routes in your cluster # without any tunelling (with `cilium_tunnel_mode` sets to `disabled`). # This works only if you have a L2 connectivity between all your nodes. @@ -103,8 +104,8 @@ cilium_encryption_enabled: false cilium_encryption_type: "ipsec" # Enable encryption for pure node to node traffic. -# This option is only effective when `cilium_encryption_type` is set to `wireguard`. -cilium_encryption_node_encryption: false +# This option is only effective when `cilium_encryption_type` is set to `ipsec`. +cilium_ipsec_node_encryption: false # If your kernel or distribution does not support WireGuard, Cilium agent can be configured to fall back on the user-space implementation. # When this flag is enabled and Cilium detects that the kernel has no native support for WireGuard, @@ -118,7 +119,6 @@ cilium_wireguard_userspace_fallback: false # In case they select the Pod at egress, then the bandwidth enforcement will be disabled for those Pods. # Bandwidth Manager requires a v5.1.x or more recent Linux kernel. cilium_enable_bandwidth_manager: false -cilium_enable_bandwidth_manager_bbr: false # IP Masquerade Agent # https://docs.cilium.io/en/stable/concepts/networking/masquerading/ @@ -141,7 +141,6 @@ cilium_non_masquerade_cidrs: ### Indicates whether to masquerade traffic to the link local prefix. ### If the masqLinkLocal is not set or set to false, then 169.254.0.0/16 is appended to the non-masquerade CIDRs list. 
cilium_masq_link_local: false -cilium_masq_link_local_ipv6: false ### A time interval at which the agent attempts to reload config from disk cilium_ip_masq_resync_interval: 60s @@ -150,10 +149,10 @@ cilium_ip_masq_resync_interval: 60s cilium_enable_hubble: false ### Enable Hubble-ui cilium_enable_hubble_ui: "{{ cilium_enable_hubble }}" -### Enable Hubble Metrics (deprecated) +### Enable Hubble Metrics cilium_enable_hubble_metrics: false ### if cilium_enable_hubble_metrics: true -cilium_hubble_metrics: [] +cilium_hubble_metrics: {} # - dns # - drop # - tcp @@ -165,29 +164,12 @@ cilium_hubble_install: false ### Enable auto generate certs if cilium_hubble_install: true cilium_hubble_tls_generate: false -cilium_hubble_export_file_max_backups: "5" -cilium_hubble_export_file_max_size_mb: "10" - -cilium_hubble_export_dynamic_enabled: false -cilium_hubble_export_dynamic_config_content: - - name: all - fieldMask: [] - includeFilters: [] - excludeFilters: [] - filePath: "/var/run/cilium/hubble/events.log" - -# Override the DNS suffix that Hubble-Relay uses to resolve its peer service. -# It defaults to the inventory's `dns_domain`. -cilium_hubble_peer_service_cluster_domain: "{{ dns_domain }}" - ### Capacity of Hubble events buffer. The provided value must be one less than an integer power of two and no larger than 65535 ### (ie: 1, 3, ..., 2047, 4095, ..., 65535) (default 4095) # cilium_hubble_event_buffer_capacity: 4095 ### Buffer size of the channel to receive monitor events. # cilium_hubble_event_queue_size: 50 -cilium_gateway_api_enabled: false - # The default IP address management mode is "Cluster Scope". # https://docs.cilium.io/en/stable/concepts/networking/ipam/ cilium_ipam_mode: cluster-pool @@ -212,8 +194,7 @@ cilium_ipam_mode: cluster-pool # Extra arguments for the Cilium agent -cilium_agent_custom_args: [] # deprecated -cilium_agent_extra_args: [] +cilium_agent_custom_args: [] # For adding and mounting extra volumes to the cilium agent cilium_agent_extra_volumes: [] @@ -237,19 +218,9 @@ cilium_operator_extra_volumes: [] cilium_operator_extra_volume_mounts: [] # Extra arguments for the Cilium Operator -cilium_operator_custom_args: [] # deprecated -cilium_operator_extra_args: [] +cilium_operator_custom_args: [] -# Tolerations of the cilium operator -cilium_operator_tolerations: - - operator: "Exists" - -# Unique ID of the cluster. Must be unique across all connected -# clusters and in the range of 1 to 255. Only required for Cluster Mesh, -# may be 0 if Cluster Mesh is not used. -cilium_cluster_id: 0 # Name of the cluster. Only relevant when building a mesh of clusters. -# The "default" name cannot be used if the Cluster ID is different from 0. cilium_cluster_name: default # Make Cilium take ownership over the `/etc/cni/net.d` directory on the node, renaming all non-Cilium CNI configurations to `*.cilium_bak`. @@ -292,7 +263,7 @@ cilium_enable_bpf_masquerade: false # host stack (true) or directly and more efficiently out of BPF (false) if # the kernel supports it. The latter has the implication that it will also # bypass netfilter in the host namespace. -cilium_enable_host_legacy_routing: false +cilium_enable_host_legacy_routing: true # -- Enable use of the remote node identity. 
# ref: https://docs.cilium.io/en/v1.7/install/upgrade/#configmap-remote-node-identity @@ -336,9 +307,9 @@ cilium_rolling_restart_wait_retries_count: 30 cilium_rolling_restart_wait_retries_delay_seconds: 10 # Cilium changed the default metrics exporter ports in 1.12 -cilium_agent_scrape_port: "9962" -cilium_operator_scrape_port: "9963" -cilium_hubble_scrape_port: "9965" +cilium_agent_scrape_port: "{{ cilium_version is version('1.12', '>=') | ternary('9962', '9090') }}" +cilium_operator_scrape_port: "{{ cilium_version is version('1.12', '>=') | ternary('9963', '6942') }}" +cilium_hubble_scrape_port: "{{ cilium_version is version('1.12', '>=') | ternary('9965', '9091') }}" # Cilium certgen args for generate certificate for hubble mTLS cilium_certgen_args: @@ -357,12 +328,23 @@ cilium_certgen_args: hubble-relay-client-cert-secret-name: hubble-relay-client-certs hubble-relay-server-cert-generate: false +# A list of extra rules variables to add to clusterrole for cilium operator, formatted like: +# cilium_clusterrole_rules_operator_extra_vars: +# - apiGroups: +# - '""' +# resources: +# - pods +# verbs: +# - delete +# - apiGroups: +# - '""' +# resources: +# - nodes +# verbs: +# - list +# - watch +# resourceNames: +# - toto +cilium_clusterrole_rules_operator_extra_vars: [] cilium_enable_host_firewall: false cilium_policy_audit_mode: false - -# Cilium extra install flags -cilium_install_extra_flags: "" - -# Cilium extra values, use any values from cilium Helm Chart -# ref: https://docs.cilium.io/en/stable/helm-reference/ -cilium_extra_values: {} diff --git a/roles/network_plugin/cilium/tasks/apply.yml b/roles/network_plugin/cilium/tasks/apply.yml index 546a0a4920a..e63502f9b25 100644 --- a/roles/network_plugin/cilium/tasks/apply.yml +++ b/roles/network_plugin/cilium/tasks/apply.yml @@ -1,19 +1,14 @@ --- -- name: Check if Cilium Helm release exists (via cilium version) - command: "{{ bin_dir }}/cilium version" - register: cilium_release_info - when: inventory_hostname == groups['kube_control_plane'][0] - failed_when: false - changed_when: false - -- name: Set action to install or upgrade - set_fact: - cilium_action: "{{ 'install' if ('release: not found' in cilium_release_info.stderr | default('') or 'release: not found' in cilium_release_info.stdout | default('')) else 'upgrade' }}" - -- name: Cilium | Install - environment: "{{ proxy_env }}" - command: "{{ bin_dir }}/cilium {{ cilium_action }} --version {{ cilium_version }} -f {{ kube_config_dir }}/cilium-values.yaml -f {{ kube_config_dir }}/cilium-extra-values.yaml {{ cilium_install_extra_flags }}" - when: inventory_hostname == groups['kube_control_plane'][0] +- name: Cilium | Start Resources + kube: + name: "{{ item.item.name }}" + namespace: "kube-system" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/{{ item.item.name }}-{{ item.item.file }}" + state: "latest" + loop: "{{ cilium_node_manifests.results }}" + when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped - name: Cilium | Wait for pods to run command: "{{ kubectl }} -n kube-system get pods -l k8s-app=cilium -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" # noqa literal-compare @@ -24,6 +19,19 @@ failed_when: false when: inventory_hostname == groups['kube_control_plane'][0] +- name: Cilium | Hubble install + kube: + name: "{{ item.item.name }}" + namespace: "kube-system" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ 
kube_config_dir }}/addons/hubble/{{ item.item.name }}-{{ item.item.file }}" + state: "latest" + loop: "{{ cilium_hubble_manifests.results }}" + when: + - inventory_hostname == groups['kube_control_plane'][0] and not item is skipped + - cilium_enable_hubble and cilium_hubble_install + - name: Cilium | Wait for CiliumLoadBalancerIPPool CRD to be present command: "{{ kubectl }} wait --for condition=established --timeout=60s crd/ciliumloadbalancerippools.cilium.io" register: cillium_lbippool_crd_ready diff --git a/roles/network_plugin/cilium/tasks/check.yml b/roles/network_plugin/cilium/tasks/check.yml index 7471fe36d01..11fcb23fb68 100644 --- a/roles/network_plugin/cilium/tasks/check.yml +++ b/roles/network_plugin/cilium/tasks/check.yml @@ -18,13 +18,13 @@ when: - cilium_ipsec_enabled is defined - cilium_ipsec_enabled - - kube_network_plugin == 'cilium' or cilium_deploy_additionally + - kube_network_plugin == 'cilium' or cilium_deploy_additionally | default(false) | bool - name: Stop if kernel version is too low for Cilium Wireguard encryption assert: that: ansible_kernel.split('-')[0] is version('5.6.0', '>=') when: - - kube_network_plugin == 'cilium' or cilium_deploy_additionally + - kube_network_plugin == 'cilium' or cilium_deploy_additionally | default(false) | bool - cilium_encryption_enabled - cilium_encryption_type == "wireguard" - not ignore_assert_errors @@ -48,7 +48,7 @@ msg: "cilium_encryption_type must be either 'ipsec' or 'wireguard'" when: cilium_encryption_enabled -- name: Stop if cilium_version is < {{ cilium_min_version_required }} +- name: Stop if cilium_version is < 1.10.0 assert: that: cilium_version is version(cilium_min_version_required, '>=') msg: "cilium_version is too low. Minimum version {{ cilium_min_version_required }}" diff --git a/roles/network_plugin/cilium/tasks/install.yml b/roles/network_plugin/cilium/tasks/install.yml index 252ba093f4a..e6e7e31b4f7 100644 --- a/roles/network_plugin/cilium/tasks/install.yml +++ b/roles/network_plugin/cilium/tasks/install.yml @@ -30,28 +30,64 @@ when: - cilium_identity_allocation_mode == "kvstore" -- name: Cilium | Enable portmap addon +- name: Cilium | Create hubble dir + file: + path: "{{ kube_config_dir }}/addons/hubble" + state: directory + owner: root + group: root + mode: "0755" + when: + - inventory_hostname == groups['kube_control_plane'][0] + - cilium_hubble_install + +- name: Cilium | Create Cilium node manifests template: - src: 000-cilium-portmap.conflist.j2 - dest: /etc/cni/net.d/000-cilium-portmap.conflist + src: "{{ item.name }}/{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.name }}-{{ item.file }}" mode: "0644" - when: cilium_enable_portmap + loop: + - {name: cilium, file: config.yml, type: cm} + - {name: cilium-operator, file: crb.yml, type: clusterrolebinding} + - {name: cilium-operator, file: cr.yml, type: clusterrole} + - {name: cilium, file: crb.yml, type: clusterrolebinding} + - {name: cilium, file: cr.yml, type: clusterrole} + - {name: cilium, file: secret.yml, type: secret, when: "{{ cilium_encryption_enabled and cilium_encryption_type == 'ipsec' }}"} + - {name: cilium, file: ds.yml, type: ds} + - {name: cilium-operator, file: deploy.yml, type: deploy} + - {name: cilium-operator, file: sa.yml, type: sa} + - {name: cilium, file: sa.yml, type: sa} + register: cilium_node_manifests + when: + - ('kube_control_plane' in group_names) + - item.when | default(True) | bool -- name: Cilium | Render values +- name: Cilium | Create Cilium Hubble manifests template: - src: values.yaml.j2 - dest: "{{ 
kube_config_dir }}/cilium-values.yaml" + src: "{{ item.name }}/{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/addons/hubble/{{ item.name }}-{{ item.file }}" mode: "0644" + loop: + - {name: hubble, file: config.yml, type: cm} + - {name: hubble, file: crb.yml, type: clusterrolebinding} + - {name: hubble, file: cr.yml, type: clusterrole} + - {name: hubble, file: cronjob.yml, type: cronjob, when: "{{ cilium_hubble_tls_generate }}"} + - {name: hubble, file: deploy.yml, type: deploy} + - {name: hubble, file: job.yml, type: job, when: "{{ cilium_hubble_tls_generate }}"} + - {name: hubble, file: sa.yml, type: sa} + - {name: hubble, file: service.yml, type: service} + register: cilium_hubble_manifests when: - inventory_hostname == groups['kube_control_plane'][0] + - cilium_enable_hubble and cilium_hubble_install + - item.when | default(True) | bool -- name: Cilium | Copy extra values - copy: - content: "{{ cilium_extra_values | to_nice_yaml(indent=2) }}" - dest: "{{ kube_config_dir }}/cilium-extra-values.yaml" +- name: Cilium | Enable portmap addon + template: + src: 000-cilium-portmap.conflist.j2 + dest: /etc/cni/net.d/000-cilium-portmap.conflist mode: "0644" - when: - - inventory_hostname == groups['kube_control_plane'][0] + when: cilium_enable_portmap - name: Cilium | Copy Ciliumcli binary from download dir copy: diff --git a/roles/network_plugin/cilium/tasks/main.yml b/roles/network_plugin/cilium/tasks/main.yml index dcdad1f94b8..8123c5a4c49 100644 --- a/roles/network_plugin/cilium/tasks/main.yml +++ b/roles/network_plugin/cilium/tasks/main.yml @@ -5,10 +5,5 @@ - name: Cilium install include_tasks: install.yml -# Remove after 2.29 released -- name: Cilium remove old resources - when: cilium_remove_old_resources - include_tasks: remove_old_resources.yml - - name: Cilium apply include_tasks: apply.yml diff --git a/roles/network_plugin/cilium/tasks/remove_old_resources.yml b/roles/network_plugin/cilium/tasks/remove_old_resources.yml deleted file mode 100644 index 93bbcafac58..00000000000 --- a/roles/network_plugin/cilium/tasks/remove_old_resources.yml +++ /dev/null @@ -1,45 +0,0 @@ ---- -# Remove after 2.29 released -- name: Cilium | Delete Old Resource - command: | - {{ kubectl }} delete {{ item.kind | lower }} {{ item.name }} \ - {{ '-n kube-system' if item.kind not in ['ClusterRole', 'ClusterRoleBinding'] else '' }} \ - loop: - - { kind: ServiceAccount, name: cilium } - - { kind: ServiceAccount, name: cilium-operator } - - { kind: ServiceAccount, name: hubble-generate-certs } - - { kind: ServiceAccount, name: hubble-relay } - - { kind: ServiceAccount, name: hubble-ui } - - { kind: Service, name: hubble-metrics } - - { kind: Service, name: hubble-relay-metrics } - - { kind: Service, name: hubble-relay } - - { kind: Service, name: hubble-ui } - - { kind: Service, name: hubble-peer } - - { kind: Deployment, name: cilium-operator } - - { kind: Deployment, name: hubble-relay } - - { kind: Deployment, name: hubble-ui } - - { kind: DaemonSet, name: cilium } - - { kind: CronJob, name: hubble-generate-certs } - - { kind: Job, name: hubble-generate-certs } - - { kind: ConfigMap, name: cilium-config } - - { kind: ConfigMap, name: ip-masq-agent } - - { kind: ConfigMap, name: hubble-relay-config } - - { kind: ConfigMap, name: hubble-ui-nginx } - - { kind: ClusterRole, name: cilium } - - { kind: ClusterRole, name: cilium-operator } - - { kind: ClusterRole, name: hubble-generate-certs } - - { kind: ClusterRole, name: hubble-relay } - - { kind: ClusterRole, name: hubble-ui } - - { kind: ClusterRoleBinding, 
name: cilium } - - { kind: ClusterRoleBinding, name: cilium-operator } - - { kind: ClusterRoleBinding, name: hubble-generate-certs } - - { kind: ClusterRoleBinding, name: hubble-relay } - - { kind: ClusterRoleBinding, name: hubble-ui } - - { kind: Secret, name: hubble-ca-secret } - - { kind: Secret, name: hubble-relay-client-certs } - - { kind: Secret, name: hubble-server-certs } - register: patch_result - when: inventory_hostname == groups['kube_control_plane'][0] - failed_when: - - patch_result.rc != 0 - - "'not found' not in patch_result.stderr" diff --git a/roles/network_plugin/cilium/templates/cilium-operator/cr.yml.j2 b/roles/network_plugin/cilium/templates/cilium-operator/cr.yml.j2 new file mode 100644 index 00000000000..038d25fa881 --- /dev/null +++ b/roles/network_plugin/cilium/templates/cilium-operator/cr.yml.j2 @@ -0,0 +1,193 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cilium-operator +rules: +- apiGroups: + - "" + resources: + # to automatically delete [core|kube]dns pods so that are starting to being + # managed by Cilium + - pods + verbs: + - get + - list + - watch + - delete +- apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch +- apiGroups: + - "" + resources: + # To remove node taints + - nodes + # To set NetworkUnavailable false on startup + - nodes/status + verbs: + - patch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + # to perform LB IP allocation for BGP + - services/status + verbs: + - update + - patch +- apiGroups: + - "" + resources: + # to perform the translation of a CNP that contains `ToGroup` to its endpoints + - services + - endpoints + # to check apiserver connectivity + - namespaces + verbs: + - get + - list + - watch +- apiGroups: + - cilium.io + resources: + - ciliumnetworkpolicies + - ciliumnetworkpolicies/status + - ciliumnetworkpolicies/finalizers + - ciliumclusterwidenetworkpolicies + - ciliumclusterwidenetworkpolicies/status + - ciliumclusterwidenetworkpolicies/finalizers + - ciliumendpoints + - ciliumendpoints/status + - ciliumendpoints/finalizers + - ciliumnodes + - ciliumnodes/status + - ciliumnodes/finalizers + - ciliumidentities + - ciliumidentities/status + - ciliumidentities/finalizers + - ciliumlocalredirectpolicies + - ciliumlocalredirectpolicies/status + - ciliumlocalredirectpolicies/finalizers +{% if cilium_version is version('1.11', '>=') %} + - ciliumendpointslices +{% endif %} +{% if cilium_version is version('1.12', '>=') %} + - ciliumbgploadbalancerippools + - ciliumloadbalancerippools + - ciliumloadbalancerippools/status + - ciliumbgppeeringpolicies + - ciliumenvoyconfigs +{% endif %} +{% if cilium_version is version('1.15', '>=') %} + - ciliumbgppeerconfigs + - ciliumbgpadvertisements + - ciliumbgpnodeconfigs +{% endif %} +{% if cilium_version is version('1.16', '>=') %} + - ciliumbgpclusterconfigs + - ciliumbgpclusterconfigs/status + - ciliumbgpnodeconfigoverrides +{% endif %} + verbs: + - '*' +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - create + - get + - list + - update + - watch +# For cilium-operator running in HA mode. +# +# Cilium operator running in HA mode requires the use of ResourceLock for Leader Election +# between multiple running instances. 
+# The preferred way of doing this is to use LeasesResourceLock as edits to Leases are less +# common and fewer objects in the cluster watch "all Leases". +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update +{% if cilium_version is version('1.12', '>=') %} +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - update + resourceNames: + - ciliumbgploadbalancerippools.cilium.io + - ciliumbgppeeringpolicies.cilium.io + - ciliumclusterwideenvoyconfigs.cilium.io + - ciliumclusterwidenetworkpolicies.cilium.io + - ciliumegressgatewaypolicies.cilium.io + - ciliumegressnatpolicies.cilium.io + - ciliumendpoints.cilium.io + - ciliumendpointslices.cilium.io + - ciliumenvoyconfigs.cilium.io + - ciliumexternalworkloads.cilium.io + - ciliumidentities.cilium.io + - ciliumlocalredirectpolicies.cilium.io + - ciliumnetworkpolicies.cilium.io + - ciliumnodes.cilium.io +{% if cilium_version is version('1.14', '>=') %} + - ciliumnodeconfigs.cilium.io + - ciliumcidrgroups.cilium.io + - ciliuml2announcementpolicies.cilium.io + - ciliumpodippools.cilium.io + - ciliumloadbalancerippools.cilium.io +{% endif %} +{% if cilium_version is version('1.15', '>=') %} + - ciliumbgpclusterconfigs.cilium.io + - ciliumbgppeerconfigs.cilium.io + - ciliumbgpadvertisements.cilium.io + - ciliumbgpnodeconfigs.cilium.io + - ciliumbgpnodeconfigoverrides.cilium.io +{% endif %} +{% endif %} +{% for rules in cilium_clusterrole_rules_operator_extra_vars %} +- apiGroups: +{% for api in rules['apiGroups'] %} + - {{ api }} +{% endfor %} + resources: +{% for resource in rules['resources'] %} + - {{ resource }} +{% endfor %} + verbs: +{% for verb in rules['verbs'] %} + - {{ verb }} +{% endfor %} +{% if 'resourceNames' in rules %} + resourceNames: +{% for resourceName in rules['resourceNames'] %} + - {{ resourceName }} +{% endfor %} +{% endif %} +{% endfor %} diff --git a/roles/network_plugin/cilium/templates/cilium-operator/crb.yml.j2 b/roles/network_plugin/cilium/templates/cilium-operator/crb.yml.j2 new file mode 100644 index 00000000000..00f08353531 --- /dev/null +++ b/roles/network_plugin/cilium/templates/cilium-operator/crb.yml.j2 @@ -0,0 +1,13 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cilium-operator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cilium-operator +subjects: +- kind: ServiceAccount + name: cilium-operator + namespace: kube-system diff --git a/roles/network_plugin/cilium/templates/cilium-operator/deploy.yml.j2 b/roles/network_plugin/cilium/templates/cilium-operator/deploy.yml.j2 new file mode 100644 index 00000000000..421b908b66f --- /dev/null +++ b/roles/network_plugin/cilium/templates/cilium-operator/deploy.yml.j2 @@ -0,0 +1,170 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cilium-operator + namespace: kube-system + labels: + io.cilium/app: operator + name: cilium-operator +spec: +{% if groups.k8s_cluster | length == 1 %} + replicas: 1 +{% else %} + replicas: {{ cilium_operator_replicas }} +{% endif %} + selector: + matchLabels: + io.cilium/app: operator + name: cilium-operator + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: +{% if cilium_enable_prometheus %} + annotations: + prometheus.io/port: "{{ cilium_operator_scrape_port }}" + prometheus.io/scrape: "true" +{% endif %} + labels: + io.cilium/app: operator + name: cilium-operator + spec: + containers: + - name: 
cilium-operator + image: "{{ cilium_operator_image_repo }}:{{ cilium_operator_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + command: + - cilium-operator + args: + - --config-dir=/tmp/cilium/config-map + - --debug=$(CILIUM_DEBUG) +{% if cilium_operator_custom_args is string %} + - {{ cilium_operator_custom_args }} +{% else %} +{% for flag in cilium_operator_custom_args %} + - {{ flag }} +{% endfor %} +{% endif %} + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CILIUM_DEBUG + valueFrom: + configMapKeyRef: + key: debug + name: cilium-config + optional: true + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + name: cilium-aws + key: AWS_ACCESS_KEY_ID + optional: true + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + name: cilium-aws + key: AWS_SECRET_ACCESS_KEY + optional: true + - name: AWS_DEFAULT_REGION + valueFrom: + secretKeyRef: + name: cilium-aws + key: AWS_DEFAULT_REGION + optional: true +{% if (cilium_kube_proxy_replacement == 'strict') or (cilium_kube_proxy_replacement | bool) or (cilium_kube_proxy_replacement | string | lower == 'true') %} + - name: KUBERNETES_SERVICE_HOST + value: "{{ kube_apiserver_global_endpoint | urlsplit('hostname') }}" + - name: KUBERNETES_SERVICE_PORT + value: "{{ kube_apiserver_global_endpoint | urlsplit('port') }}" +{% endif %} +{% if cilium_enable_prometheus %} + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + ports: + - name: prometheus + containerPort: {{ cilium_operator_scrape_port }} + hostPort: {{ cilium_operator_scrape_port }} + protocol: TCP +{% endif %} + livenessProbe: + httpGet: +{% if cilium_enable_ipv4 %} + host: 127.0.0.1 +{% else %} + host: '::1' +{% endif %} + path: /healthz + port: 9234 + scheme: HTTP + initialDelaySeconds: 60 + periodSeconds: 10 + timeoutSeconds: 3 + volumeMounts: + - name: cilium-config-path + mountPath: /tmp/cilium/config-map + readOnly: true +{% if cilium_identity_allocation_mode == "kvstore" %} + - name: etcd-config-path + mountPath: /var/lib/etcd-config + readOnly: true + - name: etcd-secrets + mountPath: "{{ cilium_cert_dir }}" + readOnly: true +{% endif %} +{% for volume_mount in cilium_operator_extra_volume_mounts %} + - {{ volume_mount | to_nice_yaml(indent=2) | indent(14) }} +{% endfor %} + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + restartPolicy: Always + priorityClassName: system-node-critical + serviceAccount: cilium-operator + serviceAccountName: cilium-operator + # In HA mode, cilium-operator pods must not be scheduled on the same + # node as they will clash with each other. 
+ affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + io.cilium/app: operator + tolerations: + - operator: Exists + volumes: + - name: cilium-config-path + configMap: + name: cilium-config +{% if cilium_identity_allocation_mode == "kvstore" %} + # To read the etcd config stored in config maps + - name: etcd-config-path + configMap: + name: cilium-config + defaultMode: 420 + items: + - key: etcd-config + path: etcd.config + # To read the k8s etcd secrets in case the user might want to use TLS + - name: etcd-secrets + hostPath: + path: "{{ cilium_cert_dir }}" +{% endif %} +{% for volume in cilium_operator_extra_volumes %} + - {{ volume | to_nice_yaml(indent=2) | indent(10) }} +{% endfor %} diff --git a/roles/network_plugin/cilium/templates/cilium-operator/sa.yml.j2 b/roles/network_plugin/cilium/templates/cilium-operator/sa.yml.j2 new file mode 100644 index 00000000000..c5d1893643b --- /dev/null +++ b/roles/network_plugin/cilium/templates/cilium-operator/sa.yml.j2 @@ -0,0 +1,6 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cilium-operator + namespace: kube-system diff --git a/roles/network_plugin/cilium/templates/cilium/cilium-loadbalancer-ip-pool.yml.j2 b/roles/network_plugin/cilium/templates/cilium/cilium-loadbalancer-ip-pool.yml.j2 index 8a2a84031e4..827b2f3ca68 100644 --- a/roles/network_plugin/cilium/templates/cilium/cilium-loadbalancer-ip-pool.yml.j2 +++ b/roles/network_plugin/cilium/templates/cilium/cilium-loadbalancer-ip-pool.yml.j2 @@ -6,11 +6,7 @@ metadata: name: "{{ cilium_loadbalancer_ip_pool.name }}" spec: blocks: -{% for cblock in cilium_loadbalancer_ip_pool.cidrs | default([]) %} +{% for cblock in cilium_loadbalancer_ip_pool.cidrs %} - cidr: "{{ cblock }}" {% endfor %} -{% for rblock in cilium_loadbalancer_ip_pool.ranges | default([]) %} - - start: "{{ rblock.start }}" - stop: "{{ rblock.stop | default(rblock.start) }}" -{% endfor %} {% endfor %} diff --git a/roles/network_plugin/cilium/templates/cilium/config.yml.j2 b/roles/network_plugin/cilium/templates/cilium/config.yml.j2 new file mode 100644 index 00000000000..83bae464556 --- /dev/null +++ b/roles/network_plugin/cilium/templates/cilium/config.yml.j2 @@ -0,0 +1,299 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cilium-config + namespace: kube-system +data: + identity-allocation-mode: {{ cilium_identity_allocation_mode }} + +{% if cilium_identity_allocation_mode == "kvstore" %} + # This etcd-config contains the etcd endpoints of your cluster. 
If you use + # TLS please make sure you follow the tutorial in https://cilium.link/etcd-config + etcd-config: |- + --- + endpoints: +{% for ip_addr in etcd_access_addresses.split(',') %} + - {{ ip_addr }} +{% endfor %} + + # In case you want to use TLS in etcd, uncomment the 'ca-file' line + # and create a kubernetes secret by following the tutorial in + # https://cilium.link/etcd-config +{% if cilium_version | regex_replace('v') is version('1.17.0', '>=') %} + trusted-ca-file: "{{ cilium_cert_dir }}/ca_cert.crt" +{% else %} + ca-file: "{{ cilium_cert_dir }}/ca_cert.crt" +{% endif %} + + # In case you want client to server authentication, uncomment the following + # lines and create a kubernetes secret by following the tutorial in + # https://cilium.link/etcd-config + key-file: "{{ cilium_cert_dir }}/key.pem" + cert-file: "{{ cilium_cert_dir }}/cert.crt" + + # kvstore + # https://docs.cilium.io/en/latest/cmdref/kvstore/ + kvstore: etcd + kvstore-opt: '{"etcd.config": "/var/lib/etcd-config/etcd.config"}' +{% endif %} + + # If you want metrics enabled in all of your Cilium agents, set the port for + # which the Cilium agents will have their metrics exposed. + # This option deprecates the "prometheus-serve-addr" in the + # "cilium-metrics-config" ConfigMap + # NOTE that this will open the port on ALL nodes where Cilium pods are + # scheduled. +{% if cilium_enable_prometheus %} + prometheus-serve-addr: ":{{ cilium_agent_scrape_port }}" + operator-prometheus-serve-addr: ":{{ cilium_operator_scrape_port }}" + enable-metrics: "true" +{% endif %} + + # If you want to run cilium in debug mode change this value to true + debug: "{{ cilium_debug }}" + enable-ipv4: "{{ cilium_enable_ipv4 }}" + enable-ipv6: "{{ cilium_enable_ipv6 }}" + # If a serious issue occurs during Cilium startup, this + # invasive option may be set to true to remove all persistent + # state. Endpoints will not be restored using knowledge from a + # prior Cilium run, so they may receive new IP addresses upon + # restart. This also triggers clean-cilium-bpf-state. + clean-cilium-state: "false" + # If you want to clean cilium BPF state, set this to true; + # Removes all BPF maps from the filesystem. Upon restart, + # endpoints are restored with the same IP addresses, however + # any ongoing connections may be disrupted briefly. + # Loadbalancing decisions will be reset, so any ongoing + # connections via a service may be loadbalanced to a different + # backend after restart. + clean-cilium-bpf-state: "false" + + # Users who wish to specify their own custom CNI configuration file must set + # custom-cni-conf to "true", otherwise Cilium may overwrite the configuration. + custom-cni-conf: "false" + +{% if cilium_version is version('1.14.0', '>=') %} + # Tell the agent to generate and write a CNI configuration file + write-cni-conf-when-ready: /host/etc/cni/net.d/05-cilium.conflist + cni-exclusive: "{{ cilium_cni_exclusive }}" + cni-log-file: "{{ cilium_cni_log_file }}" +{% endif %} + + # If you want cilium monitor to aggregate tracing for packets, set this level + # to "low", "medium", or "maximum". The higher the level, the less packets + # that will be seen in monitor output. + monitor-aggregation: "{{ cilium_monitor_aggregation }}" + + # ct-global-max-entries-* specifies the maximum number of connections + # supported across all endpoints, split by protocol: tcp or other. One pair + # of maps uses these values for IPv4 connections, and another pair of maps + # use these values for IPv6 connections. 
+ # + # If these values are modified, then during the next Cilium startup the + # tracking of ongoing connections may be disrupted. This may lead to brief + # policy drops or a change in loadbalancing decisions for a connection. + # + # For users upgrading from Cilium 1.2 or earlier, to minimize disruption + # during the upgrade process, comment out these options. + bpf-ct-global-tcp-max: "524288" + bpf-ct-global-any-max: "262144" + + # Pre-allocation of map entries allows per-packet latency to be reduced, at + # the expense of up-front memory allocation for the entries in the maps. The + # default value below will minimize memory usage in the default installation; + # users who are sensitive to latency may consider setting this to "true". + # + # This option was introduced in Cilium 1.4. Cilium 1.3 and earlier ignore + # this option and behave as though it is set to "true". + # + # If this value is modified, then during the next Cilium startup the restore + # of existing endpoints and tracking of ongoing connections may be disrupted. + # This may lead to policy drops or a change in loadbalancing decisions for a + # connection for some time. Endpoints may need to be recreated to restore + # connectivity. + # + # If this option is set to "false" during an upgrade from 1.3 or earlier to + # 1.4 or later, then it may cause one-time disruptions during the upgrade. + preallocate-bpf-maps: "{{ cilium_preallocate_bpf_maps }}" + + # Regular expression matching compatible Istio sidecar istio-proxy + # container image names + sidecar-istio-proxy-image: "cilium/istio_proxy" + + # Encapsulation mode for communication between nodes + # Possible values: + # - disabled + # - vxlan (default) + # - geneve +{% if cilium_version is version('1.14.0', '<') %} + tunnel: "{{ cilium_tunnel_mode }}" +{% elif cilium_version is version('1.14.0', '>=') and cilium_tunnel_mode == 'disabled' %} + routing-mode: 'native' +{% elif cilium_version is version('1.14.0', '>=') and cilium_tunnel_mode != 'disabled' %} + routing-mode: 'tunnel' + tunnel-protocol: "{{ cilium_tunnel_mode }}" +{% endif %} + + ## DSR setting + bpf-lb-mode: "{{ cilium_loadbalancer_mode }}" + + # l2 + enable-l2-announcements: "{{ cilium_l2announcements }}" + + # Enable Bandwidth Manager + # Cilium's bandwidth manager supports the kubernetes.io/egress-bandwidth Pod annotation. + # Bandwidth enforcement currently does not work in combination with L7 Cilium Network Policies. + # In case they select the Pod at egress, then the bandwidth enforcement will be disabled for those Pods. + # Bandwidth Manager requires a v5.1.x or more recent Linux kernel. +{% if cilium_enable_bandwidth_manager %} + enable-bandwidth-manager: "true" +{% endif %} + + # Host Firewall and Policy Audit Mode + enable-host-firewall: "{{ cilium_enable_host_firewall | capitalize }}" + policy-audit-mode: "{{ cilium_policy_audit_mode | capitalize }}" + + # Name of the cluster. Only relevant when building a mesh of clusters. + cluster-name: "{{ cilium_cluster_name }}" + + # Unique ID of the cluster. Must be unique across all conneted clusters and + # in the range of 1 and 255. Only relevant when building a mesh of clusters. 
+ #cluster-id: 1 +{% if cilium_cluster_id is defined %} + cluster-id: "{{ cilium_cluster_id }}" +{% endif %} + +# `wait-bpf-mount` is removed after v1.10.4 +# https://github.com/cilium/cilium/commit/d2217045cb3726a7f823174e086913b69b8090da +{% if cilium_version is version('1.10.4', '<') %} + # wait-bpf-mount makes init container wait until bpf filesystem is mounted + wait-bpf-mount: "false" +{% endif %} + +# `kube-proxy-replacement=partial|strict|disabled` is deprecated since january 2024 and unsupported in 1.16. +# Replaced by `kube-proxy-replacement=true|false` +# https://github.com/cilium/cilium/pull/31286 +{% if cilium_version is version('1.16', '<') %} + kube-proxy-replacement: "{{ cilium_kube_proxy_replacement }}" +{% else %} + kube-proxy-replacement: "{% if (cilium_kube_proxy_replacement == 'strict') or (cilium_kube_proxy_replacement | bool) or (cilium_kube_proxy_replacement | string | lower == 'true') %}true{% else %}false{% endif %}" +{% endif %} + +# `native-routing-cidr` is deprecated in 1.10, removed in 1.12. +# Replaced by `ipv4-native-routing-cidr` +# https://github.com/cilium/cilium/pull/16695 +{% if cilium_version is version('1.12', '<') %} + native-routing-cidr: "{{ cilium_native_routing_cidr }}" +{% else %} +{% if cilium_native_routing_cidr | length %} + ipv4-native-routing-cidr: "{{ cilium_native_routing_cidr }}" +{% endif %} +{% if cilium_native_routing_cidr_ipv6 | length %} + ipv6-native-routing-cidr: "{{ cilium_native_routing_cidr_ipv6 }}" +{% endif %} +{% endif %} + + auto-direct-node-routes: "{{ cilium_auto_direct_node_routes }}" + + operator-api-serve-addr: "{{ cilium_operator_api_serve_addr }}" + + # Hubble settings +{% if cilium_enable_hubble %} + enable-hubble: "true" +{% if cilium_enable_hubble_metrics %} + hubble-metrics-server: ":{{ cilium_hubble_scrape_port }}" + hubble-metrics: +{% for hubble_metrics_cycle in cilium_hubble_metrics %} + {{ hubble_metrics_cycle }} +{% endfor %} +{% endif %} +{% if cilium_hubble_event_buffer_capacity is defined %} + hubble-event-buffer-capacity: "{{ cilium_hubble_event_buffer_capacity }}" +{% endif %} +{% if cilium_hubble_event_queue_size is defined %} + hubble-event-queue-size: "{{ cilium_hubble_event_queue_size }}" +{% endif %} + hubble-listen-address: ":4244" +{% if cilium_enable_hubble and cilium_hubble_install %} + hubble-disable-tls: "{% if cilium_hubble_tls_generate %}false{% else %}true{% endif %}" + hubble-tls-cert-file: /var/lib/cilium/tls/hubble/server.crt + hubble-tls-key-file: /var/lib/cilium/tls/hubble/server.key + hubble-tls-client-ca-files: /var/lib/cilium/tls/hubble/client-ca.crt +{% endif %} +{% endif %} + + # IP Masquerade Agent + enable-ip-masq-agent: "{{ cilium_ip_masq_agent_enable }}" + +{% for key, value in cilium_config_extra_vars.items() %} + {{ key }}: "{{ value }}" +{% endfor %} + + # Enable transparent network encryption +{% if cilium_encryption_enabled %} +{% if cilium_encryption_type == "ipsec" %} + enable-ipsec: "true" + ipsec-key-file: /etc/ipsec/keys + encrypt-node: "{{ cilium_ipsec_node_encryption }}" +{% endif %} + +{% if cilium_encryption_type == "wireguard" %} + enable-wireguard: "true" + enable-wireguard-userspace-fallback: "{{ cilium_wireguard_userspace_fallback }}" +{% endif %} +{% endif %} + + # IPAM settings + ipam: "{{ cilium_ipam_mode }}" +{% if cilium_ipam_mode == "cluster-pool" %} + cluster-pool-ipv4-cidr: "{{ cilium_pool_cidr | default(kube_pods_subnet) }}" + cluster-pool-ipv4-mask-size: "{{ cilium_pool_mask_size | default(kube_network_node_prefix) }}" +{% if cilium_enable_ipv6 %} 
+ cluster-pool-ipv6-cidr: "{{ cilium_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6) }}" + cluster-pool-ipv6-mask-size: "{{ cilium_pool_mask_size_ipv6 | default(kube_network_node_prefix_ipv6) }}" +{% endif %} +{% endif %} + + agent-health-port: "{{ cilium_agent_health_port }}" + +{% if cilium_version is version('1.11', '>=') and cilium_cgroup_host_root != '' %} + cgroup-root: "{{ cilium_cgroup_host_root }}" +{% endif %} + + bpf-map-dynamic-size-ratio: "{{ cilium_bpf_map_dynamic_size_ratio }}" + + enable-ipv4-masquerade: "{{ cilium_enable_ipv4_masquerade }}" + enable-ipv6-masquerade: "{{ cilium_enable_ipv6_masquerade }}" + + enable-bpf-masquerade: "{{ cilium_enable_bpf_masquerade }}" + + enable-host-legacy-routing: "{{ cilium_enable_host_legacy_routing }}" + + enable-remote-node-identity: "{{ cilium_enable_remote_node_identity }}" + + enable-well-known-identities: "{{ cilium_enable_well_known_identities }}" + + monitor-aggregation-flags: "{{ cilium_monitor_aggregation_flags }}" + + enable-bpf-clock-probe: "{{ cilium_enable_bpf_clock_probe }}" + + enable-bgp-control-plane: "{{ cilium_enable_bgp_control_plane }}" + + disable-cnp-status-updates: "{{ cilium_disable_cnp_status_updates }}" +{% if cilium_ip_masq_agent_enable %} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: ip-masq-agent + namespace: kube-system +data: + config: | + nonMasqueradeCIDRs: +{% for cidr in cilium_non_masquerade_cidrs %} + - {{ cidr }} +{% endfor %} + masqLinkLocal: {{ cilium_masq_link_local | bool }} + resyncInterval: "{{ cilium_ip_masq_resync_interval }}" +{% endif %} diff --git a/roles/network_plugin/cilium/templates/cilium/cr.yml.j2 b/roles/network_plugin/cilium/templates/cilium/cr.yml.j2 new file mode 100644 index 00000000000..055c0f43c23 --- /dev/null +++ b/roles/network_plugin/cilium/templates/cilium/cr.yml.j2 @@ -0,0 +1,166 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cilium +rules: +- apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - get + - list + - watch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - namespaces + - services + - pods + - endpoints + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - secrets + verbs: + - get +{% if cilium_version is version('1.12', '<') %} +- apiGroups: + - "" + resources: + - pods + - pods/finalizers + verbs: + - get + - list + - watch + - update + - delete +- apiGroups: + - "" + resources: + - pods + - nodes + verbs: + - get + - list + - watch + - update +{% endif %} +- apiGroups: + - "" + resources: + - nodes + - nodes/status + verbs: + - patch +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + # Deprecated for removal in v1.10 + - create + - list + - watch + - update + + # This is used when validating policies in preflight. This will need to stay + # until we figure out how to avoid "get" inside the preflight, and then + # should be removed ideally. 
+ - get +- apiGroups: + - cilium.io + resources: + - ciliumnetworkpolicies + - ciliumnetworkpolicies/status + - ciliumclusterwidenetworkpolicies + - ciliumclusterwidenetworkpolicies/status + - ciliumendpoints + - ciliumendpoints/status + - ciliumnodes + - ciliumnodes/status + - ciliumidentities + - ciliumlocalredirectpolicies + - ciliumlocalredirectpolicies/status + - ciliumegressnatpolicies +{% if cilium_version is version('1.11', '>=') %} + - ciliumendpointslices +{% endif %} +{% if cilium_version is version('1.12', '>=') %} + - ciliumbgploadbalancerippools + - ciliumbgppeeringpolicies +{% if cilium_version is version('1.13', '>=') %} + - ciliumloadbalancerippools +{% endif %} +{% endif %} +{% if cilium_version is version('1.11.5', '<') %} + - ciliumnetworkpolicies/finalizers + - ciliumclusterwidenetworkpolicies/finalizers + - ciliumendpoints/finalizers + - ciliumnodes/finalizers + - ciliumidentities/finalizers + - ciliumlocalredirectpolicies/finalizers +{% endif %} +{% if cilium_version is version('1.14', '>=') %} + - ciliuml2announcementpolicies/status +{% endif %} +{% if cilium_version is version('1.15', '>=') %} + - ciliumbgpnodeconfigs + - ciliumbgpnodeconfigs/status + - ciliumbgpadvertisements + - ciliumbgppeerconfigs +{% endif %} +{% if cilium_version is version('1.16', '>=') %} + - ciliumbgpclusterconfigs +{% endif %} + verbs: + - '*' +{% if cilium_version is version('1.12', '>=') %} +- apiGroups: + - cilium.io + resources: + - ciliumclusterwideenvoyconfigs + - ciliumenvoyconfigs + - ciliumegressgatewaypolicies + verbs: + - list + - watch +{% endif %} +{% if cilium_version is version('1.14', '>=') %} +- apiGroups: + - cilium.io + resources: + - ciliumcidrgroups + - ciliuml2announcementpolicies + - ciliumpodippools + - ciliumloadbalancerippools + - ciliuml2announcementpolicies/status + verbs: + - list + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update + - list + - delete +{% endif %} diff --git a/roles/network_plugin/cilium/templates/cilium/crb.yml.j2 b/roles/network_plugin/cilium/templates/cilium/crb.yml.j2 new file mode 100644 index 00000000000..d23897fa04b --- /dev/null +++ b/roles/network_plugin/cilium/templates/cilium/crb.yml.j2 @@ -0,0 +1,13 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cilium +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cilium +subjects: +- kind: ServiceAccount + name: cilium + namespace: kube-system diff --git a/roles/network_plugin/cilium/templates/cilium/ds.yml.j2 b/roles/network_plugin/cilium/templates/cilium/ds.yml.j2 new file mode 100644 index 00000000000..8371d85d106 --- /dev/null +++ b/roles/network_plugin/cilium/templates/cilium/ds.yml.j2 @@ -0,0 +1,446 @@ +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cilium + namespace: kube-system + labels: + k8s-app: cilium +spec: + selector: + matchLabels: + k8s-app: cilium + updateStrategy: + rollingUpdate: + # Specifies the maximum number of Pods that can be unavailable during the update process. 
+ maxUnavailable: 2 + type: RollingUpdate + template: + metadata: + annotations: +{% if cilium_enable_prometheus %} + prometheus.io/port: "{{ cilium_agent_scrape_port }}" + prometheus.io/scrape: "true" +{% endif %} + scheduler.alpha.kubernetes.io/tolerations: '[{"key":"dedicated","operator":"Equal","value":"master","effect":"NoSchedule"}]' + labels: + k8s-app: cilium + spec: + containers: + - name: cilium-agent + image: "{{ cilium_image_repo }}:{{ cilium_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + command: + - cilium-agent + args: + - --config-dir=/tmp/cilium/config-map +{% if cilium_mtu != "" %} + - --mtu={{ cilium_mtu }} +{% endif %} +{% if cilium_agent_custom_args is string %} + - {{ cilium_agent_custom_args }} +{% else %} +{% for flag in cilium_agent_custom_args %} + - {{ flag }} +{% endfor %} +{% endif %} + startupProbe: + httpGet: + host: '127.0.0.1' + path: /healthz + port: {{ cilium_agent_health_port }} + scheme: HTTP + httpHeaders: + - name: "brief" + value: "true" + failureThreshold: 105 + periodSeconds: 2 + successThreshold: 1 + livenessProbe: + httpGet: + host: '127.0.0.1' + path: /healthz + port: {{ cilium_agent_health_port }} + scheme: HTTP + httpHeaders: + - name: "brief" + value: "true" + failureThreshold: 10 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 5 + readinessProbe: + httpGet: + host: 127.0.0.1 + path: /healthz + port: {{ cilium_agent_health_port }} + scheme: HTTP + httpHeaders: + - name: "brief" + value: "true" + initialDelaySeconds: 5 + periodSeconds: 30 + successThreshold: 1 + failureThreshold: 3 + timeoutSeconds: 5 + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CILIUM_CLUSTERMESH_CONFIG + value: /var/lib/cilium/clustermesh/ +{% if (cilium_kube_proxy_replacement == 'strict') or (cilium_kube_proxy_replacement | bool) or (cilium_kube_proxy_replacement | string | lower == 'true') %} + - name: KUBERNETES_SERVICE_HOST + value: "{{ kube_apiserver_global_endpoint | urlsplit('hostname') }}" + - name: KUBERNETES_SERVICE_PORT + value: "{{ kube_apiserver_global_endpoint | urlsplit('port') }}" +{% endif %} +{% for env_var in cilium_agent_extra_env_vars %} + - {{ env_var | to_nice_yaml(indent=2) | indent(10) }} +{% endfor %} + lifecycle: +{% if cilium_version is version('1.14', '<') %} + postStart: + exec: + command: + - "/cni-install.sh" + - "--cni-exclusive={{ cilium_cni_exclusive | string | lower }}" +{% if cilium_version is version('1.12', '>=') %} + - "--enable-debug={{ cilium_debug | string | lower }}" + - "--log-file={{ cilium_cni_log_file }}" +{% endif %} +{% endif %} + preStop: + exec: + command: + - /cni-uninstall.sh + resources: + limits: + cpu: {{ cilium_cpu_limit }} + memory: {{ cilium_memory_limit }} + requests: + cpu: {{ cilium_cpu_requests }} + memory: {{ cilium_memory_requests }} +{% if cilium_enable_prometheus or cilium_enable_hubble_metrics %} + ports: +{% endif %} +{% if cilium_enable_prometheus %} + - name: prometheus + containerPort: {{ cilium_agent_scrape_port }} + hostPort: {{ cilium_agent_scrape_port }} + protocol: TCP +{% endif %} +{% if cilium_enable_hubble_metrics %} + - name: hubble-metrics + containerPort: {{ cilium_hubble_scrape_port }} + hostPort: {{ cilium_hubble_scrape_port }} + protocol: TCP +{% endif %} + securityContext: + privileged: true + volumeMounts: + - name: bpf-maps + mountPath: /sys/fs/bpf + mountPropagation: Bidirectional + - name: 
cilium-run + mountPath: /var/run/cilium +{% if cilium_version is version('1.13.1', '<') %} + - name: cni-path + mountPath: /host/opt/cni/bin +{% endif %} + - name: etc-cni-netd + mountPath: /host/etc/cni/net.d +{% if cilium_identity_allocation_mode == "kvstore" %} + - name: etcd-config-path + mountPath: /var/lib/etcd-config + readOnly: true + - name: etcd-secrets + mountPath: "{{ cilium_cert_dir }}" + readOnly: true +{% endif %} + - name: clustermesh-secrets + mountPath: /var/lib/cilium/clustermesh + readOnly: true + - name: cilium-config-path + mountPath: /tmp/cilium/config-map + readOnly: true +{% if cilium_ip_masq_agent_enable %} + - name: ip-masq-agent + mountPath: /etc/config + readOnly: true +{% endif %} + # Needed to be able to load kernel modules + - name: lib-modules + mountPath: /lib/modules + readOnly: true + - name: xtables-lock + mountPath: /run/xtables.lock +{% if cilium_encryption_enabled and cilium_encryption_type == "ipsec" %} + - name: cilium-ipsec-secrets + mountPath: /etc/ipsec + readOnly: true +{% endif %} +{% if cilium_hubble_install %} + - name: hubble-tls + mountPath: /var/lib/cilium/tls/hubble + readOnly: true +{% endif %} +{% for volume_mount in cilium_agent_extra_volume_mounts %} + - {{ volume_mount | to_nice_yaml(indent=2) | indent(10) }} +{% endfor %} +# In managed etcd mode, Cilium must be able to resolve the DNS name of the etcd service +{% if cilium_identity_allocation_mode == "kvstore" %} + dnsPolicy: ClusterFirstWithHostNet +{% endif %} + hostNetwork: true + initContainers: +{% if cilium_version is version('1.11', '>=') and cilium_cgroup_auto_mount %} + - name: mount-cgroup + image: "{{ cilium_image_repo }}:{{ cilium_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + env: + - name: CGROUP_ROOT + value: {{ cilium_cgroup_host_root }} + - name: BIN_PATH + value: /opt/cni/bin + command: + - sh + - -ec + # The statically linked Go program binary is invoked to avoid any + # dependency on utilities like sh and mount that can be missing on certain + # distros installed on the underlying host. Copy the binary to the + # same directory where we install cilium cni plugin so that exec permissions + # are available. + - | + cp /usr/bin/cilium-mount /hostbin/cilium-mount; + nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT; + rm /hostbin/cilium-mount + volumeMounts: + - name: hostproc + mountPath: /hostproc + - name: cni-path + mountPath: /hostbin + securityContext: + privileged: true +{% endif %} +{% if cilium_version is version('1.11.7', '>=') %} + - name: apply-sysctl-overwrites + image: "{{ cilium_image_repo }}:{{ cilium_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + env: + - name: BIN_PATH + value: /opt/cni/bin + command: + - sh + - -ec + # The statically linked Go program binary is invoked to avoid any + # dependency on utilities like sh that can be missing on certain + # distros installed on the underlying host. Copy the binary to the + # same directory where we install cilium cni plugin so that exec permissions + # are available. 
+ - | + cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix; + nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix"; + rm /hostbin/cilium-sysctlfix + volumeMounts: + - name: hostproc + mountPath: /hostproc + - name: cni-path + mountPath: /hostbin + securityContext: + privileged: true +{% endif %} + - name: clean-cilium-state + image: "{{ cilium_image_repo }}:{{ cilium_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + command: + - /init-container.sh + env: + - name: CILIUM_ALL_STATE + valueFrom: + configMapKeyRef: + name: cilium-config + key: clean-cilium-state + optional: true + - name: CILIUM_BPF_STATE + valueFrom: + configMapKeyRef: + name: cilium-config + key: clean-cilium-bpf-state + optional: true +# Removed in 1.11 and up. +# https://github.com/cilium/cilium/commit/f7a3f59fd74983c600bfce9cac364b76d20849d9 +{% if cilium_version is version('1.11', '<') %} + - name: CILIUM_WAIT_BPF_MOUNT + valueFrom: + configMapKeyRef: + key: wait-bpf-mount + name: cilium-config + optional: true +{% endif %} +{% if (cilium_kube_proxy_replacement == 'strict') or (cilium_kube_proxy_replacement | bool) or (cilium_kube_proxy_replacement | string | lower == 'true') %} + - name: KUBERNETES_SERVICE_HOST + value: "{{ kube_apiserver_global_endpoint | urlsplit('hostname') }}" + - name: KUBERNETES_SERVICE_PORT + value: "{{ kube_apiserver_global_endpoint | urlsplit('port') }}" +{% endif %} + securityContext: + privileged: true + volumeMounts: + - name: bpf-maps + mountPath: /sys/fs/bpf +{% if cilium_version is version('1.11', '>=') %} + # Required to mount cgroup filesystem from the host to cilium agent pod + - name: cilium-cgroup + mountPath: {{ cilium_cgroup_host_root }} + mountPropagation: HostToContainer +{% endif %} + - name: cilium-run + mountPath: /var/run/cilium + resources: + requests: + cpu: 100m + memory: 100Mi +{% if cilium_version is version('1.13.1', '>=') %} + # Install the CNI binaries in an InitContainer so we don't have a writable host mount in the agent + - name: install-cni-binaries + image: "{{ cilium_image_repo }}:{{ cilium_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + command: + - "/install-plugin.sh" + resources: + requests: + cpu: 100m + memory: 10Mi + securityContext: + privileged: true + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - name: cni-path + mountPath: /host/opt/cni/bin +{% endif %} + restartPolicy: Always + priorityClassName: system-node-critical + serviceAccount: cilium + serviceAccountName: cilium + terminationGracePeriodSeconds: 1 + hostNetwork: true +# In managed etcd mode, Cilium must be able to resolve the DNS name of the etcd service +{% if cilium_identity_allocation_mode == "kvstore" %} + dnsPolicy: ClusterFirstWithHostNet +{% endif %} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + k8s-app: cilium + tolerations: + - operator: Exists + volumes: + # To keep state between restarts / upgrades + - name: cilium-run + hostPath: + path: /var/run/cilium + type: DirectoryOrCreate + # To keep state between restarts / upgrades for bpf maps + - name: bpf-maps + hostPath: + path: /sys/fs/bpf + type: DirectoryOrCreate +{% if cilium_version is version('1.11', '>=') %} + # To mount cgroup2 filesystem on the host + - name: hostproc + hostPath: + path: /proc + type: Directory + # To keep state between restarts / upgrades for cgroup2 filesystem + - name: cilium-cgroup + hostPath: + path: {{ cilium_cgroup_host_root }} + 
type: DirectoryOrCreate +{% endif %} + # To install cilium cni plugin in the host + - name: cni-path + hostPath: + path: /opt/cni/bin + type: DirectoryOrCreate + # To install cilium cni configuration in the host + - name: etc-cni-netd + hostPath: + path: /etc/cni/net.d + type: DirectoryOrCreate + # To be able to load kernel modules + - name: lib-modules + hostPath: + path: /lib/modules + # To access iptables concurrently with other processes (e.g. kube-proxy) + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate +{% if cilium_identity_allocation_mode == "kvstore" %} + # To read the etcd config stored in config maps + - name: etcd-config-path + configMap: + name: cilium-config + # note: the leading zero means this number is in octal representation: do not remove it + defaultMode: 0400 + items: + - key: etcd-config + path: etcd.config + # To read the k8s etcd secrets in case the user might want to use TLS + - name: etcd-secrets + hostPath: + path: "{{ cilium_cert_dir }}" +{% endif %} + # To read the clustermesh configuration + - name: clustermesh-secrets + secret: + secretName: cilium-clustermesh + # note: the leading zero means this number is in octal representation: do not remove it + defaultMode: 0400 + optional: true + # To read the configuration from the config map + - name: cilium-config-path + configMap: + name: cilium-config +{% if cilium_ip_masq_agent_enable %} + - name: ip-masq-agent + configMap: + name: ip-masq-agent + optional: true + items: + - key: config + path: ip-masq-agent +{% endif %} +{% if cilium_encryption_enabled and cilium_encryption_type == "ipsec" %} + - name: cilium-ipsec-secrets + secret: + secretName: cilium-ipsec-keys +{% endif %} +{% if cilium_hubble_install %} + - name: hubble-tls + projected: + # note: the leading zero means this number is in octal representation: do not remove it + defaultMode: 0400 + sources: + - secret: + name: hubble-server-certs + optional: true + items: + - key: ca.crt + path: client-ca.crt + - key: tls.crt + path: server.crt + - key: tls.key + path: server.key +{% endif %} diff --git a/roles/network_plugin/cilium/templates/cilium/sa.yml.j2 b/roles/network_plugin/cilium/templates/cilium/sa.yml.j2 new file mode 100644 index 00000000000..c03ac59b49b --- /dev/null +++ b/roles/network_plugin/cilium/templates/cilium/sa.yml.j2 @@ -0,0 +1,6 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cilium + namespace: kube-system diff --git a/roles/network_plugin/cilium/templates/cilium/secret.yml.j2 b/roles/network_plugin/cilium/templates/cilium/secret.yml.j2 new file mode 100644 index 00000000000..776c6893800 --- /dev/null +++ b/roles/network_plugin/cilium/templates/cilium/secret.yml.j2 @@ -0,0 +1,9 @@ +--- +apiVersion: v1 +data: + keys: {{ cilium_ipsec_key }} +kind: Secret +metadata: + name: cilium-ipsec-keys + namespace: kube-system +type: Opaque diff --git a/roles/network_plugin/cilium/templates/hubble/config.yml.j2 b/roles/network_plugin/cilium/templates/hubble/config.yml.j2 new file mode 100644 index 00000000000..f3af7174110 --- /dev/null +++ b/roles/network_plugin/cilium/templates/hubble/config.yml.j2 @@ -0,0 +1,71 @@ +#jinja2: trim_blocks:False +--- +# Source: cilium helm chart: cilium/templates/hubble-relay/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: hubble-relay-config + namespace: kube-system +data: + config.yaml: | + cluster-name: "{{ cilium_cluster_name }}" + peer-service: "hubble-peer.kube-system.svc.{{ dns_domain }}:443" + listen-address: :4245 + metrics-listen-address: 
":9966" + dial-timeout: + retry-timeout: + sort-buffer-len-max: + sort-buffer-drain-timeout: + tls-client-cert-file: /var/lib/hubble-relay/tls/client.crt + tls-client-key-file: /var/lib/hubble-relay/tls/client.key + tls-server-cert-file: /var/lib/hubble-relay/tls/server.crt + tls-server-key-file: /var/lib/hubble-relay/tls/server.key + tls-hubble-server-ca-files: /var/lib/hubble-relay/tls/hubble-server-ca.crt + disable-server-tls: {% if cilium_hubble_tls_generate %}false{% else %}true{% endif %} + disable-client-tls: {% if cilium_hubble_tls_generate %}false{% else %}true{% endif %} +--- +# Source: cilium/templates/hubble-ui/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: hubble-ui-nginx + namespace: kube-system +data: + nginx.conf: | + server { + listen 8081; + {% if cilium_enable_ipv6 %} + listen [::]:8081; + {% endif %} + server_name localhost; + root /app; + index index.html; + client_max_body_size 1G; + + location / { + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + + # CORS + add_header Access-Control-Allow-Methods "GET, POST, PUT, HEAD, DELETE, OPTIONS"; + add_header Access-Control-Allow-Origin *; + add_header Access-Control-Max-Age 1728000; + add_header Access-Control-Expose-Headers content-length,grpc-status,grpc-message; + add_header Access-Control-Allow-Headers range,keep-alive,user-agent,cache-control,content-type,content-transfer-encoding,x-accept-content-transfer-encoding,x-accept-response-streaming,x-user-agent,x-grpc-web,grpc-timeout; + if ($request_method = OPTIONS) { + return 204; + } + # /CORS + + location /api { + proxy_http_version 1.1; + proxy_pass_request_headers on; + proxy_hide_header Access-Control-Allow-Origin; + proxy_pass http://127.0.0.1:8090; + } + + location / { + try_files $uri $uri/ /index.html; + } + } + } diff --git a/roles/network_plugin/cilium/templates/hubble/cr.yml.j2 b/roles/network_plugin/cilium/templates/hubble/cr.yml.j2 new file mode 100644 index 00000000000..ee974b5e35b --- /dev/null +++ b/roles/network_plugin/cilium/templates/hubble/cr.yml.j2 @@ -0,0 +1,108 @@ +{% if cilium_hubble_tls_generate %} +--- +# Source: cilium/templates/hubble-generate-certs-clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: hubble-generate-certs +rules: + - apiGroups: + - "" + resources: + - secrets + - configmaps + verbs: + - create + - apiGroups: + - "" + resources: + - secrets + resourceNames: + - hubble-server-certs + - hubble-relay-client-certs + - hubble-relay-server-certs + verbs: + - update + - apiGroups: + - "" + resources: + - configmaps + resourceNames: + - hubble-ca-cert + verbs: + - update + - apiGroups: + - "" + resources: + - secrets + resourceNames: + - hubble-ca-secret + verbs: + - get +{% endif %} +--- +# Source: cilium/templates/hubble-relay-clusterrole.yaml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hubble-relay +rules: + - apiGroups: + - "" + resources: + - componentstatuses + - endpoints + - namespaces + - nodes + - pods + - services + verbs: + - get + - list + - watch +{% if cilium_enable_hubble_ui %} +--- +# Source: cilium/templates/hubble-ui-clusterrole.yaml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hubble-ui +rules: + - apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - componentstatuses + - endpoints + - namespaces + - nodes + - pods + - services + verbs: + - get + - list + - watch + - 
apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - get + - list + - watch + - apiGroups: + - cilium.io + resources: + - "*" + verbs: + - get + - list + - watch +{% endif %} diff --git a/roles/network_plugin/cilium/templates/hubble/crb.yml.j2 b/roles/network_plugin/cilium/templates/hubble/crb.yml.j2 new file mode 100644 index 00000000000..e5b8976e80e --- /dev/null +++ b/roles/network_plugin/cilium/templates/hubble/crb.yml.j2 @@ -0,0 +1,46 @@ +{% if cilium_hubble_tls_generate %} +--- +# Source: cilium/templates/hubble-generate-certs-clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: hubble-generate-certs +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: hubble-generate-certs +subjects: +- kind: ServiceAccount + name: hubble-generate-certs + namespace: kube-system +{% endif %} +--- +# Source: cilium/templates/hubble-relay-clusterrolebinding.yaml +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hubble-relay +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: hubble-relay +subjects: +- kind: ServiceAccount + namespace: kube-system + name: hubble-relay +{% if cilium_enable_hubble_ui %} +--- +# Source: cilium/templates/hubble-ui-clusterrolebinding.yaml +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hubble-ui +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: hubble-ui +subjects: +- kind: ServiceAccount + namespace: kube-system + name: hubble-ui +{% endif %} diff --git a/roles/network_plugin/cilium/templates/hubble/cronjob.yml.j2 b/roles/network_plugin/cilium/templates/hubble/cronjob.yml.j2 new file mode 100644 index 00000000000..8010c5252f0 --- /dev/null +++ b/roles/network_plugin/cilium/templates/hubble/cronjob.yml.j2 @@ -0,0 +1,38 @@ +--- +# Source: cilium/templates/hubble-generate-certs-cronjob.yaml +apiVersion: batch/v1 +kind: CronJob +metadata: + name: hubble-generate-certs + namespace: kube-system + labels: + k8s-app: hubble-generate-certs +spec: + schedule: "0 0 1 */4 *" + concurrencyPolicy: Forbid + jobTemplate: + spec: + template: + metadata: + labels: + k8s-app: hubble-generate-certs + spec: + serviceAccount: hubble-generate-certs + serviceAccountName: hubble-generate-certs + containers: + - name: certgen + image: "{{ cilium_hubble_certgen_image_repo }}:{{ cilium_hubble_certgen_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + command: + - "/usr/bin/cilium-certgen" + # Because this is executed as a job, we pass the values as command + # line args instead of via config map. This allows users to inspect + # the values used in past runs by inspecting the completed pod. 
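+              # Illustrative example (hypothetical values, not a Kubespray default): with cilium_certgen_args set to {"hubble-ca-generate": true}, the loop below would render a single flag: - "--hubble-ca-generate=true"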
+ args: + {% for key, value in cilium_certgen_args.items() -%} + - "--{{ key }}={{ value }}" + {% endfor %} + + hostNetwork: true + restartPolicy: OnFailure + ttlSecondsAfterFinished: 1800 diff --git a/roles/network_plugin/cilium/templates/hubble/deploy.yml.j2 b/roles/network_plugin/cilium/templates/hubble/deploy.yml.j2 new file mode 100644 index 00000000000..fbd3b2fa859 --- /dev/null +++ b/roles/network_plugin/cilium/templates/hubble/deploy.yml.j2 @@ -0,0 +1,199 @@ +--- +# Source: cilium/templates/hubble-relay-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: hubble-relay + labels: + k8s-app: hubble-relay + namespace: kube-system +spec: + replicas: 1 + selector: + matchLabels: + k8s-app: hubble-relay + strategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + annotations: + labels: + k8s-app: hubble-relay + spec: + affinity: + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: "k8s-app" + operator: In + values: + - cilium + topologyKey: "kubernetes.io/hostname" + containers: + - name: hubble-relay + image: "{{ cilium_hubble_relay_image_repo }}:{{ cilium_hubble_relay_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + command: + - hubble-relay + args: + - serve + ports: + - name: grpc + containerPort: 4245 +{% if cilium_enable_prometheus %} + - name: prometheus + containerPort: 9966 + protocol: TCP +{% endif %} + readinessProbe: + tcpSocket: + port: grpc + livenessProbe: + tcpSocket: + port: grpc + volumeMounts: + - mountPath: /var/run/cilium + name: hubble-sock-dir + readOnly: true + - mountPath: /etc/hubble-relay + name: config + readOnly: true + {% if cilium_hubble_tls_generate -%} + - mountPath: /var/lib/hubble-relay/tls + name: tls + readOnly: true + {%- endif %} + + restartPolicy: Always + serviceAccount: hubble-relay + serviceAccountName: hubble-relay + terminationGracePeriodSeconds: 0 + volumes: + - configMap: + name: hubble-relay-config + items: + - key: config.yaml + path: config.yaml + name: config + - hostPath: + path: /var/run/cilium + type: Directory + name: hubble-sock-dir + {% if cilium_hubble_tls_generate -%} + - projected: + sources: + - secret: + name: hubble-relay-client-certs + items: + - key: ca.crt + path: hubble-server-ca.crt + - key: tls.crt + path: client.crt + - key: tls.key + path: client.key + - secret: + name: hubble-server-certs + items: + - key: tls.crt + path: server.crt + - key: tls.key + path: server.key + name: tls + {%- endif %} + +{% if cilium_enable_hubble_ui %} +--- +# Source: cilium/templates/hubble-ui/deployment.yaml +kind: Deployment +apiVersion: apps/v1 +metadata: + namespace: kube-system + labels: + k8s-app: hubble-ui + name: hubble-ui +spec: + replicas: 1 + selector: + matchLabels: + k8s-app: hubble-ui + template: + metadata: + annotations: + labels: + k8s-app: hubble-ui + spec: + securityContext: + runAsUser: 1001 + serviceAccount: hubble-ui + serviceAccountName: hubble-ui + containers: + - name: frontend + image: "{{ cilium_hubble_ui_image_repo }}:{{ cilium_hubble_ui_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + ports: + - containerPort: 8081 + name: http + volumeMounts: + - name: hubble-ui-nginx-conf + mountPath: /etc/nginx/conf.d/default.conf + subPath: nginx.conf + - name: tmp-dir + mountPath: /tmp + resources: + {} + - name: backend + image: "{{ cilium_hubble_ui_backend_image_repo }}:{{ cilium_hubble_ui_backend_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + env: + - name: 
EVENTS_SERVER_PORT + value: "8090" + {% if cilium_hubble_tls_generate -%} + - name: TLS_TO_RELAY_ENABLED + value: "true" + - name: FLOWS_API_ADDR + value: "hubble-relay:443" + - name: TLS_RELAY_SERVER_NAME + value: ui.{{ cilium_cluster_name }}.hubble-grpc.cilium.io + - name: TLS_RELAY_CA_CERT_FILES + value: /var/lib/hubble-ui/certs/hubble-server-ca.crt + - name: TLS_RELAY_CLIENT_CERT_FILE + value: /var/lib/hubble-ui/certs/client.crt + - name: TLS_RELAY_CLIENT_KEY_FILE + value: /var/lib/hubble-ui/certs/client.key + {% else -%} + - name: FLOWS_API_ADDR + value: "hubble-relay:80" + {% endif %} + + volumeMounts: + - name: tls + mountPath: /var/lib/hubble-ui/certs + readOnly: true + ports: + - containerPort: 8090 + name: grpc + resources: + {} + volumes: + - configMap: + defaultMode: 420 + name: hubble-ui-nginx + name: hubble-ui-nginx-conf + - projected: + sources: + - secret: + name: hubble-relay-client-certs + items: + - key: ca.crt + path: hubble-server-ca.crt + - key: tls.crt + path: client.crt + - key: tls.key + path: client.key + name: tls + - emptyDir: {} + name: tmp-dir +{% endif %} diff --git a/roles/network_plugin/cilium/templates/hubble/job.yml.j2 b/roles/network_plugin/cilium/templates/hubble/job.yml.j2 new file mode 100644 index 00000000000..9ad3ae318a6 --- /dev/null +++ b/roles/network_plugin/cilium/templates/hubble/job.yml.j2 @@ -0,0 +1,34 @@ +--- +# Source: cilium/templates/hubble-generate-certs-job.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: hubble-generate-certs + namespace: kube-system + labels: + k8s-app: hubble-generate-certs +spec: + template: + metadata: + labels: + k8s-app: hubble-generate-certs + spec: + serviceAccount: hubble-generate-certs + serviceAccountName: hubble-generate-certs + containers: + - name: certgen + image: "{{ cilium_hubble_certgen_image_repo }}:{{ cilium_hubble_certgen_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + command: + - "/usr/bin/cilium-certgen" + # Because this is executed as a job, we pass the values as command + # line args instead of via config map. This allows users to inspect + # the values used in past runs by inspecting the completed pod. 
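+              # Added note (assumption, inferred from the matching CronJob template above): this one-shot Job generates the Hubble TLS certificates when the manifests are first applied, while the hubble-generate-certs CronJob re-runs the same certgen image on its schedule to rotate them.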
+ args: + {% for key, value in cilium_certgen_args.items() -%} + - "--{{ key }}={{ value }}" + {% endfor %} + + hostNetwork: true + restartPolicy: OnFailure + ttlSecondsAfterFinished: 1800 diff --git a/roles/network_plugin/cilium/templates/hubble/sa.yml.j2 b/roles/network_plugin/cilium/templates/hubble/sa.yml.j2 new file mode 100644 index 00000000000..46de08179d4 --- /dev/null +++ b/roles/network_plugin/cilium/templates/hubble/sa.yml.j2 @@ -0,0 +1,25 @@ +{% if cilium_hubble_tls_generate %} +--- +# Source: cilium/templates/hubble-generate-certs-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: hubble-generate-certs + namespace: kube-system +{% endif %} +--- +# Source: cilium/templates/hubble-relay-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: hubble-relay + namespace: kube-system +{% if cilium_enable_hubble_ui %} +--- +# Source: cilium/templates/hubble-ui-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: hubble-ui + namespace: kube-system +{% endif %} diff --git a/roles/network_plugin/cilium/templates/hubble/service.yml.j2 b/roles/network_plugin/cilium/templates/hubble/service.yml.j2 new file mode 100644 index 00000000000..982487cb023 --- /dev/null +++ b/roles/network_plugin/cilium/templates/hubble/service.yml.j2 @@ -0,0 +1,106 @@ +{% if cilium_enable_prometheus or cilium_enable_hubble_metrics %} +--- +# Source: cilium/templates/cilium-agent-service.yaml +kind: Service +apiVersion: v1 +metadata: + name: hubble-metrics + namespace: kube-system + annotations: + prometheus.io/scrape: 'true' + prometheus.io/port: "{{ cilium_hubble_scrape_port }}" + labels: + k8s-app: hubble +spec: + clusterIP: None + type: ClusterIP + ports: + - name: hubble-metrics + port: 9091 + protocol: TCP + targetPort: hubble-metrics + selector: + k8s-app: cilium +--- +# Source: cilium/templates/hubble-relay/metrics-service.yaml +# We use a separate service from hubble-relay which can be exposed externally +kind: Service +apiVersion: v1 +metadata: + name: hubble-relay-metrics + namespace: kube-system + labels: + k8s-app: hubble-relay + annotations: + prometheus.io/scrape: 'true' + prometheus.io/port: "9966" +spec: + clusterIP: None + type: ClusterIP + selector: + k8s-app: hubble-relay + ports: + - name: metrics + port: 9966 + protocol: TCP + targetPort: prometheus + +{% endif %} +--- +# Source: cilium/templates/hubble-relay-service.yaml +kind: Service +apiVersion: v1 +metadata: + name: hubble-relay + namespace: kube-system + labels: + k8s-app: hubble-relay +spec: + type: ClusterIP + selector: + k8s-app: hubble-relay + ports: + - protocol: TCP + {% if cilium_hubble_tls_generate -%} + port: 443 + {% else -%} + port: 80 + {% endif -%} + targetPort: 4245 +--- +{% if cilium_enable_hubble_ui %} +# Source: cilium/templates/hubble-ui-service.yaml +kind: Service +apiVersion: v1 +metadata: + name: hubble-ui + labels: + k8s-app: hubble-ui + namespace: kube-system +spec: + selector: + k8s-app: hubble-ui + ports: + - name: http + port: 80 + targetPort: 8081 + type: ClusterIP +--- +{% endif %} +# Source: cilium/templates/hubble/peer-service.yaml +apiVersion: v1 +kind: Service +metadata: + name: hubble-peer + namespace: kube-system + labels: + k8s-app: cilium +spec: + selector: + k8s-app: cilium + ports: + - name: peer-service + port: 443 + protocol: TCP + targetPort: 4244 + internalTrafficPolicy: Local diff --git a/roles/network_plugin/cilium/templates/values.yaml.j2 b/roles/network_plugin/cilium/templates/values.yaml.j2 deleted file mode 100644 index 
2bd101c2f43..00000000000 --- a/roles/network_plugin/cilium/templates/values.yaml.j2 +++ /dev/null @@ -1,172 +0,0 @@ -#jinja2: trim_blocks: True, lstrip_blocks: True -MTU: {{ cilium_mtu }} -debug: - enabled: {{ cilium_debug | to_json }} - -image: - repository: {{ cilium_image_repo }} - tag: {{ cilium_image_tag }} - -k8sServiceHost: "auto" -k8sServicePort: "auto" - -ipv4: - enabled: {{ cilium_enable_ipv4 | to_json }} -ipv6: - enabled: {{ cilium_enable_ipv6 | to_json }} - -l2announcements: - enabled: {{ cilium_l2announcements | to_json }} - -bgpControlPlane: - enabled: {{ cilium_enable_bgp_control_plane | to_json }} - -healthPort: {{ cilium_agent_health_port }} - -identityAllocationMode: {{ cilium_identity_allocation_mode }} - -tunnelProtocol: {{ cilium_tunnel_mode }} - -loadbalancer: - mode: {{ cilium_loadbalancer_mode }} - -kubeProxyReplacement: {{ cilium_kube_proxy_replacement | to_json }} - -{% if cilium_dns_proxy_enable_transparent_mode is defined %} -dnsProxy: - enableTransparentMode: {{ cilium_dns_proxy_enable_transparent_mode | to_json }} -{% endif %} - -extraVolumes: - {{ cilium_agent_extra_volumes | to_nice_yaml(indent=2) | indent(2) }} - -extraVolumeMounts: - {{ cilium_agent_extra_volume_mounts | to_nice_yaml(indent=2) | indent(2) }} - -extraArgs: - {{ cilium_agent_extra_args | to_nice_yaml(indent=2) | indent(2) }} - -bpf: - masquerade: {{ cilium_enable_bpf_masquerade | to_json }} - hostLegacyRouting: {{ cilium_enable_host_legacy_routing | to_json }} - monitorAggregation: {{ cilium_monitor_aggregation }} - preallocateMaps: {{ cilium_preallocate_bpf_maps | to_json }} - mapDynamicSizeRatio: {{ cilium_bpf_map_dynamic_size_ratio }} - -cni: - exclusive: {{ cilium_cni_exclusive | to_json }} - logFile: {{ cilium_cni_log_file }} - -autoDirectNodeRoutes: {{ cilium_auto_direct_node_routes | to_json }} - -ipv4NativeRoutingCIDR: {{ cilium_native_routing_cidr }} -ipv6NativeRoutingCIDR: {{ cilium_native_routing_cidr_ipv6 }} - -encryption: - enabled: {{ cilium_encryption_enabled | to_json }} -{% if cilium_encryption_enabled %} - type: {{ cilium_encryption_type }} -{% if cilium_encryption_type == 'wireguard' %} - nodeEncryption: {{ cilium_encryption_node_encryption | to_json }} -{% endif %} -{% endif %} - -bandwidthManager: - enabled: {{ cilium_enable_bandwidth_manager | to_json }} - bbr: {{ cilium_enable_bandwidth_manager_bbr | to_json }} - -ipMasqAgent: - enabled: {{ cilium_ip_masq_agent_enable | to_json }} -{% if cilium_ip_masq_agent_enable %} - config: - nonMasqueradeCIDRs: {{ cilium_non_masquerade_cidrs }} - masqLinkLocal: {{ cilium_masq_link_local | to_json }} - masqLinkLocalIPv6: {{ cilium_masq_link_local_ipv6 | to_json }} - # cilium_ip_masq_resync_interval -{% endif %} - -hubble: - peerService: - clusterDomain: {{ cilium_hubble_peer_service_cluster_domain }} - enabled: {{ cilium_enable_hubble | to_json }} - relay: - enabled: {{ cilium_enable_hubble | to_json }} - image: - repository: {{ cilium_hubble_relay_image_repo }} - tag: {{ cilium_hubble_relay_image_tag }} - ui: - enabled: {{ cilium_enable_hubble_ui | to_json }} - backend: - image: - repository: {{ cilium_hubble_ui_backend_image_repo }} - tag: {{ cilium_hubble_ui_backend_image_tag }} - frontend: - image: - repository: {{ cilium_hubble_ui_image_repo }} - tag: {{ cilium_hubble_ui_image_tag }} - metrics: - enabled: {{ cilium_hubble_metrics | to_json }} - export: - fileMaxBackups: {{ cilium_hubble_export_file_max_backups }} - fileMaxSizeMb: {{ cilium_hubble_export_file_max_size_mb }} - dynamic: - enabled: {{ 
cilium_hubble_export_dynamic_enabled | to_json }} - config: - content: - {{ cilium_hubble_export_dynamic_config_content | to_nice_yaml(indent=10) | indent(10) }} - -gatewayAPI: - enabled: {{ cilium_gateway_api_enabled | to_json }} - -ipam: - mode: {{ cilium_ipam_mode }} - operator: - clusterPoolIPv4PodCIDRList: - - {{ cilium_pool_cidr | default(kube_pods_subnet) }} - clusterPoolIPv4MaskSize: {{ cilium_pool_mask_size | default(kube_network_node_prefix) }} - - clusterPoolIPv6PodCIDRList: - - {{ cilium_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6) }} - clusterPoolIPv6MaskSize: {{ cilium_pool_mask_size_ipv6 | default(kube_network_node_prefix_ipv6) }} - -cgroup: - autoMount: - enabled: {{ cilium_cgroup_auto_mount | to_json }} - hostRoot: {{ cilium_cgroup_host_root }} - -operator: - image: - repository: {{ cilium_operator_image_repo }} - tag: {{ cilium_operator_image_tag }} - replicas: {{ cilium_operator_replicas }} - extraArgs: - {{ cilium_operator_extra_args | to_nice_yaml(indent=2) | indent(4) }} - extraVolumes: - {{ cilium_operator_extra_volumes | to_nice_yaml(indent=2) | indent(4) }} - extraVolumeMounts: - {{ cilium_operator_extra_volume_mounts | to_nice_yaml(indent=2) | indent(4) }} - tolerations: - {{ cilium_operator_tolerations | to_nice_yaml(indent=2) | indent(4) }} - -cluster: - id: {{ cilium_cluster_id }} - name: {{ cilium_cluster_name }} - -enableIPv4Masquerade: {{ cilium_enable_ipv4_masquerade | to_json }} -enableIPv6Masquerade: {{ cilium_enable_ipv6_masquerade | to_json }} - -hostFirewall: - enabled: {{ cilium_enable_host_firewall | to_json }} - -certgen: - image: - repository: {{ cilium_hubble_certgen_image_repo }} - tag: {{ cilium_hubble_certgen_image_tag }} - -envoy: - image: - repository: {{ cilium_hubble_envoy_image_repo }} - tag: {{ cilium_hubble_envoy_image_tag }} - -extraConfig: - {{ cilium_config_extra_vars | to_yaml | indent(2) }} diff --git a/roles/network_plugin/flannel/tasks/main.yml b/roles/network_plugin/flannel/tasks/main.yml index 8fea555e44f..94603fcf52c 100644 --- a/roles/network_plugin/flannel/tasks/main.yml +++ b/roles/network_plugin/flannel/tasks/main.yml @@ -19,20 +19,3 @@ register: flannel_node_manifests when: - inventory_hostname == groups['kube_control_plane'][0] - -- name: Flannel | Start Resources - kube: - name: "{{ item.item.name }}" - namespace: "kube-system" - kubectl: "{{ bin_dir }}/kubectl" - resource: "{{ item.item.type }}" - filename: "{{ kube_config_dir }}/{{ item.item.file }}" - state: "latest" - with_items: "{{ flannel_node_manifests.results }}" - when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped - -- name: Flannel | Wait for flannel subnet.env file presence - wait_for: - path: /run/flannel/subnet.env - delay: 5 - timeout: 600 diff --git a/roles/network_plugin/kube-ovn/tasks/main.yml b/roles/network_plugin/kube-ovn/tasks/main.yml index 3d278462c89..a8b94279202 100644 --- a/roles/network_plugin/kube-ovn/tasks/main.yml +++ b/roles/network_plugin/kube-ovn/tasks/main.yml @@ -15,12 +15,3 @@ - {name: ovn, file: cni-ovn.yml} - {name: kube-ovn, file: cni-kube-ovn.yml} register: kube_ovn_node_manifests - -- name: Kube-OVN | Start Resources - kube: - name: "{{ item.item.name }}" - kubectl: "{{ bin_dir }}/kubectl" - filename: "{{ kube_config_dir }}/{{ item.item.file }}" - state: "latest" - with_items: "{{ kube_ovn_node_manifests.results }}" - when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped diff --git a/roles/network_plugin/kube-router/tasks/main.yml 
b/roles/network_plugin/kube-router/tasks/main.yml index 561ed688734..d47a0d1e2a6 100644 --- a/roles/network_plugin/kube-router/tasks/main.yml +++ b/roles/network_plugin/kube-router/tasks/main.yml @@ -60,25 +60,3 @@ mode: "0644" delegate_to: "{{ groups['kube_control_plane'] | first }}" run_once: true - -- name: Kube-router | Start Resources - kube: - name: "kube-router" - kubectl: "{{ bin_dir }}/kubectl" - filename: "{{ kube_config_dir }}/kube-router.yml" - resource: "ds" - namespace: "kube-system" - state: "latest" - delegate_to: "{{ groups['kube_control_plane'] | first }}" - run_once: true - -- name: Kube-router | Wait for kube-router pods to be ready - command: "{{ kubectl }} -n kube-system get pods -l k8s-app=kube-router -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" # noqa ignore-errors - register: pods_not_ready - until: pods_not_ready.stdout.find("kube-router")==-1 - retries: 30 - delay: 10 - ignore_errors: true - delegate_to: "{{ groups['kube_control_plane'] | first }}" - run_once: true - changed_when: false diff --git a/roles/network_plugin/meta/main.yml b/roles/network_plugin/meta/main.yml index da0cf75efa1..1f2f99df483 100644 --- a/roles/network_plugin/meta/main.yml +++ b/roles/network_plugin/meta/main.yml @@ -4,7 +4,7 @@ dependencies: when: kube_network_plugin != 'none' - role: network_plugin/cilium - when: kube_network_plugin == 'cilium' or cilium_deploy_additionally + when: kube_network_plugin == 'cilium' or cilium_deploy_additionally | default(false) | bool tags: - cilium @@ -18,6 +18,11 @@ dependencies: tags: - flannel + - role: network_plugin/weave + when: kube_network_plugin == 'weave' + tags: + - weave + - role: network_plugin/macvlan when: kube_network_plugin == 'macvlan' tags: diff --git a/roles/network_plugin/multus/tasks/main.yml b/roles/network_plugin/multus/tasks/main.yml index 3b0819d81d9..0869da7b54e 100644 --- a/roles/network_plugin/multus/tasks/main.yml +++ b/roles/network_plugin/multus/tasks/main.yml @@ -27,28 +27,10 @@ - {name: multus-daemonset-crio, file: multus-daemonset-crio.yml, type: daemonset, engine: crio } register: multus_manifest_2 vars: - host_query: "*|[?container_manager=='{{ container_manager }}']|[0].inventory_hostname" - vars_from_node: "{{ hostvars | json_query(host_query) }}" + query: "*|[?container_manager=='{{ container_manager }}']|[0].inventory_hostname" + vars_from_node: "{{ hostvars | json_query(query) }}" delegate_to: "{{ groups['kube_control_plane'][0] }}" when: - item.engine in container_manager_types - hostvars[inventory_hostname].container_manager == item.engine - inventory_hostname == vars_from_node - -- name: Multus | Start resources - kube: - name: "{{ item.item.name }}" - namespace: "kube-system" - kubectl: "{{ bin_dir }}/kubectl" - resource: "{{ item.item.type }}" - filename: "{{ kube_config_dir }}/{{ item.item.file }}" - state: "latest" - delegate_to: "{{ groups['kube_control_plane'][0] }}" - run_once: true - with_items: "{{ (multus_manifest_1.results | default([])) + (multus_nodes_list | map('extract', hostvars, 'multus_manifest_2') | map('default', []) | list | json_query('[].results')) }}" - loop_control: - label: "{{ item.item.name if item != None else 'skipped' }}" - vars: - multus_nodes_list: "{{ groups['k8s_cluster'] if ansible_play_batch | length == ansible_play_hosts_all | length else ansible_play_batch }}" - when: - - not item is skipped diff --git a/roles/network_plugin/weave/defaults/main.yml b/roles/network_plugin/weave/defaults/main.yml new file mode 100644 index 
00000000000..337d8e79982 --- /dev/null +++ b/roles/network_plugin/weave/defaults/main.yml @@ -0,0 +1,64 @@ +--- + +# Weave's network password for encryption; if null, network encryption is disabled. +weave_password: ~ + +# Set to true to disable checking for new Weave Net versions (default is false, +# i.e. the check is enabled) +weave_checkpoint_disable: false + +# Soft limit on the number of connections between peers. Defaults to 100. +weave_conn_limit: 100 + +# Weave Net defaults to enabling hairpin on the bridge side of the veth pair +# for attached containers. If you need to disable hairpin, e.g. because your +# kernel is one of those that can panic when hairpin is enabled, disable it by +# setting `weave_hairpin_mode: false` (rendered as `HAIRPIN_MODE=false`). +weave_hairpin_mode: true + +# The range of IP addresses used by Weave Net and the subnet they are placed in +# (CIDR format; default 10.32.0.0/12) +weave_ipalloc_range: "{{ kube_pods_subnets }}" + +# Set to false to disable the Network Policy Controller (defaults to `enable_network_policy`) +weave_expect_npc: "{{ enable_network_policy }}" + +# List of addresses of peers in the Kubernetes cluster (default is to fetch the +# list from the api-server) +weave_kube_peers: ~ + +# Set the initialization mode of the IP Address Manager (defaults to consensus +# amongst the KUBE_PEERS) +weave_ipalloc_init: ~ + +# Set the IP address used as a gateway from the Weave network to the host +# network - this is useful if you are configuring the addon as a static pod. +weave_expose_ip: ~ + +# Address and port that the Weave Net daemon will serve Prometheus-style +# metrics on (defaults to 0.0.0.0:6782) +weave_metrics_addr: ~ + +# Address and port that the Weave Net daemon will serve status requests on +# (defaults to disabled) +weave_status_addr: ~ + +# Weave Net defaults to 1376 bytes, but you can set a smaller size if your +# underlying network has a tighter limit, or set a larger size for better +# performance if your network supports jumbo frames (e.g. 8916) +weave_mtu: 1376 + +# Set to true to preserve the client source IP address when accessing Services +# annotated with `service.spec.externalTrafficPolicy=Local`. The feature works +# only with Weave IPAM (the default). 
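+# Added note: this boolean is rendered into the weave-net DaemonSet env var NO_MASQ_LOCAL ("1" when true, "0" when false) in weave-net.yml.j2.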
+weave_no_masq_local: true + +# set to nft to use nftables backend for iptables (default is iptables) +weave_iptables_backend: ~ + +# Extra variables that passing to launch.sh, useful for enabling seed mode, see +# https://www.weave.works/docs/net/latest/tasks/ipam/ipam/ +weave_extra_args: ~ + +# Extra variables for weave_npc that passing to launch.sh, useful for change log level, ex --log-level=error +weave_npc_extra_args: ~ diff --git a/roles/network_plugin/weave/meta/main.yml b/roles/network_plugin/weave/meta/main.yml new file mode 100644 index 00000000000..9b7065f1854 --- /dev/null +++ b/roles/network_plugin/weave/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - role: network_plugin/cni diff --git a/roles/network_plugin/weave/tasks/main.yml b/roles/network_plugin/weave/tasks/main.yml new file mode 100644 index 00000000000..ccb43135219 --- /dev/null +++ b/roles/network_plugin/weave/tasks/main.yml @@ -0,0 +1,12 @@ +--- +- name: Weave | Create manifest + template: + src: weave-net.yml.j2 + dest: "{{ kube_config_dir }}/weave-net.yml" + mode: "0644" + +- name: Weave | Fix nodePort for Weave + template: + src: 10-weave.conflist.j2 + dest: /etc/cni/net.d/10-weave.conflist + mode: "0644" diff --git a/roles/network_plugin/weave/templates/10-weave.conflist.j2 b/roles/network_plugin/weave/templates/10-weave.conflist.j2 new file mode 100644 index 00000000000..9aab7e98c07 --- /dev/null +++ b/roles/network_plugin/weave/templates/10-weave.conflist.j2 @@ -0,0 +1,16 @@ +{ + "cniVersion": "0.3.0", + "name": "weave", + "plugins": [ + { + "name": "weave", + "type": "weave-net", + "hairpinMode": {{ weave_hairpin_mode | bool | lower }} + }, + { + "type": "portmap", + "capabilities": {"portMappings": true}, + "snat": true + } + ] +} diff --git a/roles/network_plugin/weave/templates/weave-net.yml.j2 b/roles/network_plugin/weave/templates/weave-net.yml.j2 new file mode 100644 index 00000000000..3a3886510ac --- /dev/null +++ b/roles/network_plugin/weave/templates/weave-net.yml.j2 @@ -0,0 +1,297 @@ +--- +apiVersion: v1 +kind: List +items: + - apiVersion: v1 + kind: ServiceAccount + metadata: + name: weave-net + labels: + name: weave-net + namespace: kube-system + - apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: weave-net + labels: + name: weave-net + rules: + - apiGroups: + - '' + resources: + - pods + - namespaces + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - extensions + resources: + - networkpolicies + verbs: + - get + - list + - watch + - apiGroups: + - 'networking.k8s.io' + resources: + - networkpolicies + verbs: + - get + - list + - watch + - apiGroups: + - '' + resources: + - nodes/status + verbs: + - patch + - update + - apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: weave-net + labels: + name: weave-net + roleRef: + kind: ClusterRole + name: weave-net + apiGroup: rbac.authorization.k8s.io + subjects: + - kind: ServiceAccount + name: weave-net + namespace: kube-system + - apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: weave-net + namespace: kube-system + labels: + name: weave-net + rules: + - apiGroups: + - '' + resources: + - configmaps + resourceNames: + - weave-net + verbs: + - get + - update + - apiGroups: + - '' + resources: + - configmaps + verbs: + - create + - apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: weave-net + namespace: kube-system + labels: + name: weave-net + roleRef: + kind: Role + name: weave-net + apiGroup: 
rbac.authorization.k8s.io + subjects: + - kind: ServiceAccount + name: weave-net + namespace: kube-system + - apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: weave-net + labels: + name: weave-net + namespace: kube-system + spec: + # Wait 5 seconds to let pod connect before rolling next pod + selector: + matchLabels: + name: weave-net + minReadySeconds: 5 + template: + metadata: + labels: + name: weave-net + spec: + initContainers: + - name: weave-init + image: {{ weave_kube_image_repo }}:{{ weave_kube_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + command: + - /home/weave/init.sh + env: + securityContext: + privileged: true + volumeMounts: + - name: cni-bin + mountPath: /host/opt + - name: cni-bin2 + mountPath: /host/home + - name: cni-conf + mountPath: /host/etc + - name: lib-modules + mountPath: /lib/modules + - name: xtables-lock + mountPath: /run/xtables.lock + readOnly: false + containers: + - name: weave + command: + - /home/weave/launch.sh + env: + - name: INIT_CONTAINER + value: "true" + - name: HOSTNAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: WEAVE_PASSWORD + valueFrom: + secretKeyRef: + name: weave-net + key: WEAVE_PASSWORD + - name: CHECKPOINT_DISABLE + value: "{{ weave_checkpoint_disable | bool | int }}" + - name: CONN_LIMIT + value: "{{ weave_conn_limit | int }}" + - name: HAIRPIN_MODE + value: "{{ weave_hairpin_mode | bool | lower }}" + - name: IPALLOC_RANGE + value: "{{ weave_ipalloc_range }}" + - name: EXPECT_NPC + value: "{{ weave_expect_npc | bool | int }}" +{% if weave_kube_peers %} + - name: KUBE_PEERS + value: "{{ weave_kube_peers }}" +{% endif %} +{% if weave_ipalloc_init %} + - name: IPALLOC_INIT + value: "{{ weave_ipalloc_init }}" +{% endif %} +{% if weave_expose_ip %} + - name: WEAVE_EXPOSE_IP + value: "{{ weave_expose_ip }}" +{% endif %} +{% if weave_metrics_addr %} + - name: WEAVE_METRICS_ADDR + value: "{{ weave_metrics_addr }}" +{% endif %} +{% if weave_status_addr %} + - name: WEAVE_STATUS_ADDR + value: "{{ weave_status_addr }}" +{% endif %} +{% if weave_iptables_backend %} + - name: IPTABLES_BACKEND + value: "{{ weave_iptables_backend }}" +{% endif %} + - name: WEAVE_MTU + value: "{{ weave_mtu | int }}" + - name: NO_MASQ_LOCAL + value: "{{ weave_no_masq_local | bool | int }}" +{% if weave_extra_args %} + - name: EXTRA_ARGS + value: "{{ weave_extra_args }}" +{% endif %} + image: {{ weave_kube_image_repo }}:{{ weave_kube_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + readinessProbe: + httpGet: + host: 127.0.0.1 + path: /status + port: 6784 + resources: + requests: + cpu: 50m + securityContext: + privileged: true + volumeMounts: + - name: weavedb + mountPath: /weavedb + - name: dbus + mountPath: /host/var/lib/dbus + readOnly: true + - mountPath: /host/etc/machine-id + name: cni-machine-id + readOnly: true + - name: xtables-lock + mountPath: /run/xtables.lock + readOnly: false + - name: weave-npc + env: + - name: HOSTNAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName +{% if weave_npc_extra_args %} + - name: EXTRA_ARGS + value: "{{ weave_npc_extra_args }}" +{% endif %} + image: {{ weave_npc_image_repo }}:{{ weave_npc_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + resources: + requests: + cpu: 50m + securityContext: + privileged: true + volumeMounts: + - name: xtables-lock + mountPath: /run/xtables.lock + readOnly: false + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + hostPID: false + restartPolicy: Always + securityContext: + 
seLinuxOptions: {} + serviceAccountName: weave-net + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + volumes: + - name: weavedb + hostPath: + path: /var/lib/weave + - name: cni-bin + hostPath: + path: /opt + - name: cni-bin2 + hostPath: + path: /home + - name: cni-conf + hostPath: + path: /etc + - name: cni-machine-id + hostPath: + path: /etc/machine-id + - name: dbus + hostPath: + path: /var/lib/dbus + - name: lib-modules + hostPath: + path: /lib/modules + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate + priorityClassName: system-node-critical + updateStrategy: + rollingUpdate: + maxUnavailable: {{ serial | default('20%') }} + type: RollingUpdate + - apiVersion: v1 + kind: Secret + metadata: + name: weave-net + namespace: kube-system + data: + WEAVE_PASSWORD: "{{ weave_password | default("") | b64encode }}" diff --git a/scripts/gitlab-runner.sh b/scripts/gitlab-runner.sh deleted file mode 100644 index c05ee7ea188..00000000000 --- a/scripts/gitlab-runner.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/sh - -docker run -d --name gitlab-runner --restart always -v /srv/gitlab-runner/cache:/srv/gitlab-runner/cache -v /srv/gitlab-runner/config:/etc/gitlab-runner -v /var/run/docker.sock:/var/run/docker.sock gitlab/gitlab-runner:v1.10.0 - -# -#/srv/gitlab-runner/config# cat config.toml -#concurrent = 10 -#check_interval = 1 - -#[[runners]] -# name = "2edf3d71fe19" -# url = "https://gitlab.com" -# token = "THE TOKEN-CHANGEME" -# executor = "docker" -# [runners.docker] -# tls_verify = false -# image = "docker:latest" -# privileged = true -# disable_cache = false -# cache_dir = "/srv/gitlab-runner/cache" -# volumes = ["/var/run/docker.sock:/var/run/docker.sock", "/srv/gitlab-runner/cache:/cache:rw"] -# [runners.cache] diff --git a/test-infra/vagrant-docker/Dockerfile b/test-infra/vagrant-docker/Dockerfile deleted file mode 100644 index 7a0f0e08da4..00000000000 --- a/test-infra/vagrant-docker/Dockerfile +++ /dev/null @@ -1,16 +0,0 @@ -# Docker image published at quay.io/kubespray/vagrant - -ARG KUBESPRAY_VERSION -FROM quay.io/kubespray/kubespray:${KUBESPRAY_VERSION} - -ENV VAGRANT_VERSION=2.3.7 -ENV VAGRANT_DEFAULT_PROVIDER=libvirt -ENV VAGRANT_ANSIBLE_TAGS=facts - -RUN apt-get update && apt-get install -y wget libvirt-dev openssh-client rsync git build-essential - -# Install Vagrant -RUN wget https://releases.hashicorp.com/vagrant/${VAGRANT_VERSION}/vagrant_${VAGRANT_VERSION}-1_amd64.deb && \ - dpkg -i vagrant_${VAGRANT_VERSION}-1_amd64.deb && \ - rm vagrant_${VAGRANT_VERSION}-1_amd64.deb && \ - vagrant plugin install vagrant-libvirt diff --git a/test-infra/vagrant-docker/README.md b/test-infra/vagrant-docker/README.md deleted file mode 100644 index 36dcb9e9622..00000000000 --- a/test-infra/vagrant-docker/README.md +++ /dev/null @@ -1,24 +0,0 @@ -# vagrant docker image - -This image is used for the vagrant CI jobs. It is using the libvirt driver. - -## Usage - -```console -$ docker run --net host --rm -it -v /var/run/libvirt/libvirt-sock:/var/run/libvirt/libvirt-sock quay.io/kubespray/vagrant -$ vagrant up -Bringing machine 'k8s-1' up with 'libvirt' provider... -Bringing machine 'k8s-2' up with 'libvirt' provider... -Bringing machine 'k8s-3' up with 'libvirt' provider... -[...] -``` - -## Cache - -You can set `/root/kubespray_cache` as a volume to keep cache between runs. 
- -## Building - -```shell -./build.sh v2.12.5 -``` diff --git a/test-infra/vagrant-docker/build.sh b/test-infra/vagrant-docker/build.sh deleted file mode 100755 index dcf54456b91..00000000000 --- a/test-infra/vagrant-docker/build.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/sh -set -euo pipefail - -if [ "$#" -ne 1 ]; then - echo "Usage: $0 tag" >&2 - exit 1 -fi - -VERSION="$1" -IMG="quay.io/kubespray/vagrant:${VERSION}" - -docker build . --build-arg "KUBESPRAY_VERSION=${VERSION}" --tag "$IMG" -docker push "$IMG" diff --git a/tests/files/custom_cni/cilium.yaml b/tests/files/custom_cni/cilium.yaml index c89ae15ebf9..a9b2069609c 100644 --- a/tests/files/custom_cni/cilium.yaml +++ b/tests/files/custom_cni/cilium.yaml @@ -1034,7 +1034,7 @@ spec: type: Unconfined containers: - name: cilium-agent - image: "quay.io/cilium/cilium:v1.16.3@sha256:62d2a09bbef840a46099ac4c69421c90f84f28d018d479749049011329aa7f28" + image: "quay.io/cilium/cilium:v1.18.1@sha256:65ab17c052d8758b2ad157ce766285e04173722df59bdee1ea6d5fda7149f0e9" imagePullPolicy: IfNotPresent command: - cilium-agent @@ -1185,7 +1185,7 @@ spec: mountPath: /tmp initContainers: - name: config - image: "quay.io/cilium/cilium:v1.16.3@sha256:62d2a09bbef840a46099ac4c69421c90f84f28d018d479749049011329aa7f28" + image: "quay.io/cilium/cilium:v1.18.1@sha256:65ab17c052d8758b2ad157ce766285e04173722df59bdee1ea6d5fda7149f0e9" imagePullPolicy: IfNotPresent command: - cilium-dbg @@ -1208,7 +1208,7 @@ spec: # Required to mount cgroup2 filesystem on the underlying Kubernetes node. # We use nsenter command with host's cgroup and mount namespaces enabled. - name: mount-cgroup - image: "quay.io/cilium/cilium:v1.16.3@sha256:62d2a09bbef840a46099ac4c69421c90f84f28d018d479749049011329aa7f28" + image: "quay.io/cilium/cilium:v1.18.1@sha256:65ab17c052d8758b2ad157ce766285e04173722df59bdee1ea6d5fda7149f0e9" imagePullPolicy: IfNotPresent env: - name: CGROUP_ROOT @@ -1245,7 +1245,7 @@ spec: drop: - ALL - name: apply-sysctl-overwrites - image: "quay.io/cilium/cilium:v1.16.3@sha256:62d2a09bbef840a46099ac4c69421c90f84f28d018d479749049011329aa7f28" + image: "quay.io/cilium/cilium:v1.18.1@sha256:65ab17c052d8758b2ad157ce766285e04173722df59bdee1ea6d5fda7149f0e9" imagePullPolicy: IfNotPresent env: - name: BIN_PATH @@ -1283,7 +1283,7 @@ spec: # from a privileged container because the mount propagation bidirectional # only works from privileged containers. 
- name: mount-bpf-fs - image: "quay.io/cilium/cilium:v1.16.3@sha256:62d2a09bbef840a46099ac4c69421c90f84f28d018d479749049011329aa7f28" + image: "quay.io/cilium/cilium:v1.18.1@sha256:65ab17c052d8758b2ad157ce766285e04173722df59bdee1ea6d5fda7149f0e9" imagePullPolicy: IfNotPresent args: - 'mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf' @@ -1299,7 +1299,7 @@ spec: mountPath: /sys/fs/bpf mountPropagation: Bidirectional - name: clean-cilium-state - image: "quay.io/cilium/cilium:v1.16.3@sha256:62d2a09bbef840a46099ac4c69421c90f84f28d018d479749049011329aa7f28" + image: "quay.io/cilium/cilium:v1.18.1@sha256:65ab17c052d8758b2ad157ce766285e04173722df59bdee1ea6d5fda7149f0e9" imagePullPolicy: IfNotPresent command: - /init-container.sh @@ -1346,7 +1346,7 @@ spec: mountPath: /var/run/cilium # wait-for-kube-proxy # Install the CNI binaries in an InitContainer so we don't have a writable host mount in the agent - name: install-cni-binaries - image: "quay.io/cilium/cilium:v1.16.3@sha256:62d2a09bbef840a46099ac4c69421c90f84f28d018d479749049011329aa7f28" + image: "quay.io/cilium/cilium:v1.18.1@sha256:65ab17c052d8758b2ad157ce766285e04173722df59bdee1ea6d5fda7149f0e9" imagePullPolicy: IfNotPresent command: - "/install-plugin.sh" @@ -1685,7 +1685,7 @@ spec: spec: containers: - name: cilium-operator - image: "quay.io/cilium/operator-generic:v1.16.3@sha256:6e2925ef47a1c76e183c48f95d4ce0d34a1e5e848252f910476c3e11ce1ec94b" + image: "quay.io/cilium/operator-generic:v1.18.1@sha256:97f4553afa443465bdfbc1cc4927c93f16ac5d78e4dd2706736e7395382201bc" imagePullPolicy: IfNotPresent command: - cilium-operator-generic