diff --git a/nova/core/galaxy.yml b/nova/core/galaxy.yml index b50d195e8..d9c4dd7d2 100644 --- a/nova/core/galaxy.yml +++ b/nova/core/galaxy.yml @@ -1,6 +1,6 @@ namespace: nova name: core -version: 6.10.17 +version: 7.1.10 readme: README.md authors: - https://github.com/novateams diff --git a/nova/core/roles/accounts/tasks/routeros.yml b/nova/core/roles/accounts/tasks/routeros.yml index 8d0c04b34..79bedb88c 100644 --- a/nova/core/roles/accounts/tasks/routeros.yml +++ b/nova/core/roles/accounts/tasks/routeros.yml @@ -13,7 +13,7 @@ ansible_deployer_password: "{{ admin_accounts_with_password | selectattr('username', 'equalto', ansible_deployer_username) | map(attribute='password') | first }}" -- name: Adding RSA ssh key for following users... +- name: Adding ssh key for following users... community.routeros.command: commands: - /file print file=ssh_user_key_{{ item.username }}.txt @@ -23,9 +23,7 @@ loop: "{{ admin_accounts_with_password }}" loop_control: label: "{{ item.username }}" - when: - - item.ssh_key is defined - - item.ssh_key is search("ssh-rsa") # Because EC is not supported + when: item.ssh_key is defined # Required because the RouterOS SSH MaxAuthTries default is too small - name: Allowing password login... diff --git a/nova/core/roles/caddy/tasks/main.yml b/nova/core/roles/caddy/tasks/main.yml index 04adb0c8a..9aed7873f 100644 --- a/nova/core/roles/caddy/tasks/main.yml +++ b/nova/core/roles/caddy/tasks/main.yml @@ -18,6 +18,7 @@ community.docker.docker_compose_v2: project_src: "{{ caddy_config_folder }}" state: present + build: never wait: true - name: Formatting Caddyfile... @@ -31,6 +32,7 @@ community.docker.docker_compose_v2: project_src: "{{ caddy_config_folder }}" state: restarted + build: never wait: true - name: Including Caddyfile API configuration tasks... diff --git a/nova/core/roles/configure_networking/defaults/main.yml b/nova/core/roles/configure_networking/defaults/main.yml index 74172b610..1e08550ca 100644 --- a/nova/core/roles/configure_networking/defaults/main.yml +++ b/nova/core/roles/configure_networking/defaults/main.yml @@ -98,10 +98,15 @@ configure_networking_panos_boot_wait_time: 60 # Proxmox # ########### +# How many seconds to wait for different networking configuration commands to complete over the Qemu Guest Agent +# Increasing this may help on slower systems or when Windows is running sysprep on first boot +# There's a separate timeout for waiting until the command sent over the guest agent has completed, called configure_networking_command_wait_timeout +configure_networking_proxmox_timeout: 300 + # This is a list of interfaces to exclude when configuring networking on Proxmox VMs # These are typically non-physical interfaces like loopback configure_networking_proxmox_interfaces_to_exclude: - - lo + - lo # Linux loopback - lo0 # OPNsense loopback - enc0 # OPNsense encryption - pfsync0 # OPNsense pfsync diff --git a/nova/core/roles/configure_networking/tasks/proxmox/command_run_check.yml b/nova/core/roles/configure_networking/tasks/proxmox/command_run_check.yml index fe21bd194..445611680 100644 --- a/nova/core/roles/configure_networking/tasks/proxmox/command_run_check.yml +++ b/nova/core/roles/configure_networking/tasks/proxmox/command_run_check.yml @@ -1,8 +1,8 @@ --- - name: Verifying that the previous command was successful...
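# Assumption based on the Proxmox VE API docs rather than on code visible in this diff: /agent/exec returns a pid, and GET /agent/exec-status?pid=<pid> reports data.exited == 1 together with data.exitcode once the command finishes. The task below polls that endpoint with the pid registered by the previous exec call.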
ansible.builtin.uri: - url: "{{ proxmox_api_url }}/nodes/{{ proxmox_vm_info.proxmox_vms[0].node - }}/qemu/{{ proxmox_vm_info.proxmox_vms[0].vmid + url: + "{{ proxmox_api_url }}/nodes/{{ cfg_net_proxmox_node }}/qemu/{{ cfg_net_proxmox_vmid }}/agent/exec-status?pid={{ network_config_command.json.data.pid }}" headers: Authorization: PVEAPIToken={{ proxmox_defaults.api_user }}!{{ proxmox_defaults.api_token_id }}={{ proxmox_defaults.api_token_secret }} diff --git a/nova/core/roles/configure_networking/tasks/proxmox/interfaces.yml b/nova/core/roles/configure_networking/tasks/proxmox/interfaces.yml index 78e116eef..164029145 100644 --- a/nova/core/roles/configure_networking/tasks/proxmox/interfaces.yml +++ b/nova/core/roles/configure_networking/tasks/proxmox/interfaces.yml @@ -37,8 +37,7 @@ block: - name: Writing following configuration to {{ custom_vm_name | default(vm_name) }}... ansible.builtin.uri: - url: "{{ proxmox_api_url }}/nodes/{{ proxmox_vm_info.proxmox_vms[0].node - }}/qemu/{{ proxmox_vm_info.proxmox_vms[0].vmid }}/agent/file-write" + url: "{{ proxmox_api_url }}/nodes/{{ cfg_net_proxmox_node }}/qemu/{{ cfg_net_proxmox_vmid }}/agent/file-write" headers: Authorization: PVEAPIToken={{ proxmox_defaults.api_user }}!{{ proxmox_defaults.api_token_id }}={{ proxmox_defaults.api_token_secret }} method: POST @@ -64,8 +63,7 @@ - name: Rebuilding initramfs on {{ custom_vm_name | default(vm_name) }}... ansible.builtin.uri: - url: "{{ proxmox_api_url }}/nodes/{{ proxmox_vm_info.proxmox_vms[0].node - }}/qemu/{{ proxmox_vm_info.proxmox_vms[0].vmid }}/agent/exec" + url: "{{ proxmox_api_url }}/nodes/{{ cfg_net_proxmox_node }}/qemu/{{ cfg_net_proxmox_vmid }}/agent/exec" headers: Authorization: PVEAPIToken={{ proxmox_defaults.api_user }}!{{ proxmox_defaults.api_token_id }}={{ proxmox_defaults.api_token_secret }} method: POST diff --git a/nova/core/roles/configure_networking/tasks/proxmox/main.yml b/nova/core/roles/configure_networking/tasks/proxmox/main.yml index 1a1eb496b..65eae045b 100644 --- a/nova/core/roles/configure_networking/tasks/proxmox/main.yml +++ b/nova/core/roles/configure_networking/tasks/proxmox/main.yml @@ -22,8 +22,11 @@ configure_networking_mac_addresses: "{{ proxmox_vm_info.proxmox_vms[0].network | map(attribute='hardware-address', default='00:00:00:00:00:00') | reject('equalto', '00:00:00:00:00:00') | list }}" - configure_networking_hw_interfaces: "{{ proxmox_vm_info.proxmox_vms[0].network | map(attribute='name') + configure_networking_hw_interfaces: + "{{ proxmox_vm_info.proxmox_vms[0].network | map(attribute='name') | reject('in', configure_networking_proxmox_interfaces_to_exclude) | list }}" + cfg_net_proxmox_node: "{{ proxmox_vm_info.proxmox_vms[0].node }}" + cfg_net_proxmox_vmid: "{{ proxmox_vm_info.proxmox_vms[0].vmid }}" - name: Checking if network customization method exists... ansible.builtin.stat: diff --git a/nova/core/roles/configure_networking/tasks/proxmox/netplan.yml b/nova/core/roles/configure_networking/tasks/proxmox/netplan.yml index 47eb43cad..e63057ab2 100644 --- a/nova/core/roles/configure_networking/tasks/proxmox/netplan.yml +++ b/nova/core/roles/configure_networking/tasks/proxmox/netplan.yml @@ -17,8 +17,7 @@ - name: Removing any existing netplan configurations... 
ansible.builtin.uri: - url: "{{ proxmox_api_url }}/nodes/{{ proxmox_vm_info.proxmox_vms[0].node - }}/qemu/{{ proxmox_vm_info.proxmox_vms[0].vmid }}/agent/exec" + url: "{{ proxmox_api_url }}/nodes/{{ cfg_net_proxmox_node }}/qemu/{{ cfg_net_proxmox_vmid }}/agent/exec" headers: Authorization: PVEAPIToken={{ proxmox_defaults.api_user }}!{{ proxmox_defaults.api_token_id }}={{ proxmox_defaults.api_token_secret }} method: POST @@ -36,8 +35,7 @@ - name: Writing netplan configuration to {{ custom_vm_name | default(vm_name) }}... ansible.builtin.uri: - url: "{{ proxmox_api_url }}/nodes/{{ proxmox_vm_info.proxmox_vms[0].node - }}/qemu/{{ proxmox_vm_info.proxmox_vms[0].vmid }}/agent/file-write" + url: "{{ proxmox_api_url }}/nodes/{{ cfg_net_proxmox_node }}/qemu/{{ cfg_net_proxmox_vmid }}/agent/file-write" headers: Authorization: PVEAPIToken={{ proxmox_defaults.api_user }}!{{ proxmox_defaults.api_token_id }}={{ proxmox_defaults.api_token_secret }} method: POST @@ -50,8 +48,7 @@ - name: Fixing permissions on netplan configuration file... ansible.builtin.uri: - url: "{{ proxmox_api_url }}/nodes/{{ proxmox_vm_info.proxmox_vms[0].node - }}/qemu/{{ proxmox_vm_info.proxmox_vms[0].vmid }}/agent/exec" + url: "{{ proxmox_api_url }}/nodes/{{ cfg_net_proxmox_node }}/qemu/{{ cfg_net_proxmox_vmid }}/agent/exec" headers: Authorization: PVEAPIToken={{ proxmox_defaults.api_user }}!{{ proxmox_defaults.api_token_id }}={{ proxmox_defaults.api_token_secret }} method: POST @@ -69,8 +66,7 @@ - name: Applying netplan... ansible.builtin.uri: - url: "{{ proxmox_api_url }}/nodes/{{ proxmox_vm_info.proxmox_vms[0].node - }}/qemu/{{ proxmox_vm_info.proxmox_vms[0].vmid }}/agent/exec" + url: "{{ proxmox_api_url }}/nodes/{{ cfg_net_proxmox_node }}/qemu/{{ cfg_net_proxmox_vmid }}/agent/exec" headers: Authorization: PVEAPIToken={{ proxmox_defaults.api_user }}!{{ proxmox_defaults.api_token_id }}={{ proxmox_defaults.api_token_secret }} method: POST diff --git a/nova/core/roles/configure_networking/tasks/proxmox/networkd.yml b/nova/core/roles/configure_networking/tasks/proxmox/networkd.yml index cbc77cfd9..221db47cc 100644 --- a/nova/core/roles/configure_networking/tasks/proxmox/networkd.yml +++ b/nova/core/roles/configure_networking/tasks/proxmox/networkd.yml @@ -5,8 +5,7 @@ block: - name: Removing any existing network configurations... ansible.builtin.uri: - url: "{{ proxmox_api_url }}/nodes/{{ proxmox_vm_info.proxmox_vms[0].node - }}/qemu/{{ proxmox_vm_info.proxmox_vms[0].vmid }}/agent/exec" + url: "{{ proxmox_api_url }}/nodes/{{ cfg_net_proxmox_node }}/qemu/{{ cfg_net_proxmox_vmid }}/agent/exec" headers: Authorization: PVEAPIToken={{ proxmox_defaults.api_user }}!{{ proxmox_defaults.api_token_id }}={{ proxmox_defaults.api_token_secret }} method: POST @@ -44,8 +43,7 @@ - name: Writing networkd configuration to {{ custom_vm_name | default(vm_name) }}... ansible.builtin.uri: - url: "{{ proxmox_api_url }}/nodes/{{ proxmox_vm_info.proxmox_vms[0].node - }}/qemu/{{ proxmox_vm_info.proxmox_vms[0].vmid }}/agent/file-write" + url: "{{ proxmox_api_url }}/nodes/{{ cfg_net_proxmox_node }}/qemu/{{ cfg_net_proxmox_vmid }}/agent/file-write" headers: Authorization: PVEAPIToken={{ proxmox_defaults.api_user }}!{{ proxmox_defaults.api_token_id }}={{ proxmox_defaults.api_token_secret }} method: POST @@ -81,8 +79,7 @@ - name: Writing networkd links to {{ custom_vm_name | default(vm_name) }}... 
ansible.builtin.uri: - url: "{{ proxmox_api_url }}/nodes/{{ proxmox_vm_info.proxmox_vms[0].node - }}/qemu/{{ proxmox_vm_info.proxmox_vms[0].vmid }}/agent/file-write" + url: "{{ proxmox_api_url }}/nodes/{{ cfg_net_proxmox_node }}/qemu/{{ cfg_net_proxmox_vmid }}/agent/file-write" headers: Authorization: PVEAPIToken={{ proxmox_defaults.api_user }}!{{ proxmox_defaults.api_token_id }}={{ proxmox_defaults.api_token_secret }} method: POST @@ -98,8 +95,7 @@ - name: Starting networkd & udev services... ansible.builtin.uri: - url: "{{ proxmox_api_url }}/nodes/{{ proxmox_vm_info.proxmox_vms[0].node - }}/qemu/{{ proxmox_vm_info.proxmox_vms[0].vmid }}/agent/exec" + url: "{{ proxmox_api_url }}/nodes/{{ cfg_net_proxmox_node }}/qemu/{{ cfg_net_proxmox_vmid }}/agent/exec" headers: Authorization: PVEAPIToken={{ proxmox_defaults.api_user }}!{{ proxmox_defaults.api_token_id }}={{ proxmox_defaults.api_token_secret }} method: POST @@ -130,8 +126,7 @@ - name: Writing resolv.conf to {{ custom_vm_name | default(vm_name) }}... ansible.builtin.uri: - url: "{{ proxmox_api_url }}/nodes/{{ proxmox_vm_info.proxmox_vms[0].node - }}/qemu/{{ proxmox_vm_info.proxmox_vms[0].vmid }}/agent/file-write" + url: "{{ proxmox_api_url }}/nodes/{{ cfg_net_proxmox_node }}/qemu/{{ cfg_net_proxmox_vmid }}/agent/file-write" headers: Authorization: PVEAPIToken={{ proxmox_defaults.api_user }}!{{ proxmox_defaults.api_token_id }}={{ proxmox_defaults.api_token_secret }} method: POST @@ -144,9 +139,7 @@ - name: Getting OS type... ansible.builtin.uri: - url: "{{ proxmox_api_url }}/nodes/{{ proxmox_vm_info.proxmox_vms[0].node - }}/qemu/{{ proxmox_vm_info.proxmox_vms[0].vmid - }}/agent/file-read?file=/etc/os-release" + url: "{{ proxmox_api_url }}/nodes/{{ cfg_net_proxmox_node }}/qemu/{{ cfg_net_proxmox_vmid }}/agent/file-read?file=/etc/os-release" headers: Authorization: PVEAPIToken={{ proxmox_defaults.api_user }}!{{ proxmox_defaults.api_token_id }}={{ proxmox_defaults.api_token_secret }} method: GET @@ -169,7 +162,7 @@ block: - name: Configuring network interface(s) names on Debian... ansible.builtin.uri: - url: "{{ proxmox_api_url }}/nodes/{{ proxmox_vm_info.proxmox_vms[0].node }}/qemu/{{ proxmox_vm_info.proxmox_vms[0].vmid }}/agent/exec" + url: "{{ proxmox_api_url }}/nodes/{{ cfg_net_proxmox_node }}/qemu/{{ cfg_net_proxmox_vmid }}/agent/exec" headers: Authorization: PVEAPIToken={{ proxmox_defaults.api_user }}!{{ proxmox_defaults.api_token_id }}={{ proxmox_defaults.api_token_secret }} method: POST @@ -190,7 +183,7 @@ block: - name: Getting DNS from DHCP... ansible.builtin.uri: - url: "{{ proxmox_api_url }}/nodes/{{ proxmox_vm_info.proxmox_vms[0].node }}/qemu/{{ proxmox_vm_info.proxmox_vms[0].vmid }}/agent/exec" + url: "{{ proxmox_api_url }}/nodes/{{ cfg_net_proxmox_node }}/qemu/{{ cfg_net_proxmox_vmid }}/agent/exec" headers: Authorization: PVEAPIToken={{ proxmox_defaults.api_user }}!{{ proxmox_defaults.api_token_id }}={{ proxmox_defaults.api_token_secret }} method: POST @@ -211,7 +204,7 @@ block: - name: Configuring network interface(s) names on Arch... 
ansible.builtin.uri: - url: "{{ proxmox_api_url }}/nodes/{{ proxmox_vm_info.proxmox_vms[0].node }}/qemu/{{ proxmox_vm_info.proxmox_vms[0].vmid }}/agent/exec" + url: "{{ proxmox_api_url }}/nodes/{{ cfg_net_proxmox_node }}/qemu/{{ cfg_net_proxmox_vmid }}/agent/exec" headers: Authorization: PVEAPIToken={{ proxmox_defaults.api_user }}!{{ proxmox_defaults.api_token_id }}={{ proxmox_defaults.api_token_secret }} method: POST diff --git a/nova/core/roles/configure_networking/tasks/proxmox/nmcli.yml b/nova/core/roles/configure_networking/tasks/proxmox/nmcli.yml index 49ac57e1f..4fb37c6a0 100644 --- a/nova/core/roles/configure_networking/tasks/proxmox/nmcli.yml +++ b/nova/core/roles/configure_networking/tasks/proxmox/nmcli.yml @@ -3,6 +3,39 @@ become: false delegate_to: localhost block: + - name: Getting OS... + ansible.builtin.uri: + url: "{{ proxmox_api_url }}/nodes/{{ cfg_net_proxmox_node }}/qemu/{{ cfg_net_proxmox_vmid }}/agent/file-read?file=/etc/os-release" + headers: + Authorization: PVEAPIToken={{ proxmox_defaults.api_user }}!{{ proxmox_defaults.api_token_id }}={{ proxmox_defaults.api_token_secret }} + method: GET + validate_certs: "{{ proxmox_validate_certs }}" + register: network_config_os + + # Check if root is mounted read-only and remount as read-write + # This is an issue specific to Kali where sometimes root is mounted read-only + # Done here because the check needs to happen in the very early stage of the boot + - name: Remounting Kali root filesystem as read-write if needed... + when: network_config_os.json.data.content is search('ID=kali') + block: + - name: Checking that root is remounted read-write... + ansible.builtin.uri: + url: "{{ proxmox_api_url }}/nodes/{{ cfg_net_proxmox_node }}/qemu/{{ cfg_net_proxmox_vmid }}/agent/exec" + headers: + Authorization: PVEAPIToken={{ proxmox_defaults.api_user }}!{{ proxmox_defaults.api_token_id }}={{ proxmox_defaults.api_token_secret }} + method: POST + body: + command: + - sh + - -c + - "if grep -q ID=kali /etc/os-release; then while mount | grep 'on / ' | grep -q ro,; do mount -o remount,rw / || true; sleep 1; done; fi" + body_format: json + validate_certs: "{{ proxmox_validate_certs }}" + register: network_config_command + + - name: Including command run check task... + ansible.builtin.include_tasks: command_run_check.yml + - name: Templating nmcli configuration files... ansible.builtin.template: src: "{{ item.src }}" @@ -19,8 +52,7 @@ - name: Removing any existing netplan configurations... ansible.builtin.uri: - url: "{{ proxmox_api_url }}/nodes/{{ proxmox_vm_info.proxmox_vms[0].node - }}/qemu/{{ proxmox_vm_info.proxmox_vms[0].vmid }}/agent/exec" + url: "{{ proxmox_api_url }}/nodes/{{ cfg_net_proxmox_node }}/qemu/{{ cfg_net_proxmox_vmid }}/agent/exec" headers: Authorization: PVEAPIToken={{ proxmox_defaults.api_user }}!{{ proxmox_defaults.api_token_id }}={{ proxmox_defaults.api_token_secret }} method: POST @@ -46,8 +78,7 @@ - name: Writing following nmcli configuration files to {{ custom_vm_name | default(vm_name) }}... ansible.builtin.uri: - url: "{{ proxmox_api_url }}/nodes/{{ proxmox_vm_info.proxmox_vms[0].node - }}/qemu/{{ proxmox_vm_info.proxmox_vms[0].vmid }}/agent/file-write" + url: "{{ proxmox_api_url }}/nodes/{{ cfg_net_proxmox_node }}/qemu/{{ cfg_net_proxmox_vmid }}/agent/file-write" headers: Authorization: PVEAPIToken={{ proxmox_defaults.api_user }}!{{ proxmox_defaults.api_token_id }}={{ proxmox_defaults.api_token_secret }} method: POST @@ -64,8 +95,7 @@ - name: Configuring network...
ansible.builtin.uri: - url: "{{ proxmox_api_url }}/nodes/{{ proxmox_vm_info.proxmox_vms[0].node - }}/qemu/{{ proxmox_vm_info.proxmox_vms[0].vmid }}/agent/exec" + url: "{{ proxmox_api_url }}/nodes/{{ cfg_net_proxmox_node }}/qemu/{{ cfg_net_proxmox_vmid }}/agent/exec" headers: Authorization: PVEAPIToken={{ proxmox_defaults.api_user }}!{{ proxmox_defaults.api_token_id }}={{ proxmox_defaults.api_token_secret }} method: POST diff --git a/nova/core/roles/configure_networking/tasks/proxmox/opnsense.yml b/nova/core/roles/configure_networking/tasks/proxmox/opnsense.yml index f1a0c7f2f..3af99014b 100644 --- a/nova/core/roles/configure_networking/tasks/proxmox/opnsense.yml +++ b/nova/core/roles/configure_networking/tasks/proxmox/opnsense.yml @@ -1,12 +1,11 @@ --- -- name: Configuring netplan on Proxmox VM... +- name: Configuring OPNsense on Proxmox VM... become: false delegate_to: localhost block: - name: Downloading /conf/config.xml from {{ custom_vm_name | default(vm_name) }}... ansible.builtin.uri: - url: "{{ proxmox_api_url }}/nodes/{{ proxmox_vm_info.proxmox_vms[0].node - }}/qemu/{{ proxmox_vm_info.proxmox_vms[0].vmid }}/agent/file-read?file=/conf/config.xml" + url: "{{ proxmox_api_url }}/nodes/{{ cfg_net_proxmox_node }}/qemu/{{ cfg_net_proxmox_vmid }}/agent/file-read?file=/conf/config.xml" headers: Authorization: PVEAPIToken={{ proxmox_defaults.api_user }}!{{ proxmox_defaults.api_token_id }}={{ proxmox_defaults.api_token_secret }} method: GET @@ -114,8 +113,8 @@ # Writing in 1700 char chunks because otherwise the Qemu Guest Agent service fails - name: Writing /conf/config.xml to {{ custom_vm_name | default(vm_name) }}... ansible.builtin.uri: - url: "{{ proxmox_api_url }}/nodes/{{ proxmox_vm_info.proxmox_vms[0].node - }}/qemu/{{ proxmox_vm_info.proxmox_vms[0].vmid }}/agent/file-write" + url: "{{ proxmox_api_url }}/nodes/{{ cfg_net_proxmox_node + }}/qemu/{{ cfg_net_proxmox_vmid }}/agent/file-write" headers: Authorization: PVEAPIToken={{ proxmox_defaults.api_user }}!{{ proxmox_defaults.api_token_id }}={{ proxmox_defaults.api_token_secret }} method: POST @@ -136,8 +135,7 @@ - name: Writing final /conf/config.xml to {{ custom_vm_name | default(vm_name) }}... ansible.builtin.uri: - url: "{{ proxmox_api_url }}/nodes/{{ proxmox_vm_info.proxmox_vms[0].node - }}/qemu/{{ proxmox_vm_info.proxmox_vms[0].vmid }}/agent/exec" + url: "{{ proxmox_api_url }}/nodes/{{ cfg_net_proxmox_node }}/qemu/{{ cfg_net_proxmox_vmid }}/agent/exec" headers: Authorization: PVEAPIToken={{ proxmox_defaults.api_user }}!{{ proxmox_defaults.api_token_id }}={{ proxmox_defaults.api_token_secret }} method: POST diff --git a/nova/core/roles/configure_networking/tasks/proxmox/pfsense.yml b/nova/core/roles/configure_networking/tasks/proxmox/pfsense.yml new file mode 100644 index 000000000..7b0d954ca --- /dev/null +++ b/nova/core/roles/configure_networking/tasks/proxmox/pfsense.yml @@ -0,0 +1,166 @@ +--- +- name: Configuring pfSense on Proxmox VM... + become: false + delegate_to: localhost + block: + - name: Downloading /conf/config.xml from {{ custom_vm_name | default(vm_name) }}... + ansible.builtin.uri: + url: "{{ proxmox_api_url }}/nodes/{{ cfg_net_proxmox_node }}/qemu/{{ cfg_net_proxmox_vmid }}/agent/file-read?file=/conf/config.xml" + headers: + Authorization: PVEAPIToken={{ proxmox_defaults.api_user }}!{{ proxmox_defaults.api_token_id }}={{ proxmox_defaults.api_token_secret }} + method: GET + validate_certs: "{{ proxmox_validate_certs }}" + register: config_download + + - name: Saving /conf/config.xml to a temporary file...
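# The guest agent file-read endpoint returns the file body in json.data.content (per the Proxmox API docs it also sets a truncated flag for large files, an assumption worth keeping in mind for big configs); staging the XML locally lets the community.general.xml tasks below edit it before it is chunk-written back.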
+ ansible.builtin.copy: + content: "{{ config_download.json.data.content }}" + dest: /tmp/{{ project_fullname | default('') }}_{{ inventory_hostname }}_pfsense_config.xml + mode: "0600" + + - name: Deleting existing interfaces... + community.general.xml: + path: /tmp/{{ project_fullname | default('') }}_{{ inventory_hostname }}_pfsense_config.xml + xpath: /pfsense/interfaces/* + state: absent + + - name: Deleting existing gateways... + community.general.xml: + path: /tmp/{{ project_fullname | default('') }}_{{ inventory_hostname }}_pfsense_config.xml + xpath: /pfsense/gateways/* + state: absent + + - name: Deleting existing DNS servers... + community.general.xml: + path: /tmp/{{ project_fullname | default('') }}_{{ inventory_hostname }}_pfsense_config.xml + xpath: /pfsense/system/dnsserver + state: absent + + - name: Templating interfaces config... + ansible.builtin.template: + src: pfsense.yml + dest: /tmp/{{ project_fullname | default('') }}_{{ inventory_hostname }}_pfsense_interfaces.yml + lstrip_blocks: true + mode: "0600" + + - name: Including interfaces config... + ansible.builtin.include_vars: + file: /tmp/{{ project_fullname | default('') }}_{{ inventory_hostname }}_pfsense_interfaces.yml + + - name: Configuring following pfSense interfaces for {{ inventory_hostname }}... + community.general.xml: + path: /tmp/{{ project_fullname | default('') }}_{{ inventory_hostname }}_pfsense_config.xml + xpath: /pfsense/interfaces + pretty_print: true + add_children: "{{ pfsense_interfaces }}" + + - name: Configuring egress interface gateways for {{ inventory_hostname }}... + community.general.xml: + path: /tmp/{{ project_fullname | default('') }}_{{ inventory_hostname }}_pfsense_config.xml + xpath: /pfsense/gateways + pretty_print: true + add_children: "{{ pfsense_gateways }}" + + - name: Configuring following DNS servers for {{ inventory_hostname }}... + community.general.xml: + path: /tmp/{{ project_fullname | default('') }}_{{ inventory_hostname }}_pfsense_config.xml + xpath: /pfsense/system + pretty_print: true + add_children: + - dnsserver: "{{ item }}" + loop: "{{ dns_server_combined }}" + + - name: Enabling the configured DNS servers for {{ inventory_hostname }}... + community.general.xml: + path: /tmp/{{ project_fullname | default('') }}_{{ inventory_hostname }}_pfsense_config.xml + xpath: /pfsense/system/dnslocalhost + pretty_print: true + value: remote + + - name: Adding WAN Anti-Lockout Rule for WAN interface... + when: interfaces | selectattr('egress', 'equalto', true) | first == interfaces | selectattr('connection', 'equalto', true) | first + block: + - name: Deleting existing WAN Anti-Lockout Rule... + community.general.xml: + path: /tmp/{{ project_fullname | default('') }}_{{ inventory_hostname }}_pfsense_config.xml + xpath: /pfsense/filter/rule[@descr='WAN Anti-Lockout Rule'] + state: absent + + - name: Adding WAN Anti-Lockout Rule for {{ inventory_hostname }}... + community.general.xml: + path: /tmp/{{ project_fullname | default('') }}_{{ inventory_hostname }}_pfsense_config.xml + xpath: /pfsense/filter + pretty_print: true + add_children: + - rule: + _: + - id: + - type: pass + - interface: wan + - ipprotocol: inet46 + - statetype: keep state + - source: + _: + - any: + - destination: + _: + - any: + - descr: WAN Anti-Lockout Rule + + - name: Getting network configuration file contents... + ansible.builtin.slurp: + path: /tmp/{{ project_fullname | default('') }}_{{ inventory_hostname }}_pfsense_config.xml + register: file_contents + + - name: Getting the number of config.xml chunks...
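# Ceiling division: (length + 1699) // 1700 == ceil(length / 1700), matching the number of slices produced by regex_findall('.{1,1700}'). The slices are cut from the base64 text returned by slurp, and since 1700 is a multiple of 4 every slice is itself valid base64, which is what lets the per-chunk b64decode in the write task below work.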
+ ansible.builtin.set_fact: + config_file_chunks: "{{ (file_contents.content | length + 1699) // 1700 }}" + + # Writing in 1700 char chunks because otherwise the Qemu Guest Agent service fails + - name: Writing /conf/config.xml to {{ custom_vm_name | default(vm_name) }}... + ansible.builtin.uri: + url: "{{ proxmox_api_url }}/nodes/{{ cfg_net_proxmox_node + }}/qemu/{{ cfg_net_proxmox_vmid }}/agent/file-write" + headers: + Authorization: PVEAPIToken={{ proxmox_defaults.api_user }}!{{ proxmox_defaults.api_token_id }}={{ proxmox_defaults.api_token_secret }} + method: POST + body: + content: "{{ item | b64decode }}" + file: /tmp/config_{{ '%03d' | format(file_loop) }}.xml + body_format: json + validate_certs: "{{ proxmox_validate_certs }}" + loop: "{{ file_contents.content | regex_findall('.{1,1700}') }}" + loop_control: + index_var: file_loop + label: "{{ file_loop + 1 }}/{{ config_file_chunks }}" + + - name: Removing local config.xml file... + ansible.builtin.file: + path: /tmp/{{ project_fullname | default('') }}_{{ inventory_hostname }}_pfsense_config.xml + state: absent + + - name: Writing final /conf/config.xml to {{ custom_vm_name | default(vm_name) }}... + ansible.builtin.uri: + url: "{{ proxmox_api_url }}/nodes/{{ cfg_net_proxmox_node }}/qemu/{{ cfg_net_proxmox_vmid }}/agent/exec" + headers: + Authorization: PVEAPIToken={{ proxmox_defaults.api_user }}!{{ proxmox_defaults.api_token_id }}={{ proxmox_defaults.api_token_secret }} + method: POST + body: + command: + - sh + - -c + - "cat /tmp/config_*.xml > /conf/config.xml && rm -f /tmp/config_*.xml" + - reboot + body_format: json + validate_certs: "{{ proxmox_validate_certs }}" + register: network_config_command + + - name: Including command run check task... + ansible.builtin.include_tasks: command_run_check.yml + +# This is required for network config to take effect +- name: Restarting {{ custom_vm_name | default(vm_name) }} VM... + ansible.builtin.include_role: + name: nova.core.powerstate + vars: + restart: true diff --git a/nova/core/roles/configure_networking/tasks/proxmox/windows_check_sysprep.yml b/nova/core/roles/configure_networking/tasks/proxmox/windows_check_sysprep.yml index cfbeaf046..c93ddc0d8 100644 --- a/nova/core/roles/configure_networking/tasks/proxmox/windows_check_sysprep.yml +++ b/nova/core/roles/configure_networking/tasks/proxmox/windows_check_sysprep.yml @@ -5,8 +5,7 @@ block: - name: Saving Sysprep status... ansible.builtin.uri: - url: "{{ proxmox_api_url }}/nodes/{{ proxmox_vm_info.proxmox_vms[0].node - }}/qemu/{{ proxmox_vm_info.proxmox_vms[0].vmid }}/agent/exec" + url: "{{ proxmox_api_url }}/nodes/{{ cfg_net_proxmox_node }}/qemu/{{ cfg_net_proxmox_vmid }}/agent/exec" headers: Authorization: PVEAPIToken={{ proxmox_defaults.api_user }}!{{ proxmox_defaults.api_token_id }}={{ proxmox_defaults.api_token_secret }} method: POST @@ -24,21 +23,19 @@ body_format: json validate_certs: "{{ proxmox_validate_certs }}" register: network_config_command - retries: 60 - delay: 2 + retries: "{{ (configure_networking_proxmox_timeout if configure_networking_proxmox_timeout >= 5 else 5) // 5 }}" + delay: 5 - name: Getting Sysprep status... 
ansible.builtin.uri: - url: "{{ proxmox_api_url }}/nodes/{{ proxmox_vm_info.proxmox_vms[0].node - }}/qemu/{{ proxmox_vm_info.proxmox_vms[0].vmid - }}/agent/file-read?file=C:\\Windows\\Temp\\SysprepStatus" + url: "{{ proxmox_api_url }}/nodes/{{ cfg_net_proxmox_node }}/qemu/{{ cfg_net_proxmox_vmid }}/agent/file-read?file=C:\\Windows\\Temp\\SysprepStatus" headers: Authorization: PVEAPIToken={{ proxmox_defaults.api_user }}!{{ proxmox_defaults.api_token_id }}={{ proxmox_defaults.api_token_secret }} method: GET validate_certs: "{{ proxmox_validate_certs }}" register: sysprep_status - retries: 60 - delay: 2 + retries: "{{ (configure_networking_proxmox_timeout if configure_networking_proxmox_timeout >= 5 else 5) // 5 }}" + delay: 5 - name: Setting Sysprep loop count to {{ sysprep_loop_count | default(0) | int + 1 }}... ansible.builtin.set_fact: @@ -50,11 +47,9 @@ config: current network: true register: proxmox_agent_status - until: - - proxmox_agent_status.msg is defined - - proxmox_agent_status.msg is search('.*agent is not running.*') - failed_when: proxmox_agent_status.failed and proxmox_agent_status.msg is not search('.*agent is not running.*') - retries: 60 + until: proxmox_agent_status.msg | default('') is search('.*agent is not running.*') + failed_when: proxmox_agent_status.failed and proxmox_agent_status.msg | default('') is not search('.*agent is not running.*') + retries: "{{ (configure_networking_proxmox_timeout if configure_networking_proxmox_timeout >= 5 else 5) // 5 }}" delay: 5 when: - sysprep_status.json.data['content'] | trim != "7" diff --git a/nova/core/roles/configure_networking/tasks/proxmox/windows_cli.yml b/nova/core/roles/configure_networking/tasks/proxmox/windows_cli.yml index f1b2bf0ae..71bcafab1 100644 --- a/nova/core/roles/configure_networking/tasks/proxmox/windows_cli.yml +++ b/nova/core/roles/configure_networking/tasks/proxmox/windows_cli.yml @@ -11,8 +11,8 @@ name: "{{ custom_vm_name | default(vm_name) }}" config: current network: true - retries: 30 - delay: 10 + retries: "{{ (configure_networking_proxmox_timeout if configure_networking_proxmox_timeout >= 5 else 5) // 5 }}" + delay: 5 - name: Templating network configuration script... ansible.builtin.template: @@ -28,8 +28,7 @@ - name: Writing PowerShell configuration script to {{ custom_vm_name | default(vm_name) }}... ansible.builtin.uri: - url: "{{ proxmox_api_url }}/nodes/{{ proxmox_vm_info.proxmox_vms[0].node - }}/qemu/{{ proxmox_vm_info.proxmox_vms[0].vmid }}/agent/file-write" + url: "{{ proxmox_api_url }}/nodes/{{ cfg_net_proxmox_node }}/qemu/{{ cfg_net_proxmox_vmid }}/agent/file-write" headers: Authorization: PVEAPIToken={{ proxmox_defaults.api_user }}!{{ proxmox_defaults.api_token_id }}={{ proxmox_defaults.api_token_secret }} method: POST @@ -41,8 +40,7 @@ - name: Configuring network... ansible.builtin.uri: - url: "{{ proxmox_api_url }}/nodes/{{ proxmox_vm_info.proxmox_vms[0].node - }}/qemu/{{ proxmox_vm_info.proxmox_vms[0].vmid }}/agent/exec" + url: "{{ proxmox_api_url }}/nodes/{{ cfg_net_proxmox_node }}/qemu/{{ cfg_net_proxmox_vmid }}/agent/exec" headers: Authorization: PVEAPIToken={{ proxmox_defaults.api_user }}!{{ proxmox_defaults.api_token_id }}={{ proxmox_defaults.api_token_secret }} method: POST @@ -59,7 +57,7 @@ body_format: json validate_certs: "{{ proxmox_validate_certs }}" register: network_config_command - retries: 24 + retries: "{{ (configure_networking_proxmox_timeout if configure_networking_proxmox_timeout >= 5 else 5) // 5 }}" delay: 5 - name: Including command run check task... 
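# The recurring retries expression is effectively max(configure_networking_proxmox_timeout, 5) // 5; combined with delay: 5 the total wait approximates the configured timeout in seconds, e.g. the default of 300 gives 60 attempts x 5 s.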
@@ -69,6 +67,7 @@ ansible.builtin.wait_for: host: "{{ connection_address }}" port: 22 + timeout: "{{ configure_networking_proxmox_timeout }}" rescue: - name: WINDOWS NETWORK ERROR @@ -92,8 +91,8 @@ name: "{{ custom_vm_name | default(vm_name) }}" config: current network: true - retries: 30 - delay: 10 + retries: "{{ (configure_networking_proxmox_timeout if configure_networking_proxmox_timeout >= 5 else 5) // 5 }}" + delay: 5 - name: Re-including Windows CLI tasks... ansible.builtin.include_tasks: windows_cli.yml diff --git a/nova/core/roles/configure_networking/tasks/vsphere/interfaces_alpine.yml b/nova/core/roles/configure_networking/tasks/vsphere/interfaces_alpine.yml index ecdbf90d3..bdaef83d1 100644 --- a/nova/core/roles/configure_networking/tasks/vsphere/interfaces_alpine.yml +++ b/nova/core/roles/configure_networking/tasks/vsphere/interfaces_alpine.yml @@ -3,14 +3,12 @@ become: false delegate_to: localhost block: - # When an IP address is reported by VMware Tools, it is ready to accept console input - # Otherwise the login prompt is not ready in vSphere VMMs console + # Extra check to ensure that the VM is ready for console input, since sometimes the boot can take longer on low-end systems - name: Waiting for {{ custom_vm_name | default(vm_name) }} to be ready for console input... community.vmware.vmware_guest_tools_wait: name: "{{ custom_vm_name | default(vm_name) }}" register: alpine_boot_status - until: alpine_boot_status.instance.ipv4 is not ansible.builtin.falsy - or alpine_boot_status.instance.ipv6 is not ansible.builtin.falsy + until: alpine_boot_status.instance.advanced_settings['guestInfo.detailed.data'] is defined retries: "{{ (configure_networking_alpine_boot_wait_time if configure_networking_alpine_boot_wait_time >= 5 else 5) // 5 }}" delay: 5 diff --git a/nova/core/roles/configure_networking/tasks/vsphere/nmcli.yml b/nova/core/roles/configure_networking/tasks/vsphere/nmcli.yml index 9e16e5b4a..5c732e0da 100644 --- a/nova/core/roles/configure_networking/tasks/vsphere/nmcli.yml +++ b/nova/core/roles/configure_networking/tasks/vsphere/nmcli.yml @@ -29,7 +29,7 @@ # to retry the network configuration up to 3 times before failing the task completely. - name: Including network configuration tasks... block: - # Check if root is mounted read-only + # Check if root is mounted read-only and remount as read-write # This is an issue specific to Kali where sometimes root is mounted read-only # Done here because the check needs to happen in the very early stage of the boot - name: Checking if root is mounted read-only... diff --git a/nova/core/roles/configure_networking/tasks/vsphere/panos.yml b/nova/core/roles/configure_networking/tasks/vsphere/panos.yml index d4953767e..03ec61835 100644 --- a/nova/core/roles/configure_networking/tasks/vsphere/panos.yml +++ b/nova/core/roles/configure_networking/tasks/vsphere/panos.yml @@ -23,7 +23,8 @@ community.vmware.vmware_guest_tools_wait: name: "{{ custom_vm_name | default(vm_name) }}" register: panos_boot_status - until: panos_boot_status.instance.ipv4 is not ansible.builtin.falsy + until: + panos_boot_status.instance.ipv4 is not ansible.builtin.falsy or panos_boot_status.instance.ipv6 is not ansible.builtin.falsy retries: "{{ (configure_networking_panos_boot_wait_time if configure_networking_panos_boot_wait_time >= 5 else 5) // 5 }}" delay: 5 @@ -104,7 +105,8 @@ - name: Configuring OOB MGMT interface IPv4 for {{ custom_vm_name | default(vm_name) }}...
community.vmware.vmware_guest_sendkey: name: "{{ custom_vm_name | default(vm_name) }}" - string_send: set deviceconfig system ip-address {{ interfaces | nova.core.addresses('mgmt_ipv4') + string_send: + set deviceconfig system ip-address {{ interfaces | nova.core.addresses('mgmt_ipv4') | first | ansible.utils.ipaddr('address') }} netmask {{ interfaces | nova.core.addresses('mgmt_ipv4') | first | ansible.utils.ipaddr('netmask') }} when: interfaces | nova.core.addresses('connection_nic_ipv4') | first | ansible.utils.ipaddr('address') is ansible.utils.ipv4 diff --git a/nova/core/roles/configure_networking/templates/pfsense.yml b/nova/core/roles/configure_networking/templates/pfsense.yml index da002e1d5..301c70692 100644 --- a/nova/core/roles/configure_networking/templates/pfsense.yml +++ b/nova/core/roles/configure_networking/templates/pfsense.yml @@ -2,6 +2,7 @@ {# https://forum.netgate.com/topic/85739/change-order-of-interfaces-on-vm/13 #} {# https://forum.netgate.com/topic/159909/adding-an-interface-to-a-pfsense-esx-vm-causes-them-to-be-re-ordered-on-reboot #} +{% if infra_env == "vsphere" %} {% if interfaces | length == 1 %} {% set nic_map = ["vmx0"] %} {% elif interfaces | length == 2 %} @@ -23,6 +24,9 @@ {% elif interfaces | length == 10 %} {% set nic_map = ["vmx2","vmx5","vmx8","vmx0","vmx3","vmx6","vmx9","vmx1","vmx4","vmx7"] %} {% endif %} +{% else %} + {% set nic_map = configure_networking_hw_interfaces %} +{% endif %} pfsense_interfaces: {% for interface in interfaces %} diff --git a/nova/core/roles/gitlab/defaults/main.yml b/nova/core/roles/gitlab/defaults/main.yml index d8afdfc45..02a00f846 100644 --- a/nova/core/roles/gitlab/defaults/main.yml +++ b/nova/core/roles/gitlab/defaults/main.yml @@ -1,6 +1,6 @@ --- ### gitlab general -gitlab_version: 18.7.0-ee.0 +gitlab_version: 18.8.0-ee.0 gitlab_ssh_port: 10022 # Increase the GitLab SSH MaxAuthTries to avoid connection issues for users with more than 6 keys in their SSH agent. Set to {} to disable gitlab_ssh_max_auth_tries: 20 diff --git a/nova/core/roles/gitlab/tasks/install.yml b/nova/core/roles/gitlab/tasks/install.yml index fd6a38f93..8ea386bae 100644 --- a/nova/core/roles/gitlab/tasks/install.yml +++ b/nova/core/roles/gitlab/tasks/install.yml @@ -1,16 +1,19 @@ --- - name: Creating Gitlab folders... ansible.builtin.file: - path: "{{ item }}" + path: "{{ item.path }}" state: directory - mode: "0755" + mode: "{{ item.mode | default('0755') }}" + loop_control: + label: "{{ item.path }}" loop: - - "{{ gitlab_config_folder }}" - - "{{ gitlab_config_folder }}/etc" - - "{{ gitlab_config_folder }}/data" - - "{{ gitlab_config_folder }}/logs" - - "{{ gitlab_config_folder }}/registry" - - "{{ gitlab_config_folder }}/temp_configuration" + - path: "{{ gitlab_config_folder }}" + - path: "{{ gitlab_config_folder }}/etc" + mode: u=rwX,g=rwX,o=rX + - path: "{{ gitlab_config_folder }}/data" + - path: "{{ gitlab_config_folder }}/logs" + - path: "{{ gitlab_config_folder }}/registry" + - path: "{{ gitlab_config_folder }}/temp_configuration" - name: Templating Gitlab configuration files for {{ inventory_hostname }}...
ansible.builtin.template: @@ -33,6 +36,7 @@ project_src: "{{ gitlab_config_folder }}" recreate: "{{ 'always' if gitlab_config_files.changed else 'auto' }}" state: present + build: never wait: true wait_timeout: 900 diff --git a/nova/core/roles/gitlab_runner/tasks/docker-install-linux.yml b/nova/core/roles/gitlab_runner/tasks/docker-install-linux.yml index 8cff26c31..c87945135 100644 --- a/nova/core/roles/gitlab_runner/tasks/docker-install-linux.yml +++ b/nova/core/roles/gitlab_runner/tasks/docker-install-linux.yml @@ -23,6 +23,7 @@ project_src: "{{ gitlab_runner_docker_config_folder }}" recreate: "{{ 'always' if gitlab_runner_config.changed else 'auto' }}" state: present + build: never wait: true - name: Cleanup config.toml... diff --git a/nova/core/roles/keycloak/README.md b/nova/core/roles/keycloak/README.md index 98144ce02..c033179e7 100644 --- a/nova/core/roles/keycloak/README.md +++ b/nova/core/roles/keycloak/README.md @@ -1,6 +1,6 @@ # keycloak -This is a role for installing and configuring Keycloak Docker based on a target machine. +This is a role for installing and configuring a Docker-based Keycloak on a target machine. It has a limited set of features that can be configured via Ansible variables, but more can be added as needed. ## Requirements @@ -11,17 +11,19 @@ This is a role for installing and configuring Keycloak Docker based on a target Refer to the [defaults/main.yml](https://github.com/novateams/nova.core/blob/main/nova/core/roles/keycloak/defaults/main.yml) file for a list and description of the variables used in this role. -## Dependencies +Refer to the [templated config file](https://github.com/novateams/nova.core/blob/main/nova/core/roles/keycloak/templates/config.j2) for more details on what variables can be used. -- `nova.core.docker` +To create your own custom configuration template: -## Example +1. Use the one provided in the role as a starting point +2. Export the running Keycloak realm configuration from the admin console +3. Modify the template to include the desired configuration based on the exported configuration -To add extra configuration to vars follow these steps: +## Dependencies -1. Configure the keycloak settings manually and test that they work -2. Export the realm configuration from Keycloak admin console -3. Based on the exported configuration, create the corresponding variables in your host/group vars to be used by this role.
+ +- `nova.core` Ansible collection + +## Example ```yaml # Example on how to install Keycloak with Providentia client and LDAPs (AD) group mapper @@ -47,6 +49,7 @@ dependencies: clients: - client_name: Providentia + protocol: openid-connect # Optional, defaults to openid-connect but can also be saml admin_uri: https://providentia.example.com base_uri: https://providentia.example.com redirect_uris: diff --git a/nova/core/roles/keycloak/defaults/main.yml b/nova/core/roles/keycloak/defaults/main.yml index e0793d5d7..1d9ef7fd8 100644 --- a/nova/core/roles/keycloak/defaults/main.yml +++ b/nova/core/roles/keycloak/defaults/main.yml @@ -1,12 +1,12 @@ --- # The Keycloak version and the Keycloak Config CLI version should be compatible -keycloak_version: 26.4.7 +keycloak_version: 26.5.1 # https://github.com/adorsys/keycloak-config-cli # https://hub.docker.com/r/adorsys/keycloak-config-cli/tags keycloak_config_cli_version: 6.4.0-26 -keycloak_postgres_version: 15 +keycloak_postgres_version: 15.15 # POSTGRES_VERSION_TAG keycloak_container_config_folder: /srv/keycloak keycloak_docker_network: local-network # Comes from nova.core.docker keycloak_proxy_container_name: {} # Define as role variable to restart the proxy container after Keycloak configuration @@ -16,3 +16,7 @@ keycloak_admin_password: {} # Define as role variable (Vault lookup for an example) keycloak_postgres_db_password: {} # Define as role variable (Vault lookup for an example) keycloak_proxy_headers: xforwarded # Refer to https://www.keycloak.org/server/reverseproxy based on your reverse proxy configuration keycloak_config_timeout_seconds: 120 # Timeout for Keycloak Config CLI to wait for Keycloak to become available + +# Configuration template file path. +# Default is the one provided in the role but this variable can be overridden to provide a custom template. +keycloak_config_template: config.j2 diff --git a/nova/core/roles/keycloak/tasks/main.yml b/nova/core/roles/keycloak/tasks/main.yml index 1602fca06..7cbf7cfe1 100644 --- a/nova/core/roles/keycloak/tasks/main.yml +++ b/nova/core/roles/keycloak/tasks/main.yml @@ -33,13 +33,15 @@ loop: - src: keycloak.yml dest: docker-compose.yml - - src: config.j2 + - src: "{{ keycloak_config_template }}" dest: config.yml - name: Composing Keycloak on {{ inventory_hostname }}... community.docker.docker_compose_v2: project_src: "{{ keycloak_container_config_folder }}" state: present + recreate: "{{ 'always' if template.changed else 'auto' }}" + build: never wait: true - name: Waiting until keycloak-config container exits...
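A minimal sketch of overriding the new `keycloak_config_template` variable with a custom template (the inventory layout and file names here are hypothetical; `ansible.builtin.template` accepts absolute `src` paths, so pointing the variable at a template shipped outside the role should work):

```yaml
# group_vars/keycloak.yml -- hypothetical inventory file
# Start from the role's templates/config.j2, export the realm from the
# admin console, and extend your copy as the README above describes.
keycloak_config_template: "{{ inventory_dir }}/templates/keycloak_config.j2"
```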
diff --git a/nova/core/roles/keycloak/templates/config.j2 b/nova/core/roles/keycloak/templates/config.j2 index ac830176d..d94f89d0c 100644 --- a/nova/core/roles/keycloak/templates/config.j2 +++ b/nova/core/roles/keycloak/templates/config.j2 @@ -188,16 +188,24 @@ clients: redirectUris: {{ client.redirect_uris }} rootUrl: {{ client.root_uri }} webOrigins: {{ client.weborigin_uris }} - protocol: openid-connect + protocol: {{ client.protocol | default('openid-connect') }} standardFlowEnabled: {{ client.standard_flow_enabled | default(true) }} implicitFlowEnabled: {{ client.implicit_flow_enabled | default(false) }} + serviceAccountsEnabled: {{ client.service_accounts_enabled | default(false) }} publicClient: {{ client.public_client | default(true) }} - secret: {{ client.client_secret | default('') }} + secret: {{ client.client_secret | default(omit) }} directAccessGrantsEnabled: {{ client.direct_access_grants_enabled | default(true) }} defaultClientScopes: - - profile - - email - {% if 'create_client_scope' in client and client.create_client_scope %} + {% if client.protocol | default('openid-connect') == 'saml' %} + {% for scope in client.default_client_scopes | default(['saml_organization','role_list']) %} + - {{ scope }} + {% endfor %} + {% else %} + {% for scope in client.default_client_scopes | default(['profile','email']) %} + - {{ scope }} + {% endfor %} + {% endif %} + {% if client.create_client_scope | default(false) %} - {{ client.client_name }}-roles {% endif %} optionalClientScopes: [] @@ -205,41 +213,65 @@ clients: authenticationFlowBindingOverrides: browser: {{ client.auth_flow_override_browser }} {% endif %} - {% if 'client_session_max_lifespan' in client or 'client_session_idle_timeout' in client %} attributes: - {% if 'access_token_lifespan' in client %} - access.token.lifespan: {{ client.access_token_lifespan }} - {% endif %} - {% if 'client_session_max_lifespan' in client %} - client.session.max.lifespan: {{ client.client_session_max_lifespan }} + {% if client.protocol | default('openid-connect') == 'openid-connect' %} + access.token.lifespan: {{ client.access_token_lifespan | default(omit) }} + client.session.max.lifespan: {{ client.client_session_max_lifespan | default(omit) }} + client.session.idle.timeout: {{ client.client_session_idle_timeout | default(omit) }} {% endif %} - {% if 'client_session_idle_timeout' in client %} - client.session.idle.timeout: {{ client.client_session_idle_timeout }} + {% if client.protocol | default('openid-connect') == 'saml' %} + saml.client.signature: {{ client.saml_require_client_signature | default(true) }} + saml.signing.certificate: {{ client.saml_client_signing_certificate | default(omit) }} + saml.encrypt: {{ client.saml_encrypt_assertions | default(false) }} + saml.encryption.certificate: {{ client.saml_assertions_encryption_certificate | default(omit) }} + saml_name_id_format: {{ client.saml_name_id_format | default("username") }} + saml_force_name_id_format: {{ client.saml_force_name_id_format | default("false") }} {% endif %} - {% endif %} {% endfor %} clientScopes: {% for client in realm.clients %} - {% if 'create_client_scope' in client and client.create_client_scope %} + {% if client.create_client_scope | default(false) %} + {% if client.protocol | default('openid-connect') == 'openid-connect' %} - name: {{ client.client_name }}-roles description: "" protocol: openid-connect attributes: - include.in.token.scope: "false" - display.on.consent.screen: "true" + include.in.token.scope: false + display.on.consent.screen: true 
consent.screen.text: "${rolesScopeConsentText}" + {% else %} + - name: {{ client.client_name }}-roles + description: "" + protocol: saml + attributes: + include.in.token.scope: false + display.on.consent.screen: true + consent.screen.text: "${rolesScopeConsentText}" + include.in.openid.provider.metadata: true + {% endif %} protocolMappers: + {% if client.protocol | default('openid-connect') == 'openid-connect' %} - name: "client roles" protocol: openid-connect protocolMapper: oidc-usermodel-client-role-mapper consentRequired: false config: - multivalued: "true" - id.token.claim: "true" - access.token.claim: "true" + multivalued: true + id.token.claim: true + access.token.claim: true claim.name: {{ client.client_roles_mapper_claim_name | default("resource_access.${client_id}.roles") }} jsonType.label: String usermodel.clientRoleMapping.clientId: {{ client.client_name }} + {% else %} + - name: "client roles" + protocol: saml + protocolMapper: saml-role-list-mapper + consentRequired: false + config: + single: {{ client.saml_client_roles_mapper_single_role_attribute | default("false") }} + attribute.nameformat: Basic + attribute.name: {{ client.client_roles_mapper_claim_name | default("roles") }} + {% endif %} {% endif %} {% endfor %} {% endif %} diff --git a/nova/core/roles/machine_operations/tasks/proxmox/disk_cleanup.yml b/nova/core/roles/machine_operations/tasks/proxmox/disk_cleanup.yml index 216b1ff1d..a153e6885 100644 --- a/nova/core/roles/machine_operations/tasks/proxmox/disk_cleanup.yml +++ b/nova/core/roles/machine_operations/tasks/proxmox/disk_cleanup.yml @@ -13,8 +13,7 @@ block: - name: Removing following {{ custom_vm_name | default(vm_name) }} unreferenced disks... ansible.builtin.uri: - url: - "{{ proxmox_api_url }}/nodes/{{ proxmox_vm_exists.proxmox_vms[0].node + url: "{{ proxmox_api_url }}/nodes/{{ proxmox_vm_exists.proxmox_vms[0].node | default(proxmox_node) }}/storage/{{ proxmox_storage }}/content/{{ disk.volid }}" headers: Authorization: PVEAPIToken={{ proxmox_defaults.api_user }}!{{ proxmox_defaults.api_token_id }}={{ proxmox_defaults.api_token_secret }} @@ -45,7 +44,7 @@ msg: Timeout {{ proxmox_machine_operations_operation_timeout }}s reached when trying to remove unreferenced disks for {{ custom_vm_name | default(vm_name) }} - when: proxmox_disk_cleanup_start_time.ansible_facts.date_time.epoch | int + when: proxmox_disk_cleanup_start_time.ansible_facts.ansible_date_time.epoch | int + proxmox_machine_operations_operation_timeout < ansible_facts.date_time.epoch | int # Under high load the disk cleanup can fail without reporting back an error so looping until the disks are removed diff --git a/nova/core/roles/machine_operations/tasks/proxmox/remove.yml b/nova/core/roles/machine_operations/tasks/proxmox/remove.yml index 0eebefdbb..bef7cfdab 100644 --- a/nova/core/roles/machine_operations/tasks/proxmox/remove.yml +++ b/nova/core/roles/machine_operations/tasks/proxmox/remove.yml @@ -34,10 +34,6 @@ force: true timeout: "{{ proxmox_machine_operations_operation_timeout }}" -- name: Removing any leftover disks... - delegate_to: localhost - become: false - block: - name: Getting the time... 
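# Naming quirk behind the disk_cleanup.yml fix above: facts registered from ansible.builtin.setup keep their ansible_ prefix (result.ansible_facts.ansible_date_time), while live hostvars expose the same data as ansible_facts.date_time, so the timeout guard compares the registered start epoch plus the operation timeout against the current ansible_facts.date_time.epoch.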
ansible.builtin.setup: filter: "*_time" diff --git a/nova/core/roles/monolith/meta/main.yml b/nova/core/roles/monolith/meta/main.yml index 4eb29062e..3013ba29a 100644 --- a/nova/core/roles/monolith/meta/main.yml +++ b/nova/core/roles/monolith/meta/main.yml @@ -1,3 +1,4 @@ --- dependencies: - role: nova.core.docker + when: monolith_single_service is not defined diff --git a/nova/core/roles/monolith/tasks/main.yml b/nova/core/roles/monolith/tasks/main.yml index 72c1c5c15..8083537a1 100644 --- a/nova/core/roles/monolith/tasks/main.yml +++ b/nova/core/roles/monolith/tasks/main.yml @@ -1,51 +1,55 @@ --- -- name: Including prerequisites tasks... - ansible.builtin.include_tasks: - file: prerequisites.yml - -- name: Including Caddy role... - ansible.builtin.include_role: - name: nova.core.caddy - vars: - # Will be used only if no pre-existing certificate is found - caddy_self_signed_certificate_subject_alt_name: "{{ monolith_caddy_self_signed_certificate_subject_alt_name }}" - caddy_servers: "{{ monolith_default_caddy_servers + monolith_extra_caddy_servers }}" - -- name: Downloading and trusting self-signed certificate... - when: caddy_generate_self_signed_certificate is defined +# Skipping dependencies and proxy config when only reconfiguring a single service +- name: Configuring prerequisites and proxy... + when: monolith_single_service is not defined block: - - name: Getting self-signed certificate contents... - ansible.builtin.slurp: - src: /srv/certs/{{ fqdn }}_fullchain.crt - register: monolith_self_signed_certificate - - - name: Copying self-signed certificate to /usr/local/share/ca-certificates/{{ fqdn }}.crt... - ansible.builtin.copy: - content: "{{ monolith_self_signed_certificate.content | b64decode }}" - dest: /usr/local/share/ca-certificates/{{ fqdn }}.crt - mode: "0644" - - - name: Copying self-signed certificate to /usr/local/share/ca-certificates/{{ fqdn }}.crt... - ansible.builtin.copy: - content: "{{ monolith_self_signed_certificate.content | b64decode }}" - dest: /usr/local/share/ca-certificates/{{ fqdn }}.crt - mode: "0644" - delegate_to: localhost - become: true - - - name: Updating remote CA certificates... - ansible.builtin.command: update-ca-certificates --fresh - changed_when: true - retries: 5 - delay: 2 - - - name: Updating local CA certificates... - ansible.builtin.command: update-ca-certificates --fresh - changed_when: true - delegate_to: localhost - become: true - retries: 5 - delay: 2 + - name: Including prerequisites tasks... + ansible.builtin.include_tasks: + file: prerequisites.yml + + - name: Including Caddy role... + ansible.builtin.include_role: + name: nova.core.caddy + vars: + # Will be used only if no pre-existing certificate is found + caddy_self_signed_certificate_subject_alt_name: "{{ monolith_caddy_self_signed_certificate_subject_alt_name }}" + caddy_servers: "{{ monolith_default_caddy_servers + monolith_extra_caddy_servers }}" + + - name: Downloading and trusting self-signed certificate... + when: caddy_generate_self_signed_certificate is defined + block: + - name: Getting self-signed certificate contents... + ansible.builtin.slurp: + src: /srv/certs/{{ fqdn }}_fullchain.crt + register: monolith_self_signed_certificate + + - name: Copying self-signed certificate to /usr/local/share/ca-certificates/{{ fqdn }}.crt...
+ ansible.builtin.copy: + content: "{{ monolith_self_signed_certificate.content | b64decode }}" + dest: /usr/local/share/ca-certificates/{{ fqdn }}.crt + mode: "0644" + + - name: Copying self-signed certificate to /usr/local/share/ca-certificates/{{ fqdn }}.crt... + ansible.builtin.copy: + content: "{{ monolith_self_signed_certificate.content | b64decode }}" + dest: /usr/local/share/ca-certificates/{{ fqdn }}.crt + mode: "0644" + delegate_to: localhost + become: true + + - name: Updating remote CA certificates... + ansible.builtin.command: update-ca-certificates --fresh + changed_when: true + retries: 5 + delay: 2 + + - name: Updating local CA certificates... + ansible.builtin.command: update-ca-certificates --fresh + changed_when: true + delegate_to: localhost + become: true + retries: 5 + delay: 2 # This is so we don't need to include vault.yml if using monolith_single_service for configuring only a single service - name: Checking if Vault credentials already exist... diff --git a/nova/core/roles/nexus/defaults/main.yml b/nova/core/roles/nexus/defaults/main.yml index c82658745..6132f224c 100644 --- a/nova/core/roles/nexus/defaults/main.yml +++ b/nova/core/roles/nexus/defaults/main.yml @@ -9,7 +9,7 @@ nexus_configure_ldap: false # Set to true to also configure LDAP after installat nexus_create_repos: false # Set to true to also create default repositories after installation nexus_allow_anonymous_access: true # Set to false to disable anonymous access nexus_active_encryption_key_id: Primary Encryption Key # Name of the active encryption key that comes with this role -nexus_version: 3.86.2 # Nexus version to install +nexus_version: 3.87.2 # Nexus version to install # Default is the built-in Nexus admin user. If set to a different value than admin, the role will create the user. nexus_admin_username: admin @@ -26,44 +26,50 @@ nexus_realms_to_activate: # List of authentication realms to activate nexus_ldap_administrators_group: # sAMAccountName of the pre-created group in LDAP that will be used to grant admin access to Nexus nexus_ldap_configuration: - name: "{{ nexus_ldap_name }}" # Name of the LDAP configuration - # This is here to make sure that the configuration is not created twice - id: "{{ omit if existing_ldap_servers.json == [] else existing_ldap_servers.json[0].id }}" - protocol: "{{ nexus_ldap_protocol | default('ldaps') }}" # ldap or LDAPs - useTrustStore: true - host: "{{ nexus_ldap_host }}" # FQDN of the LDAP server - port: "{{ nexus_ldap_port | default(636) }}" # Usually 389 for LDAP and 636 for LDAPs - searchBase: "{{ nexus_ldap_search_base }}" # Base DistinguishedName (DN) where to look for users and groups - authScheme: SIMPLE + authPassword: "{{ nexus_bind_dn_password }}" # Password of the service account to connect to LDAP authRealm: "" + authScheme: simple authUsername: "{{ nexus_bind_user_dn }}" # DistinguishedName (DN) of the service account to connect to LDAP - connectionTimeoutSeconds: 30 connectionRetryDelaySeconds: 300 - maxIncidentsCount: 3 - userSubtree: true - userObjectClass: user - userLdapFilter: "{{ nexus_ldap_user_filter | default('') }}" - userIdAttribute: sAMAccountName - userRealNameAttribute: cn - userEmailAddressAttribute: mail - userPasswordAttribute: "" - ldapGroupsAsRoles: true - groupType: STATIC + connectionTimeoutSeconds: 30 # The relative DN where group objects are found (e.g. ou=Group).
This value will have the Search base DN value appended to form the full Group search base DN groupBaseDn: "{{ nexus_groups_dn_under_searchbase }}" - groupSubtree: true - groupObjectClass: group groupIdAttribute: sAMAccountName groupMemberAttribute: "member:1.2.840.113556.1.4.1941:" # This is a magic variable that looks for nested group memberships groupMemberFormat: ${dn} + groupObjectClass: group + groupSubtree: true + groupType: static + host: "{{ nexus_ldap_host }}" # FQDN of the LDAP server + # This is here to make sure that the configuration is not created twice + id: "{{ omit if existing_ldap_servers.json == [] + else existing_ldap_servers.json | selectattr('name', 'equalto', nexus_ldap_name) | map(attribute='id') | first }}" + ldapGroupsAsRoles: true + maxIncidentsCount: 3 + name: "{{ nexus_ldap_name }}" # Name of the LDAP configuration + port: "{{ nexus_ldap_port | default(636) }}" # Usually 389 for LDAP and 636 for LDAPs + protocol: "{{ nexus_ldap_protocol | default('ldaps') }}" # ldap or LDAPs + searchBase: "{{ nexus_ldap_search_base }}" # Base DistinguishedName (DN) where to look for users and groups + userBaseDn: "{{ nexus_users_dn_under_searchbase | default('') }}" + userEmailAddressAttribute: mail + userIdAttribute: sAMAccountName + userLdapFilter: "{{ nexus_ldap_user_filter | default('') }}" userMemberOfAttribute: "" - authPassword: "{{ nexus_bind_dn_password }}" # Password of the service account to connect to LDAP + userObjectClass: user + userPasswordAttribute: "" + userRealNameAttribute: cn + userSubtree: true + useTrustStore: true nexus_remove_unmanaged_repos: true # By default remove repositories that are not in the nexus_repositories list during deploy nexus_repositories: "{{ nexus_default_repositories + nexus_extra_repositories }}" # List of repositories to create when nexus_create_repos is set to true nexus_extra_repositories: [] # List of extra repositories to be created in addition to the nexus_default_repositories list -# The default-cleanup-policy does not actually exist in Nexus. +# List of cleanup policies to apply to each repository in nexus_repositories list +# If empty, no cleanup policy will be applied. +# The policies must already exist in Nexus. 
+nexus_cleanup_policies: []

 # It can be manually created and will then be automatically used by the repositories
 nexus_default_repositories:
   # Chocolatey proxy
@@ -87,6 +93,8 @@ nexus_default_repositories:
     nugetProxy:
       queryCacheItemMaxAge: 3600
       nugetVersion: V2
+    cleanup:
+      policyNames: "{{ [] if nexus_cleanup_policies == [] else nexus_cleanup_policies }}"

   # Docker proxy
   - api_endpoint: https://{{ nexus_fqdn }}/service/rest/v1/repositories/docker/proxy
@@ -113,6 +121,8 @@ nexus_default_repositories:
     dockerProxy:
       cacheForeignLayers: false
       indexType: REGISTRY
+    cleanup:
+      policyNames: "{{ [] if nexus_cleanup_policies == [] else nexus_cleanup_policies }}"

   # GitHub Container Registry proxy
   - api_endpoint: https://{{ nexus_fqdn }}/service/rest/v1/repositories/docker/proxy
@@ -139,6 +149,8 @@ nexus_default_repositories:
     dockerProxy:
       cacheForeignLayers: false
       indexType: REGISTRY
+    cleanup:
+      policyNames: "{{ [] if nexus_cleanup_policies == [] else nexus_cleanup_policies }}"

   # Rubygems proxy
   - api_endpoint: https://{{ nexus_fqdn }}/service/rest/v1/repositories/rubygems/proxy
@@ -158,6 +170,8 @@ nexus_default_repositories:
     httpClient:
       blocked: false
       autoBlock: true
+    cleanup:
+      policyNames: "{{ [] if nexus_cleanup_policies == [] else nexus_cleanup_policies }}"

   # Kali apt proxy
   - api_endpoint: https://{{ nexus_fqdn }}/service/rest/v1/repositories/apt/proxy
@@ -182,6 +196,8 @@ nexus_default_repositories:
     apt:
       distribution: main
       flat: false
+    cleanup:
+      policyNames: "{{ [] if nexus_cleanup_policies == [] else nexus_cleanup_policies }}"

   # Ubuntu noble-backports apt proxy
   - api_endpoint: https://{{ nexus_fqdn }}/service/rest/v1/repositories/apt/proxy
@@ -204,6 +220,8 @@ nexus_default_repositories:
     apt:
       distribution: noble-backports
       flat: false
+    cleanup:
+      policyNames: "{{ [] if nexus_cleanup_policies == [] else nexus_cleanup_policies }}"

   # Ubuntu noble-security apt proxy
   - api_endpoint: https://{{ nexus_fqdn }}/service/rest/v1/repositories/apt/proxy
@@ -226,6 +244,8 @@ nexus_default_repositories:
     apt:
       distribution: noble-security
       flat: false
+    cleanup:
+      policyNames: "{{ [] if nexus_cleanup_policies == [] else nexus_cleanup_policies }}"

   # Ubuntu noble-updates apt proxy
   - api_endpoint: https://{{ nexus_fqdn }}/service/rest/v1/repositories/apt/proxy
@@ -248,6 +268,8 @@ nexus_default_repositories:
     apt:
       distribution: noble-updates
       flat: false
+    cleanup:
+      policyNames: "{{ [] if nexus_cleanup_policies == [] else nexus_cleanup_policies }}"

   # Ubuntu noble apt proxy
   - api_endpoint: https://{{ nexus_fqdn }}/service/rest/v1/repositories/apt/proxy
@@ -270,6 +292,8 @@ nexus_default_repositories:
     apt:
       distribution: noble
       flat: false
+    cleanup:
+      policyNames: "{{ [] if nexus_cleanup_policies == [] else nexus_cleanup_policies }}"

   # Ubuntu jammy-backports apt proxy
   - api_endpoint: https://{{ nexus_fqdn }}/service/rest/v1/repositories/apt/proxy
@@ -292,6 +316,8 @@ nexus_default_repositories:
     apt:
       distribution: jammy-backports
       flat: false
+    cleanup:
+      policyNames: "{{ [] if nexus_cleanup_policies == [] else nexus_cleanup_policies }}"

   # Ubuntu jammy-security apt proxy
   - api_endpoint: https://{{ nexus_fqdn }}/service/rest/v1/repositories/apt/proxy
@@ -314,6 +340,8 @@ nexus_default_repositories:
     apt:
       distribution: jammy-security
       flat: false
+    cleanup:
+      policyNames: "{{ [] if nexus_cleanup_policies == [] else nexus_cleanup_policies }}"

   # Ubuntu jammy-updates apt proxy
   - api_endpoint: https://{{ nexus_fqdn }}/service/rest/v1/repositories/apt/proxy
@@ -336,6 +364,8 @@ nexus_default_repositories:
     apt:
       distribution: jammy-updates
       flat: false
+    cleanup:
+      policyNames: "{{ [] if nexus_cleanup_policies == [] else nexus_cleanup_policies }}"

   # Ubuntu jammy apt proxy
   - api_endpoint: https://{{ nexus_fqdn }}/service/rest/v1/repositories/apt/proxy
@@ -358,6 +388,8 @@ nexus_default_repositories:
     apt:
       distribution: jammy
       flat: false
+    cleanup:
+      policyNames: "{{ [] if nexus_cleanup_policies == [] else nexus_cleanup_policies }}"

   # Ubuntu focal-backports apt proxy
   - api_endpoint: https://{{ nexus_fqdn }}/service/rest/v1/repositories/apt/proxy
@@ -380,6 +412,8 @@ nexus_default_repositories:
     apt:
       distribution: focal-backports
       flat: false
+    cleanup:
+      policyNames: "{{ [] if nexus_cleanup_policies == [] else nexus_cleanup_policies }}"

   # Ubuntu focal-security apt proxy
   - api_endpoint: https://{{ nexus_fqdn }}/service/rest/v1/repositories/apt/proxy
@@ -402,6 +436,8 @@ nexus_default_repositories:
     apt:
       distribution: focal-security
       flat: false
+    cleanup:
+      policyNames: "{{ [] if nexus_cleanup_policies == [] else nexus_cleanup_policies }}"

   # Ubuntu focal-updates apt proxy
   - api_endpoint: https://{{ nexus_fqdn }}/service/rest/v1/repositories/apt/proxy
@@ -424,6 +460,8 @@ nexus_default_repositories:
     apt:
       distribution: focal-updates
       flat: false
+    cleanup:
+      policyNames: "{{ [] if nexus_cleanup_policies == [] else nexus_cleanup_policies }}"

   # Ubuntu focal apt proxy
   - api_endpoint: https://{{ nexus_fqdn }}/service/rest/v1/repositories/apt/proxy
@@ -446,6 +484,8 @@ nexus_default_repositories:
     apt:
       distribution: focal
       flat: false
+    cleanup:
+      policyNames: "{{ [] if nexus_cleanup_policies == [] else nexus_cleanup_policies }}"

   # Debian bullseye apt proxy
   - api_endpoint: https://{{ nexus_fqdn }}/service/rest/v1/repositories/apt/proxy
@@ -468,6 +508,8 @@ nexus_default_repositories:
     apt:
       distribution: bullseye
       flat: false
+    cleanup:
+      policyNames: "{{ [] if nexus_cleanup_policies == [] else nexus_cleanup_policies }}"

   # Debian bullseye-security apt proxy
   - api_endpoint: https://{{ nexus_fqdn }}/service/rest/v1/repositories/apt/proxy
@@ -490,6 +532,8 @@ nexus_default_repositories:
     apt:
       distribution: bullseye-security
       flat: false
+    cleanup:
+      policyNames: "{{ [] if nexus_cleanup_policies == [] else nexus_cleanup_policies }}"

   # Debian bullseye-updates apt proxy
   - api_endpoint: https://{{ nexus_fqdn }}/service/rest/v1/repositories/apt/proxy
@@ -512,6 +556,8 @@ nexus_default_repositories:
     apt:
       distribution: bullseye-updates
       flat: false
+    cleanup:
+      policyNames: "{{ [] if nexus_cleanup_policies == [] else nexus_cleanup_policies }}"

   # Debian bookworm apt proxy
   - api_endpoint: https://{{ nexus_fqdn }}/service/rest/v1/repositories/apt/proxy
@@ -534,6 +580,8 @@ nexus_default_repositories:
     apt:
       distribution: bookworm
       flat: false
+    cleanup:
+      policyNames: "{{ [] if nexus_cleanup_policies == [] else nexus_cleanup_policies }}"

   # Debian bookworm-security apt proxy
   - api_endpoint: https://{{ nexus_fqdn }}/service/rest/v1/repositories/apt/proxy
@@ -556,6 +604,8 @@ nexus_default_repositories:
     apt:
       distribution: bookworm-security
       flat: false
+    cleanup:
+      policyNames: "{{ [] if nexus_cleanup_policies == [] else nexus_cleanup_policies }}"

   # Debian bookworm-updates apt proxy
   - api_endpoint: https://{{ nexus_fqdn }}/service/rest/v1/repositories/apt/proxy
@@ -578,6 +628,8 @@ nexus_default_repositories:
     apt:
       distribution: bookworm-updates
       flat: false
+    cleanup:
+      policyNames: "{{ [] if nexus_cleanup_policies == [] else nexus_cleanup_policies }}"

   # Debian trixie apt proxy
   - api_endpoint: https://{{ nexus_fqdn }}/service/rest/v1/repositories/apt/proxy
@@ -600,6 +652,8 @@ nexus_default_repositories:
     apt:
       distribution: trixie
       flat: false
+    cleanup:
+      policyNames: "{{ [] if nexus_cleanup_policies == [] else nexus_cleanup_policies }}"

   # Debian trixie-security apt proxy
   - api_endpoint: https://{{ nexus_fqdn }}/service/rest/v1/repositories/apt/proxy
@@ -622,6 +676,8 @@ nexus_default_repositories:
     apt:
       distribution: trixie-security
       flat: false
+    cleanup:
+      policyNames: "{{ [] if nexus_cleanup_policies == [] else nexus_cleanup_policies }}"

   # Debian trixie-updates apt proxy
   - api_endpoint: https://{{ nexus_fqdn }}/service/rest/v1/repositories/apt/proxy
@@ -644,6 +700,8 @@ nexus_default_repositories:
     apt:
       distribution: trixie-updates
       flat: false
+    cleanup:
+      policyNames: "{{ [] if nexus_cleanup_policies == [] else nexus_cleanup_policies }}"

 ####################
 # Docker apt proxy #
@@ -670,6 +728,8 @@ nexus_default_repositories:
     apt:
       distribution: noble
       flat: false
+    cleanup:
+      policyNames: "{{ [] if nexus_cleanup_policies == [] else nexus_cleanup_policies }}"

   # Ubuntu jammy docker apt proxy
   - api_endpoint: https://{{ nexus_fqdn }}/service/rest/v1/repositories/apt/proxy
@@ -692,6 +752,8 @@ nexus_default_repositories:
     apt:
       distribution: jammy
       flat: false
+    cleanup:
+      policyNames: "{{ [] if nexus_cleanup_policies == [] else nexus_cleanup_policies }}"

   # Ubuntu focal docker apt proxy
   - api_endpoint: https://{{ nexus_fqdn }}/service/rest/v1/repositories/apt/proxy
@@ -714,6 +776,8 @@ nexus_default_repositories:
     apt:
       distribution: focal
       flat: false
+    cleanup:
+      policyNames: "{{ [] if nexus_cleanup_policies == [] else nexus_cleanup_policies }}"

   # Debian bookworm docker apt proxy
   - api_endpoint: https://{{ nexus_fqdn }}/service/rest/v1/repositories/apt/proxy
@@ -736,6 +800,8 @@ nexus_default_repositories:
     apt:
       distribution: bookworm
       flat: false
+    cleanup:
+      policyNames: "{{ [] if nexus_cleanup_policies == [] else nexus_cleanup_policies }}"

   # Debian bullseye docker apt proxy
   - api_endpoint: https://{{ nexus_fqdn }}/service/rest/v1/repositories/apt/proxy
@@ -758,3 +824,5 @@ nexus_default_repositories:
     apt:
       distribution: bullseye
       flat: false
+    cleanup:
+      policyNames: "{{ [] if nexus_cleanup_policies == [] else nexus_cleanup_policies }}"
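Every proxy repository above now carries an identical cleanup block, so enabling cleanup becomes a single-variable override. A hedged sketch of what that might look like in a project's group_vars; the policy name is hypothetical and, per the comment in defaults/main.yml, must already exist in Nexus:

```yaml
# group_vars/all.yml - hypothetical policy created beforehand in the Nexus UI or API
nexus_cleanup_policies:
  - remove-old-proxied-packages
```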
diff --git a/nova/core/roles/nexus/tasks/install.yml b/nova/core/roles/nexus/tasks/install.yml
index 66b274800..fad212e01 100644
--- a/nova/core/roles/nexus/tasks/install.yml
+++ b/nova/core/roles/nexus/tasks/install.yml
@@ -34,6 +34,7 @@
   community.docker.docker_compose_v2:
     project_src: "{{ nexus_container_config_folder }}"
     state: present
+    build: never
     wait: true
   register: nexus_compose
diff --git a/nova/core/roles/os_configuration/defaults/main.yml b/nova/core/roles/os_configuration/defaults/main.yml
index 7d80ef049..08bece08c 100644
--- a/nova/core/roles/os_configuration/defaults/main.yml
+++ b/nova/core/roles/os_configuration/defaults/main.yml
@@ -19,6 +19,10 @@ wait_for_timeout: 300 # How many seconds to wait for wait_for_host

 # Creates a snapshot when just_clone is true, set to false to skip snapshot creation
 os_configuration_create_snapshot_with_just_clone: true
+# Set to true to keep the play running after creating the snapshot
+# This can be useful during development to have a snapshot created but still continue with the play
+deploy_with_mid_snapshot: false
+
 # Set to true to wait for the site defined in os_configuration_wait_for_site_url to respond with HTTP 200 before moving on with Linux hosts
 os_configuration_wait_for_site: false
 os_configuration_wait_for_site_url: https://google.com # URL to wait for
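The new deploy_with_mid_snapshot toggle keeps the play going after the Pre-customization snapshot, which the comment pitches at development use. A hedged way to flip it for a single run is a CLI extra var (`-e deploy_with_mid_snapshot=true`) or a short-lived inventory override; the file name below is illustrative:

```yaml
# host_vars/devbox.yml - snapshot mid-deploy but keep configuring
deploy_with_mid_snapshot: true
```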
diff --git a/nova/core/roles/os_configuration/tasks/main.yml b/nova/core/roles/os_configuration/tasks/main.yml
index 2cc931cad..5ef3e06bd 100644
--- a/nova/core/roles/os_configuration/tasks/main.yml
+++ b/nova/core/roles/os_configuration/tasks/main.yml
@@ -84,7 +84,7 @@

 # This needs to be last because the play stops after that
 - name: Creating snapshot, getting IP and stopping play...
-  when: just_clone | bool
+  when: just_clone | bool or deploy_with_mid_snapshot | bool
   block:
     - name: Including snapshots role...
       ansible.builtin.include_role:
@@ -95,9 +95,12 @@
         snapshot_name: Pre-customization
       when: os_configuration_create_snapshot_with_just_clone | bool

-    - name: Including get_ip role...
-      ansible.builtin.include_role:
-        name: nova.core.get_ip
+    - name: Stopping play if just_clone...
+      when: just_clone | bool
+      block:
+        - name: Including get_ip role...
+          ansible.builtin.include_role:
+            name: nova.core.get_ip

-    - name: Stopping play...
-      ansible.builtin.meta: end_host
+        - name: Stopping play...
+          ansible.builtin.meta: end_host
diff --git a/nova/core/roles/os_configuration/tasks/pfsense.yml b/nova/core/roles/os_configuration/tasks/pfsense.yml
index e640ed485..6965f5c0f 100644
--- a/nova/core/roles/os_configuration/tasks/pfsense.yml
+++ b/nova/core/roles/os_configuration/tasks/pfsense.yml
@@ -1,5 +1,5 @@
 ---
-# Configuring pfSense accounts in lochost to avoid installing required Python modules on the remote host
+# Configuring pfSense accounts on localhost to avoid installing required Python modules on the remote host
 - name: Downloading config.xml from {{ inventory_hostname }}...
   ansible.builtin.fetch:
     dest: /tmp/{{ project_fullname | default('') }}_{{ inventory_hostname }}_pfsense_config.xml
@@ -62,6 +62,26 @@
   delegate_to: localhost
   become: false

-- name: Reloading pfSense configuration for {{ inventory_hostname }}...
-  ansible.builtin.command: /etc/rc.reload_all
-  changed_when: true
+- name: Restarting {{ custom_vm_name | default(vm_name) }} VM...
+  ansible.builtin.include_role:
+    name: nova.core.powerstate
+  vars:
+    restart: true
+
+- name: Waiting until ssh is down for {{ inventory_hostname }}...
+  ansible.builtin.wait_for:
+    host: "{{ ansible_host }}"
+    port: 22
+    state: stopped
+    timeout: 300
+  delegate_to: localhost
+  become: false
+
+- name: Waiting until ssh is up for {{ inventory_hostname }}...
+  ansible.builtin.wait_for:
+    host: "{{ ansible_host }}"
+    port: 22
+    state: started
+    timeout: 300
+  delegate_to: localhost
+  become: false
diff --git a/nova/core/roles/outline/README.md b/nova/core/roles/outline/README.md
index 974b38c56..3895d53b1 100644
--- a/nova/core/roles/outline/README.md
+++ b/nova/core/roles/outline/README.md
@@ -1,105 +1,37 @@
 # outline

-These are the required variables that you need to define for your environment.
+This is a role for installing [Outline Wiki](https://www.getoutline.com/) using local storage and Docker Compose.

-```yaml
-## Database password
-postgres_password:
-
-## For accessing the configured bucket in minio
-outline_s3_access_key_id:
-outline_s3_secret_access_key:
-
-## These secrets need to be 32 byte hex strings. Generate with "openssl rand -hex 32"
-outline_secret_key:
-outline_utils_secret_key:
-
-## Generic OIDC configuration
-outline_oidc_client_id:
-outline_oidc_client_secret:
-
-## Example with keycloak
-outline_oidc_auth_uri: https://keycloak.example.net/auth/realms/EXAMPLE/protocol/openid-connect/auth
-outline_oidc_token_uri: https://keycloak.example.net/auth/realms/EXAMPLE/protocol/openid-connect/token
-outline_oidc_userinfo_uri: https://keycloak.example.net/auth/realms/EXAMPLE/protocol/openid-connect/userinfo
-outline_oidc_logout_uri: https://keycloak.example.net/auth/realms/EXAMPLE/protocol/openid-connect/logout
-## Outline landing page visual effect only
-outline_oidc_display_name: "OIDC provider"
-```
-
-## Minio for s3 compatible file storage service
-
-The latest versions of outline can now store files in a filesystem mount, previously an s3 service was a requirement.
-This role still uses minio for s3. We have not tested the new feature yet.
+## Requirements

-## Minio
+`Docker` - Can be installed using the `nova.core.docker` role.
+`Web Proxy` - Can be installed using the `nova.core.caddy` role.
+`TLS Certificates` - Can be self-signed or obtained from a trusted CA like Let's Encrypt. Can be requested using the `nova.core.caddy` role if Caddy is accessible from the internet.

-Using this role we setup minio to be hosted on the same instance as outline itself, behind a reverse proxy. The minio service opens 2 ports - one for the user facing API and one for the administration console.
+## Role Variables

-Once the minio service is up, using the administrators console, create a bucket for outline file storage. Add a user and assign an access policy that allows access to said bucket.
-Do not leave the bucket open as a "public bucket"
+Refer to the [defaults/main.yml](https://github.com/ClarifiedSecurity/nova.core/blob/main/nova/core/roles/outline/defaults/main.yml) file for a list of variables and their default values.

-Bucket access policy example - take care that you define the correct bucket name.
-
-```json
-{
-  "Version": "2012-10-17",
-  "Statement": [
-    {
-      "Effect": "Allow",
-      "Action": [
-        "s3:GetBucketLocation",
-        "s3:ListBucket",
-        "s3:ListBucketMultipartUploads"
-      ],
-      "Resource": ["arn:aws:s3:::odata"]
-    },
-    {
-      "Effect": "Allow",
-      "Action": [
-        "s3:AbortMultipartUpload",
-        "s3:DeleteObject",
-        "s3:GetObject",
-        "s3:ListMultipartUploadParts",
-        "s3:PutObject"
-      ],
-      "Resource": ["arn:aws:s3:::odata/*"]
-    }
-  ]
-}
-```
+## Dependencies

-## Public folders in the private bucket
+`nova.core` Ansible collection installed

-Outline wants to store and show user profile pictures under **bucketname/public/** and some other avatars under **bucketname/avatars/**.
-Since we have locked down our bucket to be private, we have to add anonymous access rules under the bucket configuration (readonly access)
+## Example

-```shell
-avatars/
-public/
+```yaml
+# Fix any errors reported about missing variables by adding them to your project's group_vars/host_vars files.
+---
+# Include all of the required dependencies in another role's meta/main.yml file.
+dependencies:
+  - role: nova.core.docker
+
+  - role: nova.core.outline
+
+  - role: nova.core.caddy
+    vars:
+      caddy_servers:
+        - caddy_server_name: "{{ fqdn }}"
+          caddy_server_listen_addresses:
+            - "{{ fqdn }}"
+          caddy_server_reverse_proxy_to_address: http://outline:3000
 ```
-
-## Minio automatic configuration for outline
-
-This role will also configure the minio service for outline, using minio client.
-Default is set to **true** by variable `minio_client_configuration`
-
-## Reverse proxy for the services
-
-Configuring a reverse proxy is not in the scope of this role.
-Example configuration:
-
-Keep in mind that you need to configure the reverse proxy for the outline service **and** the s3 service as well.
-
-Example
-
-- ->
-- ->
-- ->
-
-## Importing and exporting
-
-- In order for the exporting/importing functions to be successful, please make sure that you have connectivity between the outline application container and the s3 service (that can also be a container, running on the same host).
-- If importing or exporting fails, make sure that there are no DNS issues (that the outline container can resolve the s3 service), no certificate trust issues etc.
-- Exporting the wiki **does not include users, groups or any permission schemes**, only articles and attachments are included. Makes sense to always make backups at the OS level as well.
-- Exporting the wiki includes only those collections that you have the permissions to read, even if you are admin.
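Because the reworked role now hard-fails when its three secrets are unset (see the MISSING REQUIRED VARIABLES task in tasks/main.yml below), a host_vars sketch may help; per the old README these keys are 32-byte hex strings, e.g. from `openssl rand -hex 32`. The values below are placeholders, not real secrets:

```yaml
outline_secret_key: "<output of: openssl rand -hex 32>"
outline_utils_secret_key: "<output of: openssl rand -hex 32>"
postgres_password: "<strong database password or a Vault lookup>"
```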
diff --git a/nova/core/roles/outline/defaults/main.yml b/nova/core/roles/outline/defaults/main.yml
index cbaf228f0..92819df0a 100644
--- a/nova/core/roles/outline/defaults/main.yml
+++ b/nova/core/roles/outline/defaults/main.yml
@@ -1,59 +1,43 @@
 ---
-## Where do we put the docker-compose.yml
-compose_config_folder: "{{ app_container_folder }}"
+outline_version: 1.3.0
+outline_postgres_version: 17.7 # POSTGRES_VERSION_TAG

-app_container_name: outline
-redis_container_name: outline_redis
-db_container_name: outline_postgres
-storage_container_name: outline-storage
-minio_client_container_name: outline-mc
+# Pre-generated secret key for Outline
+outline_secret_key: {}

-outline_version: 1.1.0
-outline_postgres_version: 17
+# Pre-generated utils secret key for Outline
+outline_utils_secret_key: {}

+# Pre-generated Postgres password for Outline
+postgres_password: {}
+
+# Container images
 app_container_image: outlinewiki/outline:{{ outline_version }}
 db_container_image: postgres:{{ outline_postgres_version }}
 redis_container_image: redis:latest
-storage_container_image: minio/minio:latest
-minio_client_container_image: minio/mc:latest

+# Container names
+app_container_name: outline
+redis_container_name: outline_redis
+db_container_name: outline_postgres
+
+# Folder paths
 app_container_folder: /srv/{{ app_container_name }}
-redis_container_folder: "{{ app_container_folder }}/{{ redis_container_name }}"
 db_container_folder: "{{ app_container_folder }}/{{ db_container_name }}"
-storage_container_folder: "{{ app_container_folder }}/{{ storage_container_name }}"
-minio_client_container_folder: /srv/{{ minio_client_container_name }}
-
-## local storage folder default
 outline_local_file_storage_folder: "{{ app_container_folder }}/local_file_storage"
+redis_container_folder: "{{ app_container_folder }}/{{ redis_container_name }}"

-## remap outline container user id for local storage
-outline_container_remap_uid: false
-outline_container_uid: 1001
-
-## Use "s3" or "local" file storage mode
-outline_file_storage_mode: local
-
-## If we use the s3 mode, do we want to use minio client to configure the s3 service
-minio_client_configuration: true
+# Usually comes from nova.core.docker role
+outline_docker_network: local-network

-#####
-## Override these in your vars.yml, as needed.
-###
+# Application settings
 outline_url: https://{{ fqdn }}
-s3_fqdn: s3-{{ fqdn }}
-s3_console_fqdn: s3-console-{{ fqdn }}
-s3_url: https://{{ s3_fqdn }}

-## Minio env defaults
-minio_root_user: admin
-minio_root_password: changeme_or_do_lookup_from_vault
-minio_browser_redirect_url: https://{{ s3_console_fqdn }}
-
-## DB env defaults
+# DB defaults
 postgres_user: outline_app
 postgres_db: outline

-## Outline env defaults
+# Node defaults
 node_env: production
 node_extra_ca_certs: /etc/ssl/certs/ca-certificates.crt
 node_tls_reject_unauthorized: 1
@@ -61,23 +45,19 @@ node_tls_reject_unauthorized: 1
 database_url: postgres://{{ postgres_user }}:{{ postgres_password }}@{{ db_container_name }}:5432/{{ postgres_db }}
 redis_url: redis://{{ redis_container_name }}:6379

-outline_s3_bucket_url: https://{{ s3_fqdn }}
-outline_s3_bucket_name: odata
-outline_file_storage_upload_max_size: 2621440000
-aws_s3_force_path_style: "true"
-
-## Import
+# Import
 outline_maximum_import_size: 512000000

-## Rate limits
+# Limits
 outline_rate_limiter_enabled: "true"
-outline_rate_limiter_requests: 100000
+outline_rate_limiter_requests: 300000
 outline_rate_limiter_duration_window: 60
+outline_file_storage_upload_max_size: 2621440000

-## oidc defaults
+# OIDC defaults
 outline_oidc_username_claim: preferred_username
 outline_oidc_display_name: OIDC provider
 outline_oidc_scopes:
-  - openid
   - email
+  - openid
   - profile
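The OIDC endpoints still have to come from your inventory; the Keycloak example dropped from the README remains a workable template. The hostname, realm, and client values below are illustrative, not defaults shipped by the role:

```yaml
outline_oidc_client_id: outline
outline_oidc_client_secret: "<client secret from your identity provider>"
outline_oidc_auth_uri: https://keycloak.example.net/auth/realms/EXAMPLE/protocol/openid-connect/auth
outline_oidc_token_uri: https://keycloak.example.net/auth/realms/EXAMPLE/protocol/openid-connect/token
outline_oidc_userinfo_uri: https://keycloak.example.net/auth/realms/EXAMPLE/protocol/openid-connect/userinfo
outline_oidc_logout_uri: https://keycloak.example.net/auth/realms/EXAMPLE/protocol/openid-connect/logout
```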
diff --git a/nova/core/roles/outline/tasks/main.yml b/nova/core/roles/outline/tasks/main.yml
index 30399d3f0..0dbc64c5d 100644
--- a/nova/core/roles/outline/tasks/main.yml
+++ b/nova/core/roles/outline/tasks/main.yml
@@ -1,75 +1,62 @@
 ---
-- name: Create directories
-  ansible.builtin.file:
-    path: "{{ item }}"
-    recurse: true
-    state: directory
-  with_items:
-    - "{{ app_container_folder }}"
-    - "{{ redis_container_folder }}"
-    - "{{ db_container_folder }}"
-    - "{{ db_container_folder }}/database-data"
-
-- name: Create directories for s3 mode
-  ansible.builtin.file:
-    path: "{{ item }}"
-    recurse: true
-    state: directory
-  with_items:
-    - "{{ minio_client_container_folder }}"
-    - "{{ storage_container_folder }}"
-    - "{{ storage_container_folder }}/storage-data"
+- name: DEPRECATED STORAGE MODE
+  ansible.builtin.fail:
+    msg: |
+      The "s3" file storage mode is deprecated and can no longer be used with this role.
+      Migrate your data from s3 to local storage and remove the outline_file_storage_mode variable from your code.
   when:
+    - outline_file_storage_mode is defined
     - outline_file_storage_mode == "s3"

-- name: Create directories for local filestorage mode
+- name: MISSING REQUIRED VARIABLES
+  ansible.builtin.fail:
+    msg: |
+      You are missing the following variables:
+
+      {% for variable in required_variables %}
+      {% if vars[variable] is defined %}
+      {% set evaluated = lookup('vars', variable) %}
+      {% if evaluated is ansible.builtin.falsy %}
+      - {{ variable }}
+      {% endif %}
+      {% else %}
+      - {{ variable }}
+      {% endif %}
+      {% endfor %}
+
+      Make sure they are added to your project and passed to the role correctly.
+  vars:
+    required_variables:
+      - outline_secret_key
+      - outline_utils_secret_key
+      - postgres_password
+  when: outline_secret_key | default(false) is ansible.builtin.falsy
+    or outline_utils_secret_key | default(false) is ansible.builtin.falsy
+    or postgres_password | default(false) is ansible.builtin.falsy
+
+- name: Create directories
   ansible.builtin.file:
-    path: "{{ item }}"
+    path: "{{ item.path }}"
     recurse: true
     state: directory
-    owner: "{{ outline_container_uid }}"
+    owner: "{{ item.owner | default(omit) }}"
   with_items:
-    - "{{ outline_local_file_storage_folder }}"
-  when:
-    - outline_file_storage_mode == "local"
-
-## MINIO CLIENT for automating the minio configuration
-- name: Templating minio bucket policy from template for S3...
-  ansible.builtin.template:
-    src: bucket-policy.j2
-    dest: "{{ minio_client_container_folder }}/bucket-policy.json"
-    mode: "0600"
-  when:
-    - minio_client_configuration | bool
-    - outline_file_storage_mode == "s3"
-
-- name: Templating compose file for minio client for S3...
-  ansible.builtin.template:
-    src: docker-compose-mc.yml.j2
-    dest: "{{ minio_client_container_folder }}/docker-compose.yml"
-    mode: "0600"
-  when:
-    - minio_client_configuration | bool
-    - outline_file_storage_mode == "s3"
-
-- name: Composing {{ inventory_hostname }} minio client for S3...
-  community.docker.docker_compose_v2:
-    project_src: "{{ minio_client_container_folder }}"
-    state: present
-    wait: true
-  when:
-    - minio_client_configuration | bool
-    - outline_file_storage_mode == "s3"
+    - path: "{{ app_container_folder }}"
+    - path: "{{ redis_container_folder }}"
+    - path: "{{ db_container_folder }}"
+    - path: "{{ db_container_folder }}/database-data"
+    - path: "{{ outline_local_file_storage_folder }}"
+      owner: 1001 # nodejs user inside the container

-## Outline main compose
 - name: Templating Docker Compose file for Outline...
   ansible.builtin.template:
-    src: docker-compose.yml.j2
-    dest: "{{ compose_config_folder }}/docker-compose.yml"
+    src: docker-compose.j2
+    dest: "{{ app_container_folder }}/docker-compose.yml"
     mode: "0600"

 - name: Composing Outline on {{ inventory_hostname }}...
   community.docker.docker_compose_v2:
-    project_src: "{{ compose_config_folder }}"
+    project_src: "{{ app_container_folder }}"
     state: present
+    build: never
     wait: true
diff --git a/nova/core/roles/outline/templates/bucket-policy.j2 b/nova/core/roles/outline/templates/bucket-policy.j2
deleted file mode 100644
index c49d0d7ed..000000000
--- a/nova/core/roles/outline/templates/bucket-policy.j2
+++ /dev/null
@@ -1,29 +0,0 @@
-{
-  "Version": "2012-10-17",
-  "Statement": [
-    {
-      "Effect": "Allow",
-      "Action": [
-        "s3:GetBucketLocation",
-        "s3:ListBucket",
-        "s3:ListBucketMultipartUploads"
-      ],
-      "Resource": [
-        "arn:aws:s3:::{{ outline_s3_bucket_name }}"
-      ]
-    },
-    {
-      "Effect": "Allow",
-      "Action": [
-        "s3:AbortMultipartUpload",
-        "s3:DeleteObject",
-        "s3:GetObject",
-        "s3:ListMultipartUploadParts",
-        "s3:PutObject"
-      ],
-      "Resource": [
-        "arn:aws:s3:::{{ outline_s3_bucket_name }}/*"
-      ]
-    }
-  ]
-}
\ No newline at end of file
diff --git a/nova/core/roles/outline/templates/docker-compose-mc.yml.j2 b/nova/core/roles/outline/templates/docker-compose-mc.yml.j2
deleted file mode 100644
index 416fb8ff8..000000000
--- a/nova/core/roles/outline/templates/docker-compose-mc.yml.j2
+++ /dev/null
@@ -1,26 +0,0 @@
-services:
-  ## minio client
-  {{ minio_client_container_name }}:
-    image: "{{ minio_client_container_image }}"
-    container_name: "{{ minio_client_container_name }}"
-    hostname: "{{ minio_client_container_name }}"
-    entrypoint: >
-      /bin/sh -c "
-      until (/usr/bin/mc config host add myminio http://{{ storage_container_name }}:9000 admin {{ minio_root_password }}) do echo '...waiting...' &&
-      sleep 1;
-      done;
-      /usr/bin/mc alias set myminio http://{{ storage_container_name }}:9000 admin {{ minio_root_password }} ;
-      /usr/bin/mc admin policy create myminio/ outline_policy /tmp/bucket-policy.json ;
-      /usr/bin/mc admin user add myminio/ {{ outline_s3_access_key_id }} {{ outline_s3_secret_access_key }} ;
-      /usr/bin/mc admin policy attach myminio/ outline_policy --user {{ outline_s3_access_key_id }} ;
-      /usr/bin/mc mb myminio/{{ outline_s3_bucket_name }} --ignore-existing ;
-      /usr/bin/mc anonymous set download myminio/{{ outline_s3_bucket_name }}/public ;
-      /usr/bin/mc anonymous set download myminio/{{ outline_s3_bucket_name }}/avatars ;
-      "
-    volumes:
-      - "{{ minio_client_container_folder }}/bucket-policy.json:/tmp/bucket-policy.json"
-
-networks:
-  default:
-    name: local-network
-    external: true
diff --git a/nova/core/roles/outline/templates/docker-compose.yml.j2 b/nova/core/roles/outline/templates/docker-compose.j2
similarity index 62%
rename from nova/core/roles/outline/templates/docker-compose.yml.j2
rename to nova/core/roles/outline/templates/docker-compose.j2
index b03f3859e..687c82041 100644
--- a/nova/core/roles/outline/templates/docker-compose.yml.j2
+++ b/nova/core/roles/outline/templates/docker-compose.j2
@@ -1,27 +1,6 @@
 ---
 services:
-{% if outline_file_storage_mode == "s3" %}
-  ## Minio for storage
-  {{ storage_container_name }}:
-    container_name: "{{ storage_container_name }}"
-    hostname: "{{ storage_container_name }}"
-    image: "{{ storage_container_image }}"
-    restart: unless-stopped
-    volumes:
-      - "{{ storage_container_folder }}/storage-data:/data"
-    environment:
-      - MINIO_ROOT_USER={{ minio_root_user }}
-      - MINIO_ROOT_PASSWORD={{ minio_root_password }}
-      - MINIO_BROWSER_REDIRECT_URL={{ minio_browser_redirect_url }}
-    command: "minio server /data --console-address ':9090'"
-    logging:
-      driver: json-file
-      options:
-        max-size: 250m
-        max-file: "1"
-{% endif %}
-
-  {{ db_container_name }}:
"{{ db_container_name }}": container_name: "{{ db_container_name }}" hostname: "{{ db_container_name }}" image: "{{ db_container_image }}" @@ -45,7 +24,7 @@ services: max-size: 250m max-file: "1" - {{ redis_container_name }}: + "{{ redis_container_name }}": container_name: "{{ redis_container_name }}" hostname: "{{ redis_container_name }}" image: "{{ redis_container_image }}" @@ -61,66 +40,51 @@ services: max-size: 250m max-file: "1" - {{ app_container_name }}: + "{{ app_container_name }}": container_name: "{{ app_container_name }}" hostname: "{{ app_container_name }}" image: "{{ app_container_image }}" restart: unless-stopped -{% if outline_file_storage_mode == "local" and outline_container_remap_uid == true %} - user: {{ outline_container_user_id }}:{{ outline_container_user_id }} -{% endif %} volumes: - - "/etc/ssl/certs/ca-certificates.crt:/etc/ssl/certs/ca-certificates.crt" -{% if outline_file_storage_mode == "local" %} + - /etc/ssl/certs/ca-certificates.crt:/etc/ssl/certs/ca-certificates.crt - "{{ outline_local_file_storage_folder }}:/var/lib/outline/data" -{% endif %} environment: - - DATABASE_CONNECTION_POOL_MIN=5 - DATABASE_CONNECTION_POOL_MAX=25 + - DATABASE_CONNECTION_POOL_MIN=5 + - DATABASE_URL={{ database_url }} + - DEBUG=http + - DEFAULT_LANGUAGE=en_US + - ENABLE_UPDATES=false + - FILE_STORAGE_IMPORT_MAX_SIZE={{ outline_maximum_import_size }} + - FILE_STORAGE_UPLOAD_MAX_SIZE={{ outline_file_storage_upload_max_size }} + - FILE_STORAGE=local + - FORCE_HTTPS={{ outline_force_https | default("true") }} - NODE_ENV={{ node_env }} - NODE_EXTRA_CA_CERTS={{ node_extra_ca_certs }} - NODE_TLS_REJECT_UNAUTHORIZED={{ node_tls_reject_unauthorized }} - - SECRET_KEY={{ outline_secret_key }} - - UTILS_SECRET={{ outline_utils_secret_key }} - - DATABASE_URL={{ database_url }} - - PGSSLMODE=disable - - REDIS_URL={{ redis_url }} - - URL={{ outline_url }} -{% if outline_file_storage_mode == "s3" %} - - AWS_ACCESS_KEY_ID={{ outline_s3_access_key_id }} - - AWS_SECRET_ACCESS_KEY={{ outline_s3_secret_access_key }} - - AWS_REGION=us-east-2 - - AWS_S3_UPLOAD_BUCKET_URL={{ outline_s3_bucket_url }} - - AWS_S3_UPLOAD_BUCKET_NAME={{ outline_s3_bucket_name }} - - AWS_S3_FORCE_PATH_STYLE={{ aws_s3_force_path_style }} - - AWS_S3_ACL=private -{% endif %} -{% if outline_file_storage_mode == "local" %} - - FILE_STORAGE=local -{% endif %} - - FILE_STORAGE_UPLOAD_MAX_SIZE={{ outline_file_storage_upload_max_size }} + - OIDC_AUTH_URI={{ outline_oidc_auth_uri }} - OIDC_CLIENT_ID={{ outline_oidc_client_id }} - OIDC_CLIENT_SECRET={{ outline_oidc_client_secret }} - - OIDC_AUTH_URI={{ outline_oidc_auth_uri }} + - OIDC_DISPLAY_NAME={{ outline_oidc_display_name }} + - OIDC_LOGOUT_URI={{ outline_oidc_logout_uri | default("") }} + - OIDC_SCOPES={{ outline_oidc_scopes | join(' ') }} - OIDC_TOKEN_URI={{ outline_oidc_token_uri }} - OIDC_USERINFO_URI={{ outline_oidc_userinfo_uri }} - - OIDC_LOGOUT_URI={{ outline_oidc_logout_uri | default("") }} - OIDC_USERNAME_CLAIM={{ outline_oidc_username_claim }} - - OIDC_DISPLAY_NAME={{ outline_oidc_display_name }} - - OIDC_SCOPES={{ outline_oidc_scopes | join(' ') }} - - FORCE_HTTPS={{ outline_force_https | default("true") }} - - ENABLE_UPDATES=false - - WEB_CONCURRENCY=1 - - FILE_STORAGE_IMPORT_MAX_SIZE={{ outline_maximum_import_size }} - - DEBUG=http - - DEFAULT_LANGUAGE=en_US + - PGSSLMODE=disable + - RATE_LIMITER_DURATION_WINDOW={{ outline_rate_limiter_duration_window }} - RATE_LIMITER_ENABLED={{ outline_rate_limiter_enabled }} - RATE_LIMITER_REQUESTS={{ outline_rate_limiter_requests }} 
-      - RATE_LIMITER_DURATION_WINDOW={{ outline_rate_limiter_duration_window }}
-    command: sh -c "yarn sequelize db:migrate --env production-ssl-disabled && yarn start --env production-ssl-disabled"
+      - REDIS_URL={{ redis_url }}
+      - SECRET_KEY={{ outline_secret_key }}
+      - URL={{ outline_url }}
+      - UTILS_SECRET={{ outline_utils_secret_key }}
+      - WEB_CONCURRENCY=1
     depends_on:
-      - {{ redis_container_name }}
-      - {{ db_container_name }}
+      - "{{ redis_container_name }}"
+      - "{{ db_container_name }}"
+    healthcheck:
+      retries: 3 # Default healthcheck from the Dockerfile does not have retries and may report unhealthy during high load or on low-end systems
     logging:
       driver: json-file
       options:
@@ -129,5 +93,5 @@ services:

 networks:
   default:
-    name: local-network
+    name: "{{ outline_docker_network }}"
     external: true
diff --git a/nova/core/roles/providentia/tasks/clone_and_build.yml b/nova/core/roles/providentia/tasks/clone_and_build.yml
index 330b3d486..cf31ab4b7 100644
--- a/nova/core/roles/providentia/tasks/clone_and_build.yml
+++ b/nova/core/roles/providentia/tasks/clone_and_build.yml
@@ -14,6 +14,7 @@
   community.docker.docker_compose_v2:
     project_src: "{{ providentia_install_dir }}"
     state: absent
+    build: never
     wait: true
   when: providentia_prebuilt_compose_file.stat.exists
diff --git a/nova/core/roles/providentia/tasks/prebuilt_image.yml b/nova/core/roles/providentia/tasks/prebuilt_image.yml
index 74066bb8a..5a014a67e 100644
--- a/nova/core/roles/providentia/tasks/prebuilt_image.yml
+++ b/nova/core/roles/providentia/tasks/prebuilt_image.yml
@@ -46,4 +46,5 @@
   community.docker.docker_compose_v2:
     project_src: "{{ providentia_install_dir }}"
     state: present
+    build: never
     wait: true
diff --git a/nova/core/roles/secrets_to_vault/defaults/main.yml b/nova/core/roles/secrets_to_vault/defaults/main.yml
index 0b96829eb..a3b20224e 100644
--- a/nova/core/roles/secrets_to_vault/defaults/main.yml
+++ b/nova/core/roles/secrets_to_vault/defaults/main.yml
@@ -3,6 +3,10 @@
 # Accounts #
 ############

+# Set to false to skip saving accounts list passwords to Vault
+# This can be useful when saving pre_deploy_secrets only
+secrets_to_vault_account_passwords: true
+
 # Vault key name when saving account passwords to Vault to make sure it's unique across the inventory.
 account_key_name: "{{ inventory_hostname }}_{{ sct.username }}"
diff --git a/nova/core/roles/secrets_to_vault/tasks/save_pre_secrets_and_accounts.yml b/nova/core/roles/secrets_to_vault/tasks/save_pre_secrets_and_accounts.yml
index db5029c4b..3b9e1f9f6 100644
--- a/nova/core/roles/secrets_to_vault/tasks/save_pre_secrets_and_accounts.yml
+++ b/nova/core/roles/secrets_to_vault/tasks/save_pre_secrets_and_accounts.yml
@@ -5,7 +5,7 @@
 # Also only set if customization_context is host meaning not saving account passwords for containers
 - name: Setting accounts variable
   ansible.builtin.set_fact:
-    accounts: "{{ (user_accounts + admin_accounts + domain_user_accounts) if customization_context == 'host' else [] }}"
+    accounts: "{{ [] if not secrets_to_vault_account_passwords or customization_context != 'host' else user_accounts + admin_accounts + domain_user_accounts }}"

 - name: Appending Vault path to each secret...
   ansible.builtin.set_fact:
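The accounts fact above now short-circuits on the new toggle, so a run that should push only pre-deploy secrets can skip account passwords entirely. A hedged one-line override:

```yaml
# group_vars sketch: skip saving accounts list passwords, keep pre_deploy_secrets
secrets_to_vault_account_passwords: false
```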
diff --git a/nova/core/roles/snapshots/proxmox/snapshot.yml b/nova/core/roles/snapshots/proxmox/snapshot.yml
index 6acdb54eb..f21b62a41 100644
--- a/nova/core/roles/snapshots/proxmox/snapshot.yml
+++ b/nova/core/roles/snapshots/proxmox/snapshot.yml
@@ -35,3 +35,12 @@
     retention: "{{ 1 if snapshot_mode == 'clean-snap' else omit }}"
     vmstate: "{{ true if live_snap else false }}" # Memory dump
     timeout: "{{ proxmox_machine_operations_operation_timeout }}"
+
+- name: Including powerstate role...
+  ansible.builtin.include_role:
+    name: nova.core.powerstate
+  vars:
+    poweron: true
+  when:
+    - start_vm_after_snapshot | bool # To be able to pass this as a cli extra var -e start_vm_after_snapshot=true/false
+    - not live_snap | bool # To be able to pass this as a cli extra var -e live_snap=true/false
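Both conditions above are written so they can be driven from the command line; the same pair can also travel in an extra-vars file passed with `-e @snapshot-overrides.yml` (the file name is illustrative):

```yaml
# snapshot-overrides.yml - take a cold snapshot, then power the VM back on
live_snap: false
start_vm_after_snapshot: true
```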
diff --git a/nova/core/roles/template_os_configuration/defaults/main.yml b/nova/core/roles/template_os_configuration/defaults/main.yml
index b0735e9a5..e3deafc8f 100644
--- a/nova/core/roles/template_os_configuration/defaults/main.yml
+++ b/nova/core/roles/template_os_configuration/defaults/main.yml
@@ -7,3 +7,7 @@ template_os_configuration_update_system: true # Update OS during this role
 # Increasing MaxAuthTries in sshd_config to avoid issues where user has more than 6 keys in their SSH agent
 template_os_configuration_increase_maxauthtries: true
 template_os_configuration_increase_maxauthtries_value: 20
+
+# Removes the existing snapshot and creates a fresh one after all configuration is done on the template VM
+# Set to false to skip snapshot creation
+template_os_configuration_create_snapshot: true
diff --git a/nova/core/roles/template_os_configuration/tasks/main.yml b/nova/core/roles/template_os_configuration/tasks/main.yml
index acc91a53b..e464d7dba 100644
--- a/nova/core/roles/template_os_configuration/tasks/main.yml
+++ b/nova/core/roles/template_os_configuration/tasks/main.yml
@@ -65,6 +65,15 @@
       snapshot_mode: clean-snap
       snapshot_name: LinkedCloneSource
       start_vm_after_snapshot: false
+  when: template_os_configuration_create_snapshot
+
+# Shutting down the VM only if snapshotting is disabled since the snapshot role will shut down the VM itself
+- name: Shutting down {{ custom_vm_name | default(vm_name) }} VM...
+  ansible.builtin.include_role:
+    name: nova.core.powerstate
+  vars:
+    shutdown: true
+  when: not template_os_configuration_create_snapshot

 - name: Ending play for templates...
   ansible.builtin.meta: end_host
diff --git a/nova/core/roles/tuoni/defaults/main.yml b/nova/core/roles/tuoni/defaults/main.yml
index 3fce5168e..7c20be769 100644
--- a/nova/core/roles/tuoni/defaults/main.yml
+++ b/nova/core/roles/tuoni/defaults/main.yml
@@ -17,6 +17,7 @@ tuoni_licence_key: {} # Set the Tuoni licence key to install licensed plugins
 tuoni_users: # The list of users that will be created in Tuoni pass a custom list as a variable to override this default list
   - username: operator
     password: Operator1Operator1 # Make sure to change this password
+    update_password: true # Set to false to not update the password on each deploy
     authorities:
       - MANAGE_AGENTS
       - MANAGE_LISTENERS
diff --git a/nova/core/roles/tuoni/tasks/users.yml b/nova/core/roles/tuoni/tasks/users.yml
index 7f2bba454..b5479c182 100644
--- a/nova/core/roles/tuoni/tasks/users.yml
+++ b/nova/core/roles/tuoni/tasks/users.yml
@@ -21,7 +21,7 @@
       - 201 # Created
     validate_certs: false # Because Tuoni uses a self-signed certificate with an empty Subject Name
     body: "{{ item }}"
-  loop: "{{ tuoni_users }}"
+  loop: "{{ tuoni_users | map('combine', {'update_password': omit}) | list }}"
   loop_control:
     label: "{{ item.username }}"
   when: item.username not in tuoni_existing_users.json | map(attribute='username')
@@ -63,4 +63,6 @@
   loop: "{{ tuoni_users }}"
   loop_control:
     label: "{{ item.username }}"
-  when: item.username in tuoni_existing_users.json | map(attribute='username')
+  when:
+    - item.username in tuoni_existing_users.json | map(attribute='username')
+    - item.update_password | default(true)
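With update_password defaulting to true, an existing operator's password is reset on every deploy. A hedged override that provisions the user once and then leaves the password alone:

```yaml
tuoni_users:
  - username: operator
    password: Operator1Operator1 # Only used when the user is first created
    update_password: false
    authorities:
      - MANAGE_AGENTS
      - MANAGE_LISTENERS
```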
diff --git a/nova/core/roles/updates/tasks/redhat_family.yml b/nova/core/roles/updates/tasks/redhat_family.yml
index 8f66a0a42..2468765a7 100644
--- a/nova/core/roles/updates/tasks/redhat_family.yml
+++ b/nova/core/roles/updates/tasks/redhat_family.yml
@@ -1,16 +1,9 @@
 ---
-- name: Gathering package facts for {{ inventory_hostname }}...
-  ansible.builtin.package_facts:
-    manager: auto
-
 # Since by default RedHat boot partition is only 1GB and kernel updates can quickly fill it up
 - name: Keeping only latest {{ updates_redhat_family_nr_of_kernels_to_keep }} kernel versions for {{ inventory_hostname }}...
   ansible.builtin.command: dnf remove --oldinstallonly --setopt installonly_limit={{ updates_redhat_family_nr_of_kernels_to_keep }} -y kernel
   register: dnf_remove_output
   changed_when: dnf_remove_output.stdout is search("Remove.*Packages")
-  when:
-    - ansible_facts.packages.kernel is defined
-    - ansible_facts.packages.kernel | length > updates_redhat_family_nr_of_kernels_to_keep

 - name: Updating all packages...
   ansible.builtin.dnf:
diff --git a/nova/core/roles/vault/defaults/main.yml b/nova/core/roles/vault/defaults/main.yml
index a22a764a5..9fb36a98e 100644
--- a/nova/core/roles/vault/defaults/main.yml
+++ b/nova/core/roles/vault/defaults/main.yml
@@ -9,7 +9,7 @@ vault_configure: false # Run the initial default configuration for Vault
 vault_configure_ldap: false # Configuring LDAP authentication for Vault
 vault_configuration_uri: "https://{{ fqdn }}" # Defined separately for when configuring multiple Vaults
 vault_config_folder: /srv/vault
-vault_version: 1.21.1 # Default Vault Docker image version
+vault_version: 1.21.2 # Default Vault Docker image version
 vault_container_name: vault
 vault_server_log_level: debug # Debug gives better info about LDAP login failures and their reasons
 vault_audit_logging: false # Enable Vault audit logging
diff --git a/nova/core/roles/vault/tasks/install.yml b/nova/core/roles/vault/tasks/install.yml
index f028cacfc..5501d19dd 100644
--- a/nova/core/roles/vault/tasks/install.yml
+++ b/nova/core/roles/vault/tasks/install.yml
@@ -34,6 +34,7 @@
   community.docker.docker_compose_v2:
     project_src: "{{ vault_config_folder }}"
     state: present
+    build: never
     wait: true

 - name: Finding and restarting proxy container...
diff --git a/nova/core/roles/vault/tasks/main.yml b/nova/core/roles/vault/tasks/main.yml
index de36bf715..d809178c1 100644
--- a/nova/core/roles/vault/tasks/main.yml
+++ b/nova/core/roles/vault/tasks/main.yml
@@ -27,6 +27,7 @@
     - not vault_seal_status.json.sealed
   retries: 60
   delay: 5
+  when: vault_configure

 - name: Including creation tasks for following Root CAs...
   ansible.builtin.include_tasks: ca_roots.yml
diff --git a/nova/core/roles/win_sysprep/tasks/main.yml b/nova/core/roles/win_sysprep/tasks/main.yml
index 743d51f35..7d206db69 100644
--- a/nova/core/roles/win_sysprep/tasks/main.yml
+++ b/nova/core/roles/win_sysprep/tasks/main.yml
@@ -9,6 +9,7 @@
   block:
     - name: Removing sysprep-blocking packages from {{ ansible_facts.distribution }}...
       ansible.windows.win_shell: |
+        Get-AppxPackage Microsoft.549981C3F5F10* | Remove-AppPackage
         Get-AppxPackage Microsoft.BingSearch* | Remove-AppPackage
         Get-AppxPackage Microsoft.Copilot* | Remove-AppPackage
         Get-AppxPackage Microsoft.Edge.GameAssist* | Remove-AppPackage