diff --git a/.github/styles/config/vocabularies/Ansible/accept.txt b/.github/styles/config/vocabularies/Ansible/accept.txt index d5538ab1..523322f5 100644 --- a/.github/styles/config/vocabularies/Ansible/accept.txt +++ b/.github/styles/config/vocabularies/Ansible/accept.txt @@ -1,46 +1,56 @@ (?i)ansible -(?i)openstack +(?i)APIs (?i)balenaEtcher -(?i)UNetbootin -(?i)SELinux -(?i)bootable -(?i)bootloader +(?i)Ceph +(?i)check_dependencies +(?i)config +(?i)configs (?i)Ctrl +(?i)Cirros +(?i)cirros +(?i)cell_discovery +(?i)env +(?i)Fernet +(?i)Hostname +(?i)hostnames +(?i)homebrew +(?i)Idempotency +(?i)inventory_docs +(?i)kvm +(?i)kvm_config +(?i)keypair +(?i)Libvirts? +(?i)Lubuntu +(?i)linux +(?i)macoss? +(?i)misconfigured +(?i)multipass? +(?i)netplan +(?i)networkd +(?i)nova_api +(?i)nova_controller +(?i)nova_compute +(?i)ntp +(?i)OAuth +(?i)openstack +(?i)os +(?i)Pulumi +(?i)qemu +(?i)sudo(ers?)? (?i)Subnet -(?i)Nameservers +(?i)systemd +(?i)test_vm_launch (?i)ufw (?i)Ubuntu -(?i)ntp (?i)update_cache -(?i)inventory_docs -(?i)Pulumi -(?i)sudo(ers?)? -(?i)linux -(?i)homebrew -(?i)Kubuntu -(?i)Lubuntu -(?i)VPNs -(?i)networkd (?i)virtualized -(?i)systemd -(?i)minimalistic -(?i)netplan -(?i)Libvirts? (?i)v?lans? (?i)v?networks? -(?i)ethernets? -(?i)multipass? (?i)v?switchs? (?i)vms? -(?i)macoss? -(?i)oss? 
-(?i)config -(?i)configs -(?i)Cirros -(?i)Idempotency -(?i)hostnames -(?i)Fernet -(?i)Ceph -(?i)OAuth -(?i)APIs -(?i)cirros +(?i)VPNs +(?i)bootable +(?i)bootloader +(?i)UNetbootin +(?i)SELinux +(?i)Kubuntu diff --git a/playbooks/ansible-openstack-nova/.vscode/settings.json b/playbooks/ansible-openstack-nova/.vscode/settings.json new file mode 100644 index 00000000..5d71af86 --- /dev/null +++ b/playbooks/ansible-openstack-nova/.vscode/settings.json @@ -0,0 +1,3 @@ +{ + "ansible.python.interpreterPath": "/bin/python3" +} \ No newline at end of file diff --git a/playbooks/ansible-openstack-nova/LICENSE b/playbooks/ansible-openstack-nova/LICENSE new file mode 100644 index 00000000..80b659b6 --- /dev/null +++ b/playbooks/ansible-openstack-nova/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2025 onelrian + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file diff --git a/playbooks/ansible-openstack-nova/README.md b/playbooks/ansible-openstack-nova/README.md new file mode 100644 index 00000000..c925a957 --- /dev/null +++ b/playbooks/ansible-openstack-nova/README.md @@ -0,0 +1,192 @@ +# OpenStack Nova Setup with Vagrant and Ansible + +This project provides an automated setup for a minimal OpenStack Nova environment using Vagrant and Ansible. It creates two virtual machines (controller and compute) and deploys a basic OpenStack Nova setup with all necessary services. + +## Project Overview + +The setup includes: +- **Controller VM**: Runs OpenStack control plane services + - Keystone (Identity) + - Glance (Image) + - Placement (Resource tracking) + - Nova Controller +- **Compute VM**: Runs Nova compute service +- **Libvirt/KVM**: Used as the hypervisor +- **Ansible**: Used for provisioning and configuration management + +## Architecture + +``` ++------------------+ +------------------+ +| Controller | | Compute | +| | | | +| Keystone | | Nova Compute | +| Glance | | Libvirt/KVM | +| Placement | | | +| Nova Controller | | | ++------------------+ +------------------+ + | | + +------------------------+ + | + Management Network + | + (192.168.56.0/24) +``` + +## Prerequisites + +- Linux system with KVM support +- Minimum 8GB RAM and 2 CPU cores +- Internet connectivity (for initial setup) + +## Quick Start + +1. **Basic Setup**: + ```bash + ./setup.sh + ``` + +2. **Access the VMs**: + ```bash + # SSH into controller + vagrant ssh controller + + # SSH into compute node + vagrant ssh compute + ``` + +3. **Test the Setup**: + ```bash + ./test-setup.sh + ``` + +4. **Cleanup**: + ```bash + ./cleanup.sh + ``` + +## Advanced Usage + +### Handling Network Issues + +If you encounter network connectivity issues with the default box: + +1. **Automatic local box creation**: + ```bash + # The setup script will automatically try to create a local box + ./setup.sh + ``` + +2. 
**Manual local box creation**: + ```bash + # Create and add a local box manually + ./add-local-box.sh + + # Use the local box + VAGRANT_BOX=ubuntu2004 ./setup.sh + ``` + +3. **Offline mode** (requires pre-installed boxes): + ```bash + ./setup.sh --offline + ``` + +### Environment Variables + +- `VAGRANT_BOX`: Specify a different Vagrant box (default: generic/ubuntu2004) +- `CONTROLLER_IP`: Controller VM IP address (default: 192.168.56.10) +- `COMPUTE_IP`: Compute VM IP address (default: 192.168.56.11) + +### Script Options + +**setup.sh**: +```bash +./setup.sh # Basic setup +./setup.sh --force-provision # Force Ansible provisioning +./setup.sh --offline # Offline mode (requires pre-installed boxes) +./setup.sh --cleanup # Cleanup after setup +VAGRANT_BOX=ubuntu2004 ./setup.sh # Use a specific box +``` + +**cleanup.sh**: +```bash +./cleanup.sh # Basic cleanup +./cleanup.sh --force # Force cleanup without playbook success check +``` + +**add-local-box.sh**: +```bash +./add-local-box.sh # Create and add default local box +./add-local-box.sh --box-name=mybox # Use custom box name +./add-local-box.sh --box-file=/path/to/box # Add existing box file +``` + +## Testing the Setup + +After successful setup: +1. SSH into the controller VM: `vagrant ssh controller` +2. Source the OpenStack admin credentials: `source ~/admin-openrc.sh` +3. 
Run OpenStack commands: + ```bash + openstack server list + openstack image list + openstack network list + ``` + +## Project Structure + +``` +├── setup.sh # Main setup script +├── cleanup.sh # Cleanup script +├── add-local-box.sh # Local box creation helper +├── test-setup.sh # Setup verification script +├── Vagrantfile # Vagrant configuration +├── ansible.cfg # Ansible configuration +├── requirements.yml # Ansible collections requirements +├── inventory/ # Ansible inventory files +├── playbooks/ # Ansible playbooks +└── roles/ # Ansible roles for each service +``` + +## Services Deployed + +- **Keystone**: Identity service with default admin user +- **Glance**: Image service with CirrOS test image +- **Placement**: Resource tracking for Nova +- **Nova**: Compute service with controller and compute components +- **MariaDB**: Database backend for all services +- **RabbitMQ**: Message queue for inter-service communication + +## Troubleshooting + +### Box Download Issues +If the setup fails due to box download issues: +1. Try running `./add-local-box.sh` to create a local box +2. Use `VAGRANT_BOX=ubuntu2004 ./setup.sh` to use the local box +3. Check network connectivity and firewall settings + +### VM Provisioning Failures +If VM provisioning fails: +1. Check `vagrant_up.log` for detailed error messages +2. Try `./setup.sh --force-provision` to re-run Ansible +3. Verify system resources (RAM, CPU, disk space) + +### Service Access Issues +If you cannot access OpenStack services: +1. Verify VMs are running: `vagrant status` +2. Check service status inside controller VM +3. 
Verify network connectivity between VMs + +## Security Notes + +- Default passwords are used for demonstration purposes only +- Host key checking is disabled for development convenience +- Not suitable for production use without security hardening + +## Requirements + +- Vagrant >= 2.4.1 +- vagrant-libvirt plugin >= 0.12.2 +- libvirt/KVM +- Ansible >= 8.7.0 +- Minimum 8GB RAM and 2 CPU cores \ No newline at end of file diff --git a/playbooks/ansible-openstack-nova/SECURITY.md b/playbooks/ansible-openstack-nova/SECURITY.md new file mode 100644 index 00000000..b09c0185 --- /dev/null +++ b/playbooks/ansible-openstack-nova/SECURITY.md @@ -0,0 +1,163 @@ +# Security Considerations for OpenStack Nova Deployment + +## Overview +This document outlines critical security considerations for the OpenStack Nova deployment project. While this is designed for development/testing environments, production deployments require additional security hardening. + +## Current Security Configuration + +### Database Security +- **MariaDB Configuration**: Uses Unix socket authentication for root access +- **Database Users**: Dedicated OpenStack database user with limited privileges +- **Network Access**: Database allows connections from compute nodes (% wildcard) +- **⚠️ Production Concern**: Database passwords are stored in plain text in variable files + +### Authentication & Authorization +- **Keystone Integration**: All services properly registered with Keystone +- **Service Users**: Dedicated service users for each OpenStack component +- **Role Assignments**: Proper admin role assignments in service project +- **Token Security**: Fernet tokens configured for Keystone + +### Network Security +- **SSH Configuration**: Vagrant SSH keys with proper permissions (600) +- **Host Key Checking**: Disabled in ansible.cfg for development (⚠️ Security Risk) +- **Firewall**: Uses NoopFirewallDriver for simplicity (⚠️ Production Risk) +- **VNC Access**: Configured to listen on all interfaces (0.0.0.0) 
+ +### System Security +- **AppArmor**: Disabled for compatibility (⚠️ Security Trade-off) +- **Swap**: Disabled to prevent memory dumps +- **User Permissions**: Proper service user configurations +- **File Permissions**: Restrictive permissions on configuration files (640) + +## Production Security Recommendations + +### 1. Credential Management +```bash +# Use Ansible Vault for sensitive data +ansible-vault encrypt inventory/group_vars/all.yml + +# Or use external secret management +# - HashiCorp Vault +# - AWS Secrets Manager +# - Azure Key Vault +``` + +### 2. Network Security +```yaml +# Enable proper firewall in nova.conf +firewall_driver = nova.virt.firewall.IptablesFirewall +# Enable SSL/TLS for API endpoints +ssl_cert_file = /path/to/cert.pem +ssl_key_file = /path/to/key.pem +``` + +### 3. Database Security +```yaml +# Use SSL for database connections +database_connection: mysql+pymysql://user:pass@host/db?ssl_ca=/path/to/ca.pem + +# Restrict database access by IP +# Replace % wildcard with specific IP addresses +``` + +### 4. System Hardening +```bash +# Enable AppArmor/SELinux +sudo systemctl enable apparmor +sudo systemctl start apparmor + +# Configure proper firewall rules +sudo ufw enable +sudo ufw allow 22/tcp # SSH +sudo ufw allow 5000/tcp # Keystone +sudo ufw allow 8774/tcp # Nova API +sudo ufw allow 8778/tcp # Placement +sudo ufw allow 9292/tcp # Glance +``` + +### 5. 
Monitoring & Auditing +```yaml +# Enable audit logging in nova.conf +[audit] +enabled = true +audit_map_file = /etc/nova/api_audit_map.conf +``` + +## Security Checklist for Production + +### Pre-Deployment +- [ ] Encrypt all sensitive variables with Ansible Vault +- [ ] Review and harden all default passwords +- [ ] Configure SSL/TLS certificates for all API endpoints +- [ ] Set up proper firewall rules +- [ ] Enable host key checking in Ansible +- [ ] Configure proper backup and disaster recovery + +### Post-Deployment +- [ ] Change all default service passwords +- [ ] Enable audit logging for all services +- [ ] Set up monitoring and alerting +- [ ] Configure log rotation and retention +- [ ] Perform security vulnerability scanning +- [ ] Set up regular security updates + +### Network Security +- [ ] Isolate management network from tenant networks +- [ ] Configure VPN access for administrative tasks +- [ ] Use network segmentation and VLANs + +- [ ] Implement intrusion detection systems +- [ ] Configure rate limiting for API endpoints + +### Access Control +- [ ] Implement multi-factor authentication +- [ ] Set up role-based access control (RBAC) +- [ ] Regular access reviews and cleanup +- [ ] Implement session timeout policies + +## Known Security Limitations (Development Environment) + +1. **Plain Text Passwords**: All service passwords stored in plain text +2. **Disabled Host Key Checking**: SSH connections don't verify host keys +3. **NoopFirewallDriver**: No network filtering between instances +4. **Disabled AppArmor**: Reduced system-level security +5. **Permissive Network Configuration**: Services listen on all interfaces +6. **No SSL/TLS**: All API communications in plain text +7. **Default Credentials**: Using predictable default passwords + +## Incident Response + +### Security Breach Response +1. Isolate affected systems immediately +2. Preserve logs and evidence +3. Notify security team and stakeholders +4. Begin forensic analysis +5. 
Implement containment measures +6. Plan recovery and remediation + +### Log Monitoring +Monitor these critical events: +- Failed authentication attempts +- Privilege escalation attempts +- Unusual API access patterns +- Database access anomalies +- System configuration changes + +## Compliance Considerations + +For production deployments, consider compliance with: +- SOC 2 Type II +- ISO 27001 +- PCI DSS (if handling payment data) +- GDPR (if handling EU personal data) +- HIPAA (if handling healthcare data) + +## Contact Information + +For security issues or questions: +- Security Team: security@yourorganization.com +- Emergency Contact: +1-XXX-XXX-XXXX +- Incident Response: incident-response@yourorganization.com + +--- +**Note**: This is a development/testing environment. Production deployments require significant additional security hardening and should undergo thorough security review and penetration testing. \ No newline at end of file diff --git a/playbooks/ansible-openstack-nova/Vagrantfile b/playbooks/ansible-openstack-nova/Vagrantfile new file mode 100644 index 00000000..b958f614 --- /dev/null +++ b/playbooks/ansible-openstack-nova/Vagrantfile @@ -0,0 +1,48 @@ +# Network configuration - can be overridden with environment variables +controller_ip = ENV['CONTROLLER_IP'] || "192.168.56.10" +compute_ip = ENV['COMPUTE_IP'] || "192.168.56.11" + +# Box configuration - can be overridden with environment variables +# For offline usage, you can add a local box with: +# vagrant box add ubuntu2004 file:///path/to/ubuntu2004.box +box_name = ENV['VAGRANT_BOX'] || "generic/ubuntu2004" + +Vagrant.configure("2") do |config| + config.vm.box = box_name + config.vm.box_check_update = false + + # Handle box download errors gracefully + config.vm.box_download_insecure = false + + config.vm.provider :libvirt do |libvirt| + libvirt.cpus = 2 + libvirt.memory = 4096 + libvirt.uri = 'qemu:///system' + libvirt.cpu_mode = 'host-passthrough' + libvirt.nested = true + end + + 
config.vm.define "controller" do |controller| + controller.vm.hostname = "controller" + controller.vm.network :private_network, ip: controller_ip + controller.vm.provision :ansible do |ansible| + ansible.playbook = "playbooks/site.yml" + ansible.inventory_path = "inventory/hosts.ini" + ansible.limit = "controller" + ansible.verbose = "v" + ansible.extra_vars = { ansible_python_interpreter: "/usr/bin/python3" } + end + end + + config.vm.define "compute" do |compute| + compute.vm.hostname = "compute" + compute.vm.network :private_network, ip: compute_ip + compute.vm.provision :ansible do |ansible| + ansible.playbook = "playbooks/site.yml" + ansible.inventory_path = "inventory/hosts.ini" + ansible.limit = "compute" + ansible.verbose = "v" + ansible.extra_vars = { ansible_python_interpreter: "/usr/bin/python3" } + end + end +end diff --git a/playbooks/ansible-openstack-nova/add-local-box.sh b/playbooks/ansible-openstack-nova/add-local-box.sh new file mode 100755 index 00000000..cd9def79 --- /dev/null +++ b/playbooks/ansible-openstack-nova/add-local-box.sh @@ -0,0 +1,127 @@ +#!/bin/bash +# add-local-box.sh +# Helper script to add a local Ubuntu 20.04 box for offline usage + +# This script is designed to work with the OpenStack Nova setup project +# It can be called automatically by setup.sh when box download fails + +set -e + +# Default values +BOX_NAME="ubuntu2004" +BOX_URL="https://cloud-images.ubuntu.com/releases/20.04/release/ubuntu-20.04-server-cloudimg-amd64.img" + +# ANSI color codes +COLOR_RED="\033[31m" +COLOR_GREEN="\033[32m" +COLOR_YELLOW="\033[33m" +COLOR_BOLD="\033[1m" +COLOR_RESET="\033[0m" + +# Logging functions +log_info() { + echo "${COLOR_GREEN}[INFO]${COLOR_RESET} $1" +} + +log_warning() { + echo "${COLOR_YELLOW}[WARNING]${COLOR_RESET} $1" +} + +log_error() { + echo "${COLOR_RED}[ERROR]${COLOR_RESET} $1" >&2 + exit 1 +} + +# Parse arguments +while [ $# -gt 0 ]; do + case "$1" in + --box-name=*) + BOX_NAME=$(echo "$1" | cut -d= -f2) + shift + ;; + 
--box-file=*) + BOX_FILE=$(echo "$1" | cut -d= -f2) + shift + ;; + --help|-h) + echo "Usage: $0 [OPTIONS]" + echo "Helper script to add a local Ubuntu 20.04 box for offline usage" + echo "" + echo "Options:" + echo " --box-name=NAME Box name to use (default: ubuntu2004)" + echo " --box-file=FILE Path to existing box file" + echo " --help, -h Show this help message" + echo "" + echo "Examples:" + echo " $0 # Download and add default box" + echo " $0 --box-name=my-ubuntu --box-file=/path/to/ubuntu.box" + exit 0 + ;; + *) + log_error "Unknown argument: $1" + ;; + esac +done + +# Check if box already exists +if vagrant box list | grep -q "$BOX_NAME"; then + log_warning "Box '$BOX_NAME' already exists. Skipping addition." + exit 0 +fi + +# If box file is provided, use it directly +if [ -n "$BOX_FILE" ]; then + if [ ! -f "$BOX_FILE" ]; then + log_error "Box file '$BOX_FILE' not found." + fi + + log_info "Adding box '$BOX_NAME' from '$BOX_FILE'..." + vagrant box add "$BOX_NAME" "$BOX_FILE" || log_error "Failed to add box from file." + log_info "Box '$BOX_NAME' added successfully." + exit 0 +fi + +# Download and convert cloud image to Vagrant box +log_info "Downloading Ubuntu 20.04 cloud image..." +TEMP_DIR=$(mktemp -d) +cd "$TEMP_DIR" + +# Download cloud image (vagrant-libvirt requires the image inside the box to be named box.img) +wget -O box.img "$BOX_URL" || log_error "Failed to download cloud image." + +# Create Vagrant box metadata +cat > metadata.json << EOF +{ + "provider": "libvirt", + "format": "qcow2", + "virtual_size": 10 +} +EOF + +# Create Vagrantfile for the box +cat > Vagrantfile << EOF +Vagrant.configure("2") do |config| + config.vm.provider :libvirt do |libvirt| + libvirt.driver = "kvm" + libvirt.host = "localhost" + libvirt.uri = "qemu:///system" + libvirt.memory = 2048 + libvirt.cpus = 2 + end +end +EOF + +# Create box archive +log_info "Creating Vagrant box archive..." +tar cvzf ubuntu2004.box metadata.json Vagrantfile box.img || log_error "Failed to create box archive." 
+ +# Add box to Vagrant +log_info "Adding box to Vagrant..." +vagrant box add "$BOX_NAME" ubuntu2004.box || log_error "Failed to add box to Vagrant." + +# Cleanup +cd - +rm -rf "$TEMP_DIR" + +log_info "Box '$BOX_NAME' added successfully." +log_info "You can now use it with: VAGRANT_BOX=$BOX_NAME ./setup.sh" \ No newline at end of file diff --git a/playbooks/ansible-openstack-nova/ansible.cfg b/playbooks/ansible-openstack-nova/ansible.cfg new file mode 100644 index 00000000..9c12cc0e --- /dev/null +++ b/playbooks/ansible-openstack-nova/ansible.cfg @@ -0,0 +1,41 @@ +# This file defines default behaviors for Ansible within this project. + +[defaults] +# Specify the location of your inventory file. +inventory = ./inventory/hosts.ini + +# Define where Ansible should look for roles. +roles_path = ./roles + +# WARNING: Host key checking should be enabled in production environments for security. +# For development/lab environments, setting this to False avoids SSH host key prompts. +host_key_checking = False + +# Define where Ansible looks for collections. (Using singular form for future compatibility) +collections_path = ./collections + +# Specify the Python interpreter on the control node. +interpreter_python = /usr/bin/python3 + +# Specify the Python interpreter on the remote managed nodes. +ansible_python_interpreter = /usr/bin/python3 + +# Set the number of parallel processes for Ansible runs (replaces Vagrantfile's raw_args/args) +forks = 5 +# No comments allowed on this line after the '5'! +# Any comments for 'forks' should be on a separate line above or below. + +# Enable fact caching to speed up subsequent playbook runs. 
+# fact_caching = jsonfile +# fact_caching_connection = /tmp/ansible_fact_cache +# fact_caching_timeout = 86400 + +[privilege_escalation] +become = True +become_method = sudo +become_user = root +become_ask_pass = False + +[ssh_connection] +pipelining = True +# ssh_args = -o ControlMaster=auto -o ControlPersist=60s \ No newline at end of file diff --git a/playbooks/ansible-openstack-nova/cleanup.sh b/playbooks/ansible-openstack-nova/cleanup.sh new file mode 100644 index 00000000..30862b13 --- /dev/null +++ b/playbooks/ansible-openstack-nova/cleanup.sh @@ -0,0 +1,134 @@ +#!/bin/sh +# cleanup.sh +# Streams Ansible playbook output and destroys Vagrant VMs if successful. + +# Usage: +# ./cleanup.sh # Basic cleanup +# ./cleanup.sh --force # Force cleanup without playbook success check + +set -e + +# ANSI color codes +COLOR_RED="\033[31m" +COLOR_GREEN="\033[32m" +COLOR_YELLOW="\033[33m" +COLOR_BOLD="\033[1m" +COLOR_UNDERLINE="\033[4m" +COLOR_RESET="\033[0m" + +# Logging functions +log_section() { + echo "${COLOR_BOLD}${COLOR_UNDERLINE}===== $1 =====${COLOR_RESET}" +} + +log_info() { + echo "${COLOR_GREEN}[INFO] $(date '+%Y-%m-%d %H:%M:%S') - $1${COLOR_RESET}" +} + +log_warning() { + echo "${COLOR_YELLOW}[WARNING] $(date '+%Y-%m-%d %H:%M:%S') - $1${COLOR_RESET}" +} + +log_error() { + echo "${COLOR_RED}[ERROR] $(date '+%Y-%m-%d %H:%M:%S') - $1${COLOR_RESET}" >&2 + exit 1 +} + +# Parse arguments +FORCE=false +TIMEOUT=3600 # Increased to 1 hour +while [ $# -gt 0 ]; do + case "$1" in + --force) FORCE=true; shift ;; + --timeout=*) + TIMEOUT=$(echo "$1" | cut -d= -f2) + shift + ;; + *) log_error "Unknown argument: $1" ;; + esac +done + +log_section "Starting Cleanup" + +# Verify vagrant command +command -v vagrant >/dev/null 2>&1 || log_error "Vagrant not installed." + +# Verify Vagrantfile +[ -f Vagrantfile ] || log_error "Vagrantfile not found in current directory." 
+# Warn if libvirt provider is not configured +grep "provider.*libvirt" Vagrantfile >/dev/null 2>&1 || log_warning "Vagrantfile may not be configured for libvirt provider." + +# Check if VMs are running +log_section "Checking VM Status" +if stdbuf -oL vagrant status | grep -E "controller.*running|compute.*running" | wc -l | grep "^2$" >/dev/null 2>&1; then + log_info "Both controller and compute VMs are running." +else + log_error "VMs (controller and compute) are not both running. Current status:\n$(vagrant status)" +fi + +# Skip playbook check if --force is used +if [ "$FORCE" = true ]; then + log_info "Force mode enabled. Skipping playbook success check." +else + # Wait for Ansible playbook completion while streaming output + log_section "Streaming Ansible Playbook Output" + [ -f vagrant_up.log ] || log_error "vagrant_up.log not found. Run './setup.sh' to provision VMs." + log_info "Streaming output of Ansible playbook (site.yml) from vagrant_up.log (timeout: ${TIMEOUT} seconds)..." # Use ${TIMEOUT} + ELAPSED=0 + SLEEP=10 + tail -n 0 -f vagrant_up.log & + TAIL_PID=$! + while [ "$ELAPSED" -lt "$TIMEOUT" ]; do + if grep "PLAY RECAP" vagrant_up.log >/dev/null 2>&1; then + kill $TAIL_PID 2>/dev/null || true + log_info "Ansible playbook completed." + break + fi + sleep "$SLEEP" + ELAPSED=$(expr $ELAPSED + $SLEEP) + done + + # Ensure tail process is terminated + kill $TAIL_PID 2>/dev/null || true + wait $TAIL_PID 2>/dev/null || true + + if ! grep "PLAY RECAP" vagrant_up.log >/dev/null 2>&1; then + log_error "Ansible playbook did not complete within ${TIMEOUT} seconds. Check vagrant_up.log or increase --timeout. VMs preserved." # Use ${TIMEOUT} + fi + + # Verify failed=0 for controller and compute + log_section "Verifying Playbook Success" + for host in controller compute; do + if grep -A 2 "PLAY RECAP.*$host" vagrant_up.log | grep "failed=0" >/dev/null 2>&1; then + : # No-op + else + log_error "Ansible playbook reported failures for $host. 
Check vagrant_up.log (search 'PLAY RECAP'). VMs preserved." + fi + done + log_info "Ansible playbook (site.yml) completed successfully with no reported failures." +fi + +# Destroy VMs +log_section "Destroying Vagrant VMs" +if stdbuf -oL vagrant destroy -f >vagrant_destroy.log 2>&1; then + rm -f vagrant_destroy.log + log_info "Vagrant VMs destroyed successfully." +else + log_error "Failed to destroy VMs:\n$(cat vagrant_destroy.log)" +fi + +# Verify libvirt domains are removed +log_section "Verifying libvirt Domain Cleanup" +if stdbuf -oL virsh -c qemu:///system list --all | grep -E "controller|compute" >/dev/null 2>&1; then + log_warning "libvirt domains still exist. Attempting manual cleanup..." + for domain in controller compute; do + stdbuf -oL virsh -c qemu:///system destroy "$domain" 2>/dev/null || true + stdbuf -oL virsh -c qemu:///system undefine "$domain" 2>/dev/null || true + done + if stdbuf -oL virsh -c qemu:///system list --all | grep -E "controller|compute" >/dev/null 2>&1; then + log_error "Failed to remove libvirt domains after manual attempt. Manual intervention required." + fi +fi +log_info "libvirt domains removed successfully." + +log_section "Cleanup Complete" diff --git a/playbooks/ansible-openstack-nova/docs/architecture.md b/playbooks/ansible-openstack-nova/docs/architecture.md new file mode 100644 index 00000000..32b21a3f --- /dev/null +++ b/playbooks/ansible-openstack-nova/docs/architecture.md @@ -0,0 +1,129 @@ +# OpenStack Deployment Architecture + +This document describes the architecture of the OpenStack deployment implemented by this Ansible playbook. + +## Overview + +This deployment follows the standard OpenStack architecture with a controller node and compute nodes. The controller node hosts all the core services, while compute nodes run the hypervisor and related services. + +## Node Roles + +### Controller Node + +The controller node runs the following services: + +1. 
**Identity Service (Keystone)** + - Provides authentication and authorization for all OpenStack services + - Manages users, projects, roles, and service catalogs + - Uses Apache HTTP server with mod_wsgi to serve the API + +2. **Image Service (Glance)** + - Stores and retrieves virtual machine images + - Supports multiple storage backends + - Integrates with Keystone for authentication + +3. **Compute Service (Nova)** + - Controller components: + - nova-api: REST API interface + - nova-scheduler: Decides which host to run instances on + - nova-conductor: Mediates interactions between nova-compute and database + - nova-novncproxy: Provides VNC access to instances + +4. **Messaging Queue (RabbitMQ)** + - Provides communication between OpenStack services + - Implements AMQP protocol for reliable messaging + +5. **Database (MariaDB)** + - Stores data for all OpenStack services + - Uses MySQL-compatible database engine + +### Compute Nodes + +Compute nodes run the following services: + +1. **Compute Service (Nova)** + - nova-compute: Manages virtual machines through hypervisor APIs + - nova-libvirt: Libvirt driver for managing KVM/QEMU instances + +2. **Networking (Open vSwitch)** + - Provides virtual networking capabilities + - Manages virtual switches, bridges, and VLANs + +## Service Interactions + +### Authentication Flow + +1. User requests authentication through Keystone +2. Keystone validates credentials and returns authentication token +3. User includes token in subsequent requests to other services +4. Services validate token with Keystone before processing requests + +### Instance Creation Flow + +1. User sends instance creation request to Nova API +2. Nova API validates request and forwards to Nova Conductor +3. Nova Conductor queries Nova Scheduler for appropriate compute node +4. Nova Scheduler selects compute node based on available resources +5. Nova Conductor instructs selected Nova Compute to create instance +6. 
Nova Compute uses Glance to retrieve image +7. Nova Compute uses Neutron for network configuration +8. Nova Compute uses Cinder for block storage (if requested) +9. Instance is created and started on compute node + +### Database Access + +All services access the MariaDB database: +- Keystone stores user, project, and service catalog data +- Glance stores image metadata +- Nova stores instance metadata and scheduling information +- Neutron stores network configuration data + +Services use SQLAlchemy ORM for database access with connection pooling. + +## Security Considerations + +### User Permissions + +Each OpenStack service runs under its own dedicated system user: +- Keystone runs as the `keystone` user +- Glance runs as the `glance` user +- Nova runs as the `nova` user +- Neutron runs as the `neutron` user + +This provides process isolation and limits the impact of potential security breaches. + +### Network Security + +- Services communicate over internal network with encrypted connections where possible +- API endpoints are protected by Keystone authentication +- Database connections use secure authentication mechanisms + +### Data Protection + +- Fernet tokens are used for authentication (no persistence required) +- Credentials are encrypted using credential encryption keys +- Database connections are secured with strong passwords + +## High Availability Considerations + +This deployment is designed for a single-node setup for development and testing. For production environments, consider: + +1. **Database replication** for high availability +2. **Load balancers** for API services +3. **Multiple controller nodes** with clustering +4. **Multiple compute nodes** for workload distribution +5. **Redundant messaging queues** for reliability + +## Deployment Process + +The Ansible playbooks deploy services in the following order: + +1. Common configuration (networking, repositories) +2. Database (MariaDB) +3. Messaging queue (RabbitMQ) +4. 
Identity service (Keystone) +5. Image service (Glance) +6. Compute service (Nova) +7. Validation and testing + +Each service is configured to start automatically and integrate with the others through the shared messaging queue and database. \ No newline at end of file diff --git a/playbooks/ansible-openstack-nova/docs/index.md b/playbooks/ansible-openstack-nova/docs/index.md new file mode 100644 index 00000000..f794cd08 --- /dev/null +++ b/playbooks/ansible-openstack-nova/docs/index.md @@ -0,0 +1,31 @@ +# OpenStack Nova Automation Documentation + +This directory contains comprehensive documentation for the OpenStack Nova automation project implemented by this Ansible playbook. The project automates the complete and robust deployment of OpenStack Nova (Compute Service) along with its minimal dependencies for testing and validation. + +## Table of Contents + +1. [Architecture](architecture.md) - Detailed information about the OpenStack deployment architecture and service interactions +2. [Security](security.md) - Information about security implementation, user permissions, and best practices + +## Overview + +This documentation provides detailed information about the OpenStack deployment implemented by this Ansible playbook. It covers the architecture, security considerations, and other important aspects of the deployment. + +## Architecture + +The deployment follows the standard OpenStack architecture with a controller node and compute nodes. For detailed information about the architecture, see [Architecture Documentation](architecture.md). + +## Security + +Security is an important aspect of any OpenStack deployment. This implementation follows several security best practices including proper user permissions, secure communication, and data protection. For detailed information about security implementation, see [Security Documentation](security.md). + +## Contributing to Documentation + +If you would like to contribute to this documentation: + +1. 
Fork the repository +2. Create a feature branch +3. Make your changes to the documentation +4. Submit a pull request + +Please ensure that any changes to the documentation are clear, accurate, and follow the existing style and structure. \ No newline at end of file diff --git a/playbooks/ansible-openstack-nova/docs/security.md b/playbooks/ansible-openstack-nova/docs/security.md new file mode 100644 index 00000000..7641157d --- /dev/null +++ b/playbooks/ansible-openstack-nova/docs/security.md @@ -0,0 +1,202 @@ +# Security Implementation in OpenStack Deployment + +This document describes the security implementation in this OpenStack deployment, focusing on user permissions, service isolation, and secure communication between components. + +## User Permissions and Service Isolation + +Each OpenStack service runs under its own dedicated system user to provide process isolation and limit the impact of potential security breaches. + +### Keystone (Identity Service) + +- **Service User**: `keystone` +- **Service Group**: `keystone` +- **File Permissions**: Configuration files and directories owned by `keystone:keystone` +- **Execution Context**: Most Keystone management commands run as the `keystone` user +- **Security Benefits**: + - Limits access to Keystone-specific files and directories + - Prevents unauthorized access to authentication tokens and credentials + - Isolates Keystone processes from other system services + +### Glance (Image Service) + +- **Service User**: `glance` +- **Service Group**: `glance` +- **File Permissions**: Configuration files and image storage owned by `glance:glance` +- **Execution Context**: Glance API and registry services run as the `glance` user +- **Security Benefits**: + - Protects virtual machine images from unauthorized access + - Limits access to image metadata and configuration + - Isolates image service processes + +### Nova (Compute Service) + +- **Service User**: `nova` +- **Service Group**: `nova` +- **File Permissions**: 
Configuration files and instance data owned by `nova:nova` +- **Execution Context**: Nova services run as the `nova` user +- **Security Benefits**: + - Protects virtual machine instances and their data + - Limits access to compute resources and scheduling information + - Isolates compute processes from other services + +### RabbitMQ (Message Queue) + +- **Service User**: `rabbitmq` +- **Service Group**: `rabbitmq` +- **File Permissions**: Configuration and data files owned by `rabbitmq:rabbitmq` +- **Execution Context**: Message broker runs as the `rabbitmq` user +- **Security Benefits**: + - Protects inter-service communication + - Limits access to message queues and exchanges + - Isolates messaging infrastructure + +### MariaDB (Database) + +- **Service User**: `mysql` +- **Service Group**: `mysql` +- **File Permissions**: Database files owned by `mysql:mysql` +- **Execution Context**: Database server runs as the `mysql` user +- **Security Benefits**: + - Protects all OpenStack service data + - Limits database access to authorized services + - Isolates database processes + +## Secure Communication + +### Database Connections + +All services connect to the MariaDB database using secure authentication: + +1. **User Authentication**: Each service uses a dedicated database user with specific privileges +2. **Password Protection**: Strong passwords are used for all database users +3. **Connection Security**: Connections are made over localhost for minimal network exposure +4. **Privilege Limitation**: Each service user has minimal required privileges + +### Message Queue Connections + +Services communicate with RabbitMQ using secure connections: + +1. **User Authentication**: Each service uses a dedicated RabbitMQ user +2. **Password Protection**: Strong passwords protect message queue access +3. **Virtual Hosts**: Services are isolated using separate virtual hosts where appropriate +4. 
**Access Control**: Fine-grained permissions limit what each service can do + +### API Communication + +OpenStack services communicate via REST APIs with proper authentication: + +1. **Token-Based Authentication**: Keystone tokens are used to authenticate API requests +2. **Service Catalog**: Services discover each other through Keystone's service catalog +3. **Role-Based Access Control**: Users and services have specific roles that limit access +4. **HTTPS Support**: APIs can be configured to use HTTPS for encryption in transit + +## Data Protection + +### Authentication Tokens + +Keystone uses Fernet tokens for authentication: + +1. **No Persistence**: Fernet tokens don't require database storage +2. **Encryption**: Tokens are encrypted and can be validated without database lookups +3. **Rotation**: Keys can be rotated without service interruption +4. **Performance**: Faster validation compared to UUID tokens with database backend + +### Credential Encryption + +Sensitive credentials are protected using encryption: + +1. **Key Management**: Credential keys are managed separately from other services +2. **Encryption at Rest**: Stored credentials are encrypted +3. **Access Control**: Only authorized services can access credential decryption keys + +### Configuration Files + +Configuration files are protected with appropriate permissions: + +1. **File Ownership**: Files are owned by the appropriate service user +2. **Permission Settings**: Sensitive files use restrictive permissions (e.g., 0640) +3. **Directory Permissions**: Directories use appropriate permissions (e.g., 0750) +4. **Secret Protection**: Passwords and other secrets are not stored in plain text where possible + +## Network Security + +### Service Isolation + +Services are isolated through various mechanisms: + +1. **User Isolation**: Each service runs under a separate user account +2. **Network Isolation**: Services communicate through localhost or private networks +3. 
**Firewall Rules**: Unnecessary ports are blocked to limit exposure +4. **Service Binding**: Services bind only to necessary network interfaces + +### Port Security + +Services use standard ports with security considerations: + +1. **Keystone**: 5000 (public), 35357 (admin) - Protected by authentication +2. **Glance**: 9292 (API) - Protected by authentication +3. **Nova**: 8774 (API), 6080 (VNC) - Protected by authentication +4. **RabbitMQ**: 5672 (AMQP) - Restricted to localhost +5. **MariaDB**: 3306 (MySQL) - Restricted to localhost + +## Best Practices Implemented + +### Principle of Least Privilege + +Each service and user has only the minimum permissions necessary: + +1. **Database Privileges**: Services have access only to their specific databases +2. **File System Access**: Services can only access their own files and directories +3. **Network Access**: Services bind only to necessary interfaces +4. **Command Execution**: Services run with minimal required capabilities + +### Secure Defaults + +The deployment uses secure defaults where possible: + +1. **Strong Passwords**: Default passwords are complex and should be changed +2. **Restricted Access**: Services are configured to limit access by default +3. **Encryption Enabled**: Encryption is enabled for tokens and credentials +4. **Logging**: Security-relevant events are logged for audit purposes + +### Regular Updates + +Security practices include: + +1. **Package Updates**: Services use current stable versions +2. **Security Patches**: Regular updates are applied to fix vulnerabilities +3. **Configuration Reviews**: Security settings are reviewed and updated as needed +4. **Monitoring**: Security events are monitored and alerts are configured + +## Audit and Compliance + +### Logging + +Security-relevant events are logged: + +1. **Authentication Events**: Login attempts and token validations +2. **Authorization Events**: Access control decisions +3. 
**Configuration Changes**: Changes to service configurations +4. **Error Conditions**: Security-related errors and warnings + +### Monitoring + +Security monitoring includes: + +1. **Log Analysis**: Regular review of security logs +2. **Intrusion Detection**: Monitoring for suspicious activities +3. **Performance Monitoring**: Detection of abnormal resource usage +4. **Compliance Checking**: Verification of security policies + +## Recommendations for Production + +For production deployments, consider these additional security measures: + +1. **Network Segmentation**: Isolate management and data networks +2. **Load Balancers**: Use load balancers with SSL termination +3. **Certificate Management**: Implement proper SSL certificate management +4. **Backup Encryption**: Encrypt backups of sensitive data +5. **Regular Audits**: Perform regular security audits and penetration testing +6. **Multi-Factor Authentication**: Implement MFA for administrative access +7. **Security Updates**: Establish a process for regular security updates +8. **Incident Response**: Develop and maintain an incident response plan \ No newline at end of file diff --git a/playbooks/ansible-openstack-nova/inventory/group_vars/all.yml b/playbooks/ansible-openstack-nova/inventory/group_vars/all.yml new file mode 100644 index 00000000..b10d3d3c --- /dev/null +++ b/playbooks/ansible-openstack-nova/inventory/group_vars/all.yml @@ -0,0 +1,58 @@ +--- +# Variables applicable to all hosts in the inventory. 
+ +# Ansible connection variables +ansible_user: vagrant +ansible_become: yes +ansible_python_interpreter: /usr/bin/python3 + +# OpenStack general variables +openstack_db_name: openstack +openstack_db_user: openstack_admin +openstack_db_password: "SUPER_SECURE_DB_PASSWORD" +openstack_admin_password: "ADMIN_PASSWORD_FOR_KEYSTONE" +openstack_region_name: RegionOne + +# RabbitMQ specific variables +rabbitmq_password: "RABBITMQ_SECURE_PASSWORD" + +# Glance specific variables +glance_user_password: "GLANCE_SECURE_PASSWORD" + +# Placement specific variables +placement_user_password: "PLACEMENT_SECURE_PASSWORD" + +# Nova specific variables +nova_user_password: "NOVA_SECURE_PASSWORD" + +# Database connection details (will be used by services like Nova) +# This assumes MariaDB is on the controller node +database_connection_base: "mysql+pymysql://{{ openstack_db_user }}:{{ openstack_db_password }}@{{ hostvars['controller']['ansible_host'] }}" + +# Network configuration +controller_ip_address: "192.168.56.10" +compute_ip_address: "192.168.56.11" + +# List of hosts and their IPs for /etc/hosts configuration +# This is used by the common role to populate /etc/hosts on all nodes. +hosts_entries: + - { ip: "{{ controller_ip_address }}", hostname: "controller" } + - { ip: "{{ compute_ip_address }}", hostname: "compute" } + +# This is used by the common role to populate /etc/hosts on all nodes. 
+hosts_entries_all: "{{ hosts_entries }}" + +# Nova Validation specific variables +cirros_image_url: "http://download.cirros-cloud.net/0.5.2/cirros-0.5.2-x86_64-disk.img" +cirros_image_name: "cirros-0.5.2-x86_64-disk.img" +cirros_image_glance_name: "cirros-test-image" +test_network_name: "test-net" +test_subnet_name: "test-subnet" +test_subnet_cidr: "10.0.0.0/24" +test_subnet_gateway: "10.0.0.1" +test_dns_nameservers: ["8.8.8.8"] +test_security_group_name: "test-security-group" +test_keypair_name: "test-keypair" +test_flavor_name: "m1.tiny" # Default OpenStack flavor +test_instance_name: "test-nova-instance" +test_physical_network: "physnet1" # This needs to match your Neutron setup. For flat, often 'physnet1'. diff --git a/playbooks/ansible-openstack-nova/inventory/group_vars/computes.yml b/playbooks/ansible-openstack-nova/inventory/group_vars/computes.yml new file mode 100644 index 00000000..7b8bb484 --- /dev/null +++ b/playbooks/ansible-openstack-nova/inventory/group_vars/computes.yml @@ -0,0 +1,5 @@ +--- +# Variables specific to compute nodes. + +# Compute IP address (redundant with hosts.ini but useful for explicit reference in roles) +compute_ip: "{{ compute_ip_address }}" \ No newline at end of file diff --git a/playbooks/ansible-openstack-nova/inventory/group_vars/controllers.yml b/playbooks/ansible-openstack-nova/inventory/group_vars/controllers.yml new file mode 100644 index 00000000..dc52efef --- /dev/null +++ b/playbooks/ansible-openstack-nova/inventory/group_vars/controllers.yml @@ -0,0 +1,50 @@ +--- +# Variables specific to controller nodes. 
+ +# Controller IP address (redundant with hosts.ini but useful for explicit reference in roles) +controller_ip: "{{ controller_ip_address }}" + +# Keystone service endpoint URLs +keystone_admin_url: "http://{{ controller_ip }}:5000/v3" +keystone_public_url: "http://{{ controller_ip }}:5000/v3" +keystone_internal_url: "http://{{ controller_ip }}:5000/v3" + +# Glance service endpoint URLs +glance_api_url: "http://{{ controller_ip }}:9292" + +# Placement service endpoint URLs +placement_api_url: "http://{{ controller_ip }}:8778" + +# RabbitMQ host (typically on controller) +rabbitmq_host: "{{ controller_ip }}" + +# MariaDB specific variables +mariadb_bind_address: 0.0.0.0 # Binds to all interfaces, allowing connections from compute node + +# Keystone specific variables +keystone_db_name: keystone +keystone_rc_file: "/root/admin-openrc.sh" # ADDED: Path to the admin-openrc.sh file + +# Glance specific variables +glance_db_name: glance + +# Placement specific variables +placement_db_name: placement + +# Nova service endpoint URLs +nova_public_url: "http://{{ controller_ip }}:8774/v2.1" +nova_internal_url: "http://{{ controller_ip }}:8774/v2.1" +nova_admin_url: "http://{{ controller_ip }}:8774/v2.1" + +# OpenStack CLI configuration for openstack modules +# This tells openstacksdk how to authenticate using the admin-openrc.sh file +openstack_cloud_config: + cloud: admin_cloud # A name for this cloud profile + auth: + auth_url: "{{ keystone_admin_url }}" + username: admin + password: "{{ openstack_admin_password }}" + project_name: admin + user_domain_name: Default + project_domain_name: Default + region_name: "{{ openstack_region_name }}" diff --git a/playbooks/ansible-openstack-nova/inventory/hosts.ini b/playbooks/ansible-openstack-nova/inventory/hosts.ini new file mode 100644 index 00000000..d5b9c06d --- /dev/null +++ b/playbooks/ansible-openstack-nova/inventory/hosts.ini @@ -0,0 +1,9 @@ +[controllers] +controller ansible_host=192.168.56.10 ansible_user=vagrant 
ansible_ssh_private_key_file=.vagrant/machines/controller/libvirt/private_key + +[computes] +compute ansible_host=192.168.56.11 ansible_user=vagrant ansible_ssh_private_key_file=.vagrant/machines/compute/libvirt/private_key + +[openstack_nodes:children] +controllers +computes diff --git a/playbooks/ansible-openstack-nova/playbooks/check_dependencies.yml b/playbooks/ansible-openstack-nova/playbooks/check_dependencies.yml new file mode 100644 index 00000000..fb3cbc4e --- /dev/null +++ b/playbooks/ansible-openstack-nova/playbooks/check_dependencies.yml @@ -0,0 +1,38 @@ +--- +# This playbook installs and configures all minimal OpenStack dependencies required for Nova. + +- name: Common setup for all OpenStack nodes + hosts: openstack_nodes + become: yes + roles: + - common + +- name: Install and configure MariaDB + hosts: controllers + become: yes + roles: + - mariadb + +- name: Install and configure RabbitMQ + hosts: controllers + become: yes + roles: + - rabbitmq + +- name: Install and configure Keystone (minimal) + hosts: controllers + become: yes + roles: + - keystone_minimal + +- name: Install and configure Glance (minimal) + hosts: controllers + become: yes + roles: + - glance_minimal + +- name: Install and configure Placement (minimal) + hosts: controllers + become: yes + roles: + - placement_minimal diff --git a/playbooks/ansible-openstack-nova/playbooks/install_nova.yml b/playbooks/ansible-openstack-nova/playbooks/install_nova.yml new file mode 100644 index 00000000..83a89361 --- /dev/null +++ b/playbooks/ansible-openstack-nova/playbooks/install_nova.yml @@ -0,0 +1,10 @@ +--- +# This playbook installs and configures Nova on both controller and compute nodes. +# It assumes that all necessary dependencies (MariaDB, RabbitMQ, Keystone, Glance, Placement) +# have already been installed and configured. 
+ +- name: Install and configure Nova + hosts: openstack_nodes # Nova components run on both controller and compute + become: yes + roles: + - nova diff --git a/playbooks/ansible-openstack-nova/playbooks/site.yml b/playbooks/ansible-openstack-nova/playbooks/site.yml new file mode 100644 index 00000000..8d1ccf5f --- /dev/null +++ b/playbooks/ansible-openstack-nova/playbooks/site.yml @@ -0,0 +1,50 @@ +--- +# This is the main playbook that orchestrates the entire OpenStack Nova deployment and validation. + +- name: Common setup for all OpenStack nodes + hosts: openstack_nodes + become: yes + roles: + - common + +- name: Install and configure MariaDB + hosts: controllers + become: yes + roles: + - mariadb + +- name: Install and configure RabbitMQ + hosts: controllers + become: yes + roles: + - rabbitmq + +- name: Install and configure Keystone (minimal) + hosts: controllers + become: yes + roles: + - keystone_minimal + +- name: Install and configure Glance (minimal) + hosts: controllers + become: yes + roles: + - glance_minimal + +- name: Install and configure Placement (minimal) + hosts: controllers + become: yes + roles: + - placement_minimal + +- name: Install and configure Nova + hosts: openstack_nodes # Nova components run on both controller and compute + become: yes + roles: + - nova + +- name: Validate Nova deployment + hosts: controllers # Validation tasks run on the controller where CLI tools are available + become: yes + roles: + - nova_validation diff --git a/playbooks/ansible-openstack-nova/playbooks/validate_nova.yml b/playbooks/ansible-openstack-nova/playbooks/validate_nova.yml new file mode 100644 index 00000000..f2c84859 --- /dev/null +++ b/playbooks/ansible-openstack-nova/playbooks/validate_nova.yml @@ -0,0 +1,10 @@ +--- +# This playbook validates the Nova deployment by checking services, endpoints, +# and launching/cleaning up a test virtual machine. +# It assumes Nova and its dependencies are already installed and running. 
+ +- name: Validate Nova deployment + hosts: controllers # Validation tasks run on the controller where CLI tools are available + become: yes + roles: + - nova_validation \ No newline at end of file diff --git a/playbooks/ansible-openstack-nova/requirements.yml b/playbooks/ansible-openstack-nova/requirements.yml new file mode 100644 index 00000000..73036bb7 --- /dev/null +++ b/playbooks/ansible-openstack-nova/requirements.yml @@ -0,0 +1,13 @@ +--- +# This file lists the Ansible collections required by this project. +# It ensures that all necessary modules are available when running the playbooks. + +collections: + - name: community.general + version: ">=8.0.0,<9.0.0" + - name: community.mysql + version: ">=3.5.0" + - name: community.rabbitmq + version: ">=1.6.0" + - name: ansible.posix + version: ">=1.5.0" diff --git a/playbooks/ansible-openstack-nova/roles/common/tasks/main.yml b/playbooks/ansible-openstack-nova/roles/common/tasks/main.yml new file mode 100644 index 00000000..21a12ac3 --- /dev/null +++ b/playbooks/ansible-openstack-nova/roles/common/tasks/main.yml @@ -0,0 +1,64 @@ +--- +- name: Update apt cache + ansible.builtin.apt: + update_cache: yes + cache_valid_time: 3600 # Cache valid for 1 hour + +- name: Install common packages + ansible.builtin.apt: + name: "{{ common_packages }}" + state: present + register: install_common_packages + until: install_common_packages is success + retries: 5 + delay: 5 + +- name: Set operating system hostname + ansible.builtin.hostname: + # Use 'node_os_hostname' if defined for this host, otherwise default to 'inventory_hostname' + name: "{{ node_os_hostname | default(inventory_hostname) }}" + when: ansible_hostname != (node_os_hostname | default(inventory_hostname)) + # The 'when' condition also needs to reflect the potential new hostname + +- name: Configure /etc/hosts entries + ansible.builtin.lineinfile: + path: /etc/hosts + regexp: "^{{ item.ip }}\\s+{{ item.hostname }}$" + line: "{{ item.ip }} {{ item.hostname }}" + 
state: present + loop: "{{ hosts_entries }}" + # Ensure the hosts file is updated on all nodes with correct entries for controller and compute. + +- name: Disable AppArmor (if enabled) + ansible.builtin.service: + name: apparmor + state: stopped + enabled: false + ignore_errors: true # AppArmor might not be installed on all systems + when: ansible_facts['os_family'] == "Debian" # AppArmor is primarily a Debian/Ubuntu feature + +- name: Ensure AppArmor is purged (if present) + ansible.builtin.apt: + name: apparmor + state: absent + purge: true + ignore_errors: true + when: ansible_facts['os_family'] == "Debian" + +- name: Disable swap + ansible.builtin.command: swapoff -a + changed_when: true # Always report as changed if swap is active + failed_when: false # Don't fail if swapoff fails (e.g., no swap configured) + +- name: Comment out swap entries in /etc/fstab + ansible.builtin.replace: + path: /etc/fstab + regexp: '^(/swapfile|UUID=.*none\s+swap)' + replace: '#\1' + when: ansible_facts['mounts'] | selectattr('fstype', 'equalto', 'swap') | list | length > 0 + +- name: Install PyMySQL for database connectivity + ansible.builtin.pip: + name: PyMySQL + state: present + executable: pip3 # Ensure pip3 is used diff --git a/playbooks/ansible-openstack-nova/roles/common/vars/main.yml b/playbooks/ansible-openstack-nova/roles/common/vars/main.yml new file mode 100644 index 00000000..315bcdf9 --- /dev/null +++ b/playbooks/ansible-openstack-nova/roles/common/vars/main.yml @@ -0,0 +1,17 @@ +--- +common_packages: + - curl + - wget + - git + - vim + - python3-openstackclient # Essential for interacting with OpenStack APIs + - open-iscsi # Required for Nova to connect to Cinder volumes + - python3-pip # Required for installing Python packages + - python3-dev # Needed for compiling Python extensions + - libffi-dev # Required for cryptography and OpenStack dependencies + - libssl-dev # Needed for SSL/TLS support in Python packages + - mariadb-client # For database connectivity (used by
OpenStack services) + - chrony # Critical for time synchronization across all nodes + - bridge-utils # For network bridge configuration (e.g., for Neutron) + +# NOTE: hosts_entries is inherited directly from inventory group_vars/all.yml; re-aliasing it here to hosts_entries_all would create a recursive template loop (role vars outrank group_vars). \ No newline at end of file diff --git a/playbooks/ansible-openstack-nova/roles/glance_minimal/handlers/main.yml b/playbooks/ansible-openstack-nova/roles/glance_minimal/handlers/main.yml new file mode 100644 index 00000000..61bb25f7 --- /dev/null +++ b/playbooks/ansible-openstack-nova/roles/glance_minimal/handlers/main.yml @@ -0,0 +1,12 @@ +--- +- name: Restart glance-api + ansible.builtin.service: + name: glance-api + state: restarted + become: yes + +- name: Restart glance-registry + ansible.builtin.service: + name: glance-registry + state: restarted + become: yes \ No newline at end of file diff --git a/playbooks/ansible-openstack-nova/roles/glance_minimal/tasks/main.yml b/playbooks/ansible-openstack-nova/roles/glance_minimal/tasks/main.yml new file mode 100644 index 00000000..f20f2c13 --- /dev/null +++ b/playbooks/ansible-openstack-nova/roles/glance_minimal/tasks/main.yml @@ -0,0 +1,217 @@ +--- +- name: Install Glance packages and OpenStack client + ansible.builtin.apt: + name: + - glance + - python3-openstackclient + - bash + state: present + update_cache: yes + become: yes + register: apt_result + retries: 3 + delay: 5 + until: apt_result is success + notify: + - Restart glance-api + - Restart glance-registry + when: inventory_hostname == 'controller' + +- name: Check if MariaDB Unix socket exists + ansible.builtin.stat: + path: /var/run/mysqld/mysqld.sock + register: socket_stat + failed_when: not socket_stat.stat.exists + when: inventory_hostname == 'controller' + +- name: Create Glance database + community.mysql.mysql_db: + name: "{{ glance_db_name }}" + state: present + login_user: root + login_unix_socket: /var/run/mysqld/mysqld.sock + become: yes + when: inventory_hostname == 'controller' and socket_stat.stat.exists + +- name: Grant privileges to Glance
database user + community.mysql.mysql_user: + name: "{{ openstack_db_user }}" + password: "{{ openstack_db_password }}" + host: "%" + priv: "{{ glance_db_name }}.*:ALL" + state: present + login_user: root + login_unix_socket: /var/run/mysqld/mysqld.sock + become: yes + when: inventory_hostname == 'controller' and socket_stat.stat.exists + +- name: Populate the Glance database + ansible.builtin.command: + cmd: glance-manage --config-file /etc/glance/glance-api.conf db_sync + creates: /etc/glance/db_synced + become: yes + become_user: glance + register: glance_db_sync_result + retries: 5 + delay: 10 + until: glance_db_sync_result is success + when: inventory_hostname == 'controller' + +- name: Check if Glance service user exists + ansible.builtin.shell: + cmd: openstack user show glance --domain Default + executable: /bin/bash + environment: + OS_CLOUD: "" + OS_AUTH_URL: "{{ keystone_public_url }}" + OS_PROJECT_DOMAIN_NAME: Default + OS_USER_DOMAIN_NAME: Default + OS_PROJECT_NAME: admin + OS_USERNAME: admin + OS_PASSWORD: "{{ openstack_admin_password }}" + OS_IDENTITY_API_VERSION: 3 + register: glance_user_check + failed_when: glance_user_check.rc not in [0, 1] + changed_when: false + when: inventory_hostname == 'controller' + +- name: Create Glance service user in Keystone + ansible.builtin.shell: + cmd: openstack user create --domain Default --password "{{ glance_user_password }}" glance + executable: /bin/bash + environment: + OS_CLOUD: "" + OS_AUTH_URL: "{{ keystone_public_url }}" + OS_PROJECT_DOMAIN_NAME: Default + OS_USER_DOMAIN_NAME: Default + OS_PROJECT_NAME: admin + OS_USERNAME: admin + OS_PASSWORD: "{{ openstack_admin_password }}" + OS_IDENTITY_API_VERSION: 3 + when: + - glance_user_check.rc == 1 + - inventory_hostname == 'controller' + register: glance_user_create + retries: 3 + delay: 5 + until: glance_user_create.rc == 0 or 'already exists' in glance_user_create.stderr + changed_when: glance_user_create.rc == 0 + failed_when: glance_user_create.rc !=
0 and 'already exists' not in glance_user_create.stderr + + +- name: Check if Glance service exists + ansible.builtin.shell: + cmd: openstack service show glance + executable: /bin/bash + environment: + OS_CLOUD: "" + OS_AUTH_URL: "{{ keystone_public_url }}" + OS_PROJECT_DOMAIN_NAME: Default + OS_USER_DOMAIN_NAME: Default + OS_PROJECT_NAME: admin + OS_USERNAME: admin + OS_PASSWORD: "{{ openstack_admin_password }}" + OS_IDENTITY_API_VERSION: 3 + register: glance_service_check + failed_when: glance_service_check.rc not in [0, 1] + changed_when: false + when: inventory_hostname == 'controller' + +- name: Create Glance service in Keystone + ansible.builtin.shell: + cmd: openstack service create --name glance --description "OpenStack Image service" image + executable: /bin/bash + environment: + OS_CLOUD: "" + OS_AUTH_URL: "{{ keystone_public_url }}" + OS_PROJECT_DOMAIN_NAME: Default + OS_USER_DOMAIN_NAME: Default + OS_PROJECT_NAME: admin + OS_USERNAME: admin + OS_PASSWORD: "{{ openstack_admin_password }}" + OS_IDENTITY_API_VERSION: 3 + when: + - glance_service_check.rc == 1 + - inventory_hostname == 'controller' + register: glance_service_create + retries: 3 + delay: 5 + until: glance_service_create.rc == 0 or 'already exists' in glance_service_create.stderr + changed_when: glance_service_create.rc == 0 + failed_when: glance_service_create.rc != 0 and 'already exists' not in glance_service_create.stderr + +- name: Create or update Glance endpoints in Keystone + ansible.builtin.shell: + cmd: | + if !
openstack endpoint show glance {{ item.interface }} &>/dev/null; then + openstack endpoint create --region "{{ openstack_region_name }}" image {{ item.interface }} "{{ item.url }}" + else + openstack endpoint set --region "{{ openstack_region_name }}" --url "{{ item.url }}" {{ item.interface }} image + fi + executable: /bin/bash + loop: + - { interface: 'public', url: "{{ glance_api_url }}" } + - { interface: 'internal', url: "{{ glance_api_url }}" } + - { interface: 'admin', url: "{{ glance_api_url }}" } + environment: + OS_CLOUD: "" + OS_AUTH_URL: "{{ keystone_public_url }}" + OS_PROJECT_DOMAIN_NAME: Default + OS_USER_DOMAIN_NAME: Default + OS_PROJECT_NAME: admin + OS_USERNAME: admin + OS_PASSWORD: "{{ openstack_admin_password }}" + OS_IDENTITY_API_VERSION: 3 + register: glance_endpoint_result + retries: 3 + delay: 5 + until: glance_endpoint_result.rc == 0 + changed_when: glance_endpoint_result.rc == 0 + failed_when: glance_endpoint_result.rc != 0 + when: inventory_hostname == 'controller' + +- name: Configure Glance API + ansible.builtin.template: + src: glance-api.conf.j2 + dest: /etc/glance/glance-api.conf + owner: glance + group: glance + mode: '0640' + become: yes + notify: Restart glance-api + when: inventory_hostname == 'controller' + +- name: Configure Glance Registry + ansible.builtin.template: + src: glance-registry.conf.j2 + dest: /etc/glance/glance-registry.conf + owner: glance + group: glance + mode: '0640' + become: yes + notify: Restart glance-registry + when: inventory_hostname == 'controller' + +- name: Ensure Glance API service is running and enabled + ansible.builtin.service: + name: glance-api + state: started + enabled: yes + become: yes + register: glance_api_service_result + retries: 3 + delay: 5 + until: glance_api_service_result is success + when: inventory_hostname == 'controller' + +- name: Ensure Glance Registry service is running and enabled + ansible.builtin.service: + name: glance-registry + state: started + enabled: yes + become: 
yes + register: glance_registry_service_result + retries: 3 + delay: 5 + until: glance_registry_service_result is success + when: inventory_hostname == 'controller' \ No newline at end of file diff --git a/playbooks/ansible-openstack-nova/roles/glance_minimal/templates/glance-api.conf.j2 b/playbooks/ansible-openstack-nova/roles/glance_minimal/templates/glance-api.conf.j2 new file mode 100644 index 00000000..471184fb --- /dev/null +++ b/playbooks/ansible-openstack-nova/roles/glance_minimal/templates/glance-api.conf.j2 @@ -0,0 +1,40 @@ +# Minimal Glance API configuration for OpenStack + +[DEFAULT] +# The verbose option will make the log output more verbose. +# verbose = true + +# The debug option will make the log output really verbose. +# debug = true + +# Connection string for the database. +bind_host = 0.0.0.0 +bind_port = 9292 +# By default, the API and Registry use the same database connection. + +[database] +connection = {{ database_connection_base }}/{{ glance_db_name }} + +[keystone_authtoken] +# The URL to the Keystone authentication server. +www_authenticate_uri = {{ keystone_public_url }} +auth_url = {{ keystone_admin_url }} +memcached_servers = localhost:11211 +auth_type = password +project_domain_name = Default +user_domain_name = Default +project_name = service +username = glance +password = {{ glance_user_password }} + +[paste_deploy] +flavor = keystone + +[glance_store] +# The backend store to use. For minimal setup, file is simplest. 
+stores = file,http +default_store = file +filesystem_store_datadir = /var/lib/glance/images/ + +[oslo_concurrency] +lock_path = /var/lib/glance/tmp diff --git a/playbooks/ansible-openstack-nova/roles/glance_minimal/templates/glance-registry.conf.j2 b/playbooks/ansible-openstack-nova/roles/glance_minimal/templates/glance-registry.conf.j2 new file mode 100644 index 00000000..095a6468 --- /dev/null +++ b/playbooks/ansible-openstack-nova/roles/glance_minimal/templates/glance-registry.conf.j2 @@ -0,0 +1,33 @@ +# Minimal Glance Registry configuration for OpenStack + +[DEFAULT] +# The verbose option will make the log output more verbose. +# verbose = true + +# The debug option will make the log output really verbose. +# debug = true + +# Connection string for the database. +bind_host = 0.0.0.0 +bind_port = 9191 + +[database] +connection = {{ database_connection_base }}/{{ glance_db_name }} + +[keystone_authtoken] +# The URL to the Keystone authentication server. +www_authenticate_uri = {{ keystone_public_url }} +auth_url = {{ keystone_admin_url }} +memcached_servers = localhost:11211 +auth_type = password +project_domain_name = Default +user_domain_name = Default +project_name = service +username = glance +password = {{ glance_user_password }} + +[paste_deploy] +flavor = keystone + +[oslo_concurrency] +lock_path = /var/lib/glance/tmp \ No newline at end of file diff --git a/playbooks/ansible-openstack-nova/roles/keystone_minimal/handlers/main.yml b/playbooks/ansible-openstack-nova/roles/keystone_minimal/handlers/main.yml new file mode 100644 index 00000000..21bb8074 --- /dev/null +++ b/playbooks/ansible-openstack-nova/roles/keystone_minimal/handlers/main.yml @@ -0,0 +1,15 @@ +--- +- name: Restart apache2 + ansible.builtin.service: + name: apache2 + state: restarted + become: yes + +- name: Mark db_sync complete + ansible.builtin.file: + path: /etc/keystone/db_synced + state: touch + owner: keystone + group: keystone + mode: '0640' + become: yes \ No newline at end of 
file diff --git a/playbooks/ansible-openstack-nova/roles/keystone_minimal/tasks/apache2_conf.yml b/playbooks/ansible-openstack-nova/roles/keystone_minimal/tasks/apache2_conf.yml new file mode 100644 index 00000000..3195df23 --- /dev/null +++ b/playbooks/ansible-openstack-nova/roles/keystone_minimal/tasks/apache2_conf.yml @@ -0,0 +1,24 @@ +--- +# Insert ServerName directive to prevent Apache warnings +# This is needed to avoid "Could not reliably determine the server's fully qualified domain name" warning +- name: Insert 'ServerName localhost' after ServerRoot line + ansible.builtin.lineinfile: + path: /etc/apache2/apache2.conf + line: "ServerName localhost" + insertafter: '^#ServerRoot.*' + state: present + backup: yes + become: true + notify: Restart apache2 + when: inventory_hostname == 'controller' + +# Enable Keystone WSGI configuration in Apache2 +# This allows Apache to serve the Keystone API via WSGI +- name: Enable Keystone WSGI in Apache2 + ansible.builtin.file: + src: /usr/share/keystone/wsgi-keystone.conf + dest: /etc/apache2/conf-enabled/wsgi-keystone.conf + state: link + become: true + notify: Restart apache2 + when: inventory_hostname == 'controller' \ No newline at end of file diff --git a/playbooks/ansible-openstack-nova/roles/keystone_minimal/tasks/create_openrc.yml b/playbooks/ansible-openstack-nova/roles/keystone_minimal/tasks/create_openrc.yml new file mode 100644 index 00000000..bb41b497 --- /dev/null +++ b/playbooks/ansible-openstack-nova/roles/keystone_minimal/tasks/create_openrc.yml @@ -0,0 +1,12 @@ +--- +# Create the admin-openrc.sh file which contains environment variables +# needed to authenticate as the admin user with the OpenStack CLI +- name: Create admin-openrc.sh file on controller + ansible.builtin.template: + src: admin-openrc.sh.j2 + dest: /root/admin-openrc.sh + owner: root + group: root + mode: '0600' + become: true + when: inventory_hostname == 'controller' \ No newline at end of file diff --git 
a/playbooks/ansible-openstack-nova/roles/keystone_minimal/tasks/db_initialise.yml b/playbooks/ansible-openstack-nova/roles/keystone_minimal/tasks/db_initialise.yml new file mode 100644 index 00000000..150ba5e3 --- /dev/null +++ b/playbooks/ansible-openstack-nova/roles/keystone_minimal/tasks/db_initialise.yml @@ -0,0 +1,48 @@ +--- +# Install MySQL Python bindings required for database operations +# These are needed for the community.mysql modules to work +- name: Install MySQL Python bindings + ansible.builtin.apt: + name: python3-pymysql + state: present + update_cache: yes + register: apt_result + retries: 3 + delay: 5 + until: apt_result is success + become: true + when: inventory_hostname == 'controller' + +# Check if MariaDB Unix socket exists to ensure database is running +# This prevents errors if the database service is not yet available +- name: Check if MariaDB Unix socket exists + ansible.builtin.stat: + path: /var/run/mysqld/mysqld.sock + register: mysql_socket_stat + failed_when: not mysql_socket_stat.stat.exists + when: inventory_hostname == 'controller' + +# Create the Keystone database in MariaDB +# This is done with root privileges as we need to create the database +- name: Create Keystone database + community.mysql.mysql_db: + name: "{{ openstack_db_name }}" + state: present + login_user: root + login_unix_socket: /var/run/mysqld/mysqld.sock + become: true + when: inventory_hostname == 'controller' and mysql_socket_stat.stat.exists + +# Grant privileges to the Keystone database user +# This allows the keystone service to access its database +- name: Grant privileges to Keystone database user + community.mysql.mysql_user: + name: "{{ openstack_db_user }}" + password: "{{ openstack_db_password }}" + host: "%" + priv: "{{ openstack_db_name }}.*:ALL" + state: present + login_user: root + login_unix_socket: /var/run/mysqld/mysqld.sock + become: true + when: inventory_hostname == 'controller' and mysql_socket_stat.stat.exists \ No newline at end of file 
diff --git a/playbooks/ansible-openstack-nova/roles/keystone_minimal/tasks/fernet_config.yml b/playbooks/ansible-openstack-nova/roles/keystone_minimal/tasks/fernet_config.yml new file mode 100644 index 00000000..8242fc3a --- /dev/null +++ b/playbooks/ansible-openstack-nova/roles/keystone_minimal/tasks/fernet_config.yml @@ -0,0 +1,46 @@ +--- +# Ensure the Fernet keys directory exists with proper ownership and permissions +# Fernet tokens are used for authentication in Keystone +- name: Ensure Fernet keys directory exists + ansible.builtin.file: + path: /etc/keystone/fernet-keys + state: directory + owner: keystone + group: keystone + mode: '0750' + become: true + when: inventory_hostname == 'controller' + +# Initialize the Fernet key repository +# This should be run as the keystone user to ensure proper file permissions +- name: Initialize Fernet key repository + ansible.builtin.command: + cmd: keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone + creates: /etc/keystone/fernet-keys/0 + become: true + # Run as the keystone user to ensure proper file permissions and service access + become_user: keystone + when: inventory_hostname == 'controller' + +# Ensure the credential keys directory exists with proper ownership and permissions +# Credential keys are used for encrypting credentials stored in Keystone +- name: Ensure credential keys directory exists + ansible.builtin.file: + path: /etc/keystone/credential-keys + state: directory + owner: keystone + group: keystone + mode: '0750' + become: true + when: inventory_hostname == 'controller' + +# Initialize the Credential key repository +# This should be run as the keystone user to ensure proper file permissions +- name: Initialize Credential key repository + ansible.builtin.command: + cmd: keystone-manage credential_setup --keystone-user keystone --keystone-group keystone + creates: /etc/keystone/credential-keys/0 + become: true + # Run as the keystone user to ensure proper file permissions 
and service access + become_user: keystone + when: inventory_hostname == 'controller' \ No newline at end of file diff --git a/playbooks/ansible-openstack-nova/roles/keystone_minimal/tasks/keystone_bootstrap.yml b/playbooks/ansible-openstack-nova/roles/keystone_minimal/tasks/keystone_bootstrap.yml new file mode 100644 index 00000000..42633860 --- /dev/null +++ b/playbooks/ansible-openstack-nova/roles/keystone_minimal/tasks/keystone_bootstrap.yml @@ -0,0 +1,55 @@ +--- +# Ensure the keystone system user exists +# This user will be used to run keystone services and own keystone files +- name: Ensure keystone user exists + ansible.builtin.user: + name: keystone + system: yes + shell: /usr/sbin/nologin + become: true + when: inventory_hostname == 'controller' + +# Ensure the keystone log directory exists with proper ownership +- name: Ensure /var/log/keystone directory exists + ansible.builtin.file: + path: /var/log/keystone + state: directory + owner: keystone + group: keystone + mode: '0755' + become: true + when: inventory_hostname == 'controller' + +# Ensure the keystone log file exists with proper ownership +- name: Ensure Keystone log file exists + ansible.builtin.file: + path: /var/log/keystone/keystone-manage.log + state: touch + owner: keystone + group: keystone + mode: '0640' + become: true + when: inventory_hostname == 'controller' + +# Bootstrap the Keystone service to create initial admin user, project, and endpoints +# This should be run only once during initial setup +- name: Bootstrap Keystone service + ansible.builtin.command: + cmd: > + keystone-manage bootstrap + --bootstrap-password "{{ openstack_admin_password }}" + --bootstrap-username admin + --bootstrap-project-name admin + --bootstrap-role-name admin + --bootstrap-service-name keystone + --bootstrap-region-id "{{ openstack_region_name }}" + --bootstrap-admin-url "{{ keystone_admin_url }}" + --bootstrap-public-url "{{ keystone_public_url }}" + --bootstrap-internal-url "{{ keystone_internal_url 
}}" + creates: /etc/keystone/bootstrap_complete + become: true + # Run as the keystone user to ensure proper file permissions and service access + become_user: keystone + environment: + OS_CLOUD: "" + when: inventory_hostname == 'controller' \ No newline at end of file diff --git a/playbooks/ansible-openstack-nova/roles/keystone_minimal/tasks/keystone_compo.yml b/playbooks/ansible-openstack-nova/roles/keystone_minimal/tasks/keystone_compo.yml new file mode 100644 index 00000000..e2ae62ca --- /dev/null +++ b/playbooks/ansible-openstack-nova/roles/keystone_minimal/tasks/keystone_compo.yml @@ -0,0 +1,19 @@ +--- +# Install Keystone identity service, Apache2 web server, and WSGI module +# Also install python3-openstackclient for command-line management tools +- name: Install Keystone, Apache2, and WSGI module + ansible.builtin.apt: + name: + - keystone + - apache2 + - libapache2-mod-wsgi-py3 + - python3-openstackclient + state: present + update_cache: yes + register: apt_result + retries: 3 + delay: 5 + until: apt_result is success + become: true + notify: Restart apache2 + when: inventory_hostname == 'controller' \ No newline at end of file diff --git a/playbooks/ansible-openstack-nova/roles/keystone_minimal/tasks/keystone_setup.yml b/playbooks/ansible-openstack-nova/roles/keystone_minimal/tasks/keystone_setup.yml new file mode 100644 index 00000000..3c8f10b1 --- /dev/null +++ b/playbooks/ansible-openstack-nova/roles/keystone_minimal/tasks/keystone_setup.yml @@ -0,0 +1,72 @@ +--- +# Remove any residual SQLite database files that might interfere with MySQL setup +- name: Remove residual SQLite database files + ansible.builtin.file: + path: "{{ item }}" + state: absent + loop: + - /etc/keystone/keystone.db + - /etc/keystone/keystone.conf.d + become: true + when: inventory_hostname == 'controller' + +# Configure Keystone with database connection and other settings +# The configuration file is owned by the keystone user for proper permissions +- name: Configure Keystone 
database connection + ansible.builtin.template: + src: keystone.conf.j2 + dest: /etc/keystone/keystone.conf + owner: keystone + group: keystone + mode: '0640' + become: true + notify: Restart apache2 + when: inventory_hostname == 'controller' + +# Debug task to display the Keystone configuration (useful for troubleshooting) +- name: Debug Keystone configuration + ansible.builtin.command: + cmd: cat /etc/keystone/keystone.conf + register: keystone_conf_content + changed_when: false + become: true + when: inventory_hostname == 'controller' + +# Display the Keystone configuration for debugging purposes +- name: Display Keystone configuration + ansible.builtin.debug: + msg: "{{ keystone_conf_content.stdout_lines }}" + when: inventory_hostname == 'controller' + +# Verify that Keystone can connect to the database before proceeding +# This helps catch configuration issues early in the process +- name: Verify Keystone database connectivity + ansible.builtin.command: + cmd: mysql -u "{{ openstack_db_user }}" -p"{{ openstack_db_password }}" -h localhost -e "SELECT 1 FROM information_schema.tables WHERE table_schema='{{ openstack_db_name }}'" + register: db_connect_result + changed_when: false + failed_when: db_connect_result.rc != 0 + retries: 3 + delay: 5 + until: db_connect_result is success + become: true + when: inventory_hostname == 'controller' + +# Populate the Keystone database with initial schema +# This should be run as the keystone user to ensure proper file permissions +- name: Populate the Keystone database + ansible.builtin.command: + cmd: keystone-manage --config-file /etc/keystone/keystone.conf db_sync + creates: /etc/keystone/db_synced + become: true + # Run as the keystone user to ensure proper file permissions and service access + become_user: keystone + environment: + OSLO_CONFIG_FILE: /etc/keystone/keystone.conf + PYTHONPATH: /usr/lib/python3/dist-packages + register: keystone_db_sync_result + retries: 5 + delay: 10 + until: keystone_db_sync_result is 
success + when: inventory_hostname == 'controller' + notify: Mark db_sync complete \ No newline at end of file diff --git a/playbooks/ansible-openstack-nova/roles/keystone_minimal/tasks/main.yml b/playbooks/ansible-openstack-nova/roles/keystone_minimal/tasks/main.yml new file mode 100644 index 00000000..01445376 --- /dev/null +++ b/playbooks/ansible-openstack-nova/roles/keystone_minimal/tasks/main.yml @@ -0,0 +1,35 @@ +--- +# Initialize the Keystone database by creating the database and user +- name: Initialize Keystone Database + ansible.builtin.include_tasks: db_initialise.yml + when: inventory_hostname == 'controller' + +# Install Keystone identity service and Apache2 web server +- name: Install Keystone and Apache2 + ansible.builtin.include_tasks: keystone_compo.yml + when: inventory_hostname == 'controller' + +# Configure Keystone service with database connection and other settings +- name: Configure Keystone + ansible.builtin.include_tasks: keystone_setup.yml + when: inventory_hostname == 'controller' + +# Configure Fernet and Credential key repositories for token and credential encryption +- name: Configure Fernet and Credential Repositories + ansible.builtin.include_tasks: fernet_config.yml + when: inventory_hostname == 'controller' + +# Bootstrap the Keystone service to create initial admin user, project, and endpoints +- name: Bootstrap Keystone Service + ansible.builtin.include_tasks: keystone_bootstrap.yml + when: inventory_hostname == 'controller' + +# Configure Apache web server for Keystone WSGI application +- name: Configure Apache + ansible.builtin.include_tasks: apache2_conf.yml + when: inventory_hostname == 'controller' + +# Create the admin-openrc.sh file for CLI authentication +- name: Create OpenRC File + ansible.builtin.include_tasks: create_openrc.yml + when: inventory_hostname == 'controller' \ No newline at end of file diff --git a/playbooks/ansible-openstack-nova/roles/keystone_minimal/templates/admin-openrc.sh.j2 
b/playbooks/ansible-openstack-nova/roles/keystone_minimal/templates/admin-openrc.sh.j2 new file mode 100644 index 00000000..659c13f4 --- /dev/null +++ b/playbooks/ansible-openstack-nova/roles/keystone_minimal/templates/admin-openrc.sh.j2 @@ -0,0 +1,7 @@ +export OS_USERNAME=admin +export OS_PASSWORD={{ openstack_admin_password }} +export OS_PROJECT_NAME=admin +export OS_USER_DOMAIN_NAME=Default +export OS_PROJECT_DOMAIN_NAME=Default +export OS_AUTH_URL={{ keystone_public_url }} +export OS_IDENTITY_API_VERSION=3 \ No newline at end of file diff --git a/playbooks/ansible-openstack-nova/roles/keystone_minimal/templates/keystone.conf.j2 b/playbooks/ansible-openstack-nova/roles/keystone_minimal/templates/keystone.conf.j2 new file mode 100644 index 00000000..0a0e210f --- /dev/null +++ b/playbooks/ansible-openstack-nova/roles/keystone_minimal/templates/keystone.conf.j2 @@ -0,0 +1,30 @@ +[DEFAULT] +log_dir = /var/log/keystone +debug = False + +[database] +connection = mysql+pymysql://{{ openstack_db_user }}:{{ openstack_db_password }}@localhost/{{ keystone_db_name }} + +[token] +provider = fernet + +[cache] +backend = dogpile.cache.memcache +enabled = true + +[memcache] +# memcache_servers = localhost:11211 + +[assignment] +driver = sql + +[auth] +methods = external,password,token,oauth1 +password = keystone.auth.backends.sql.Password +token = keystone.auth.backends.sql.Token + +[federation] +driver = sql + +[oslo_middleware] +enable_proxy_headers_parsing = true diff --git a/playbooks/ansible-openstack-nova/roles/mariadb/handlers/main.yml b/playbooks/ansible-openstack-nova/roles/mariadb/handlers/main.yml new file mode 100644 index 00000000..45c279a3 --- /dev/null +++ b/playbooks/ansible-openstack-nova/roles/mariadb/handlers/main.yml @@ -0,0 +1,5 @@ +--- +- name: Restart mariadb + ansible.builtin.service: + name: mariadb + state: restarted diff --git a/playbooks/ansible-openstack-nova/roles/mariadb/tasks/main.yml 
b/playbooks/ansible-openstack-nova/roles/mariadb/tasks/main.yml new file mode 100644 index 00000000..cde727d4 --- /dev/null +++ b/playbooks/ansible-openstack-nova/roles/mariadb/tasks/main.yml @@ -0,0 +1,110 @@ +--- +- name: Install MariaDB server and Python DB connector + ansible.builtin.apt: + name: + - mariadb-server + - python3-mysqldb # Essential for community.mysql modules to connect + state: present + update_cache: yes # Ensure package cache is updated + become: yes + register: apt_install_result # Register result for retries + until: apt_install_result is success # Retry until successful + retries: 3 + delay: 5 # Wait 5 seconds between retries + notify: Restart mariadb + +- name: Configure MariaDB server (my.cnf) + ansible.builtin.template: + src: my.cnf.j2 + dest: /etc/mysql/mariadb.conf.d/99-openstack.cnf + owner: root + group: root + mode: '0644' + become: yes + notify: Restart mariadb + +- name: Ensure MariaDB service is running and enabled + ansible.builtin.service: + name: mariadb + state: started + enabled: yes + become: yes + register: mariadb_service_result # Register result for retries + until: mariadb_service_result is success # Retry until successful + retries: 3 + delay: 5 # Wait 5 seconds between retries + +# --- FIX: Use login_unix_socket for initial root access --- +# Add a check to ensure the socket exists before attempting to use it +- name: Check if MariaDB Unix socket exists + ansible.builtin.stat: + path: /var/run/mysqld/mysqld.sock + register: socket_stat + failed_when: not socket_stat.stat.exists # Fail if socket doesn't exist + when: inventory_hostname == 'controller' # Only run on the controller + +- name: Secure MariaDB installation - Remove anonymous users + community.mysql.mysql_user: + name: '' + host: "{{ item }}" + state: absent + login_user: root + login_unix_socket: /var/run/mysqld/mysqld.sock # FIX: Use Unix socket for root authentication + become: yes + loop: + - controller # Assuming this is the hostname for the controller 
+ - localhost + when: inventory_hostname == 'controller' and socket_stat.stat.exists # Only run if on controller AND socket exists + +- name: Secure MariaDB installation - Disallow root login remotely + community.mysql.mysql_user: + name: root + host: "{{ item }}" + state: absent + login_user: root + login_unix_socket: /var/run/mysqld/mysqld.sock # FIX: Use Unix socket for root authentication + become: yes + loop: + - 127.0.0.1 + - ::1 + - "{{ ansible_hostname }}" # Ensure controller's own hostname is covered + when: inventory_hostname == 'controller' and socket_stat.stat.exists + +- name: Secure MariaDB installation - Remove test database + community.mysql.mysql_db: + name: test + state: absent + login_user: root + login_unix_socket: /var/run/mysqld/mysqld.sock # FIX: Use Unix socket for root authentication + become: yes + when: inventory_hostname == 'controller' and socket_stat.stat.exists + +# --- NEW: Create OpenStack database --- +- name: Create OpenStack database + community.mysql.mysql_db: + name: "{{ openstack_db_name }}" + state: present + login_user: root + login_unix_socket: /var/run/mysqld/mysqld.sock + become: yes + when: inventory_hostname == 'controller' and socket_stat.stat.exists + +- name: Create OpenStack database user + community.mysql.mysql_user: + name: "{{ openstack_db_user }}" + password: "{{ openstack_db_password }}" + host: "%" # Allow connections from any host (e.g., compute node) + priv: "{{ openstack_db_name }}.*:ALL" # Grant all privileges on OpenStack DB + state: present + login_user: root + login_unix_socket: /var/run/mysqld/mysqld.sock # FIX: Use Unix socket for root authentication + become: yes + when: inventory_hostname == 'controller' and socket_stat.stat.exists + +- name: Flush privileges after user creation + community.mysql.mysql_query: + query: FLUSH PRIVILEGES + login_user: root + login_unix_socket: /var/run/mysqld/mysqld.sock # FIX: Use Unix socket for root authentication + become: yes + when: inventory_hostname == 
'controller' and socket_stat.stat.exists diff --git a/playbooks/ansible-openstack-nova/roles/mariadb/templates/my.cnf.j2 b/playbooks/ansible-openstack-nova/roles/mariadb/templates/my.cnf.j2 new file mode 100644 index 00000000..42b5f4e8 --- /dev/null +++ b/playbooks/ansible-openstack-nova/roles/mariadb/templates/my.cnf.j2 @@ -0,0 +1,15 @@ +# Custom MariaDB configuration for OpenStack + +[mysqld] +bind-address = {{ mariadb_bind_address }} + +default-storage-engine = innodb +innodb_file_per_table = on +max_connections = 4096 +collation-server = utf8mb4_general_ci +character-set-server = utf8mb4 + +# Required for XtraDB/InnoDB to function correctly with OpenStack +# These values are common recommendations, adjust if needed for larger deployments +innodb_buffer_pool_size = 256M # Adjust based on available RAM and database size +innodb_log_file_size = 64M \ No newline at end of file diff --git a/playbooks/ansible-openstack-nova/roles/nova/handlers/main.yml b/playbooks/ansible-openstack-nova/roles/nova/handlers/main.yml new file mode 100644 index 00000000..25f96653 --- /dev/null +++ b/playbooks/ansible-openstack-nova/roles/nova/handlers/main.yml @@ -0,0 +1,54 @@ +--- +- name: Restart nova-api + ansible.builtin.service: + name: nova-api + state: restarted + listen: "Restart nova-api" + +- name: Restart nova-scheduler + ansible.builtin.service: + name: nova-scheduler + state: restarted + listen: "Restart nova-scheduler" + +- name: Restart nova-conductor + ansible.builtin.service: + name: nova-conductor + state: restarted + listen: "Restart nova-conductor" + +- name: Restart nova-novncproxy + ansible.builtin.service: + name: nova-novncproxy + state: restarted + listen: "Restart nova-novncproxy" + +- name: Restart nova-consoleproxy + ansible.builtin.service: + name: nova-consoleproxy + state: restarted + listen: "Restart nova-consoleproxy" + +- name: Restart nova-compute + ansible.builtin.service: + name: nova-compute + state: restarted + listen: "Restart nova-compute" + +- 
name: Restart libvirtd + ansible.builtin.service: + name: libvirtd + state: restarted + listen: "Restart libvirtd" + +- name: Restart networking + ansible.builtin.service: + name: networking + state: restarted + listen: "Restart networking" + +- name: Restart apache2 + ansible.builtin.service: + name: apache2 + state: restarted + listen: "Restart apache2" diff --git a/playbooks/ansible-openstack-nova/roles/nova/tasks/_config.yml b/playbooks/ansible-openstack-nova/roles/nova/tasks/_config.yml new file mode 100644 index 00000000..ee763135 --- /dev/null +++ b/playbooks/ansible-openstack-nova/roles/nova/tasks/_config.yml @@ -0,0 +1,83 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/ansible/schemas/main/f/ansible-tasks.json +# Tasks for configuring Nova (nova.conf) on both controller and compute nodes. + +- name: Ensure /etc/nova directory exists + ansible.builtin.file: + path: /etc/nova + state: directory + owner: nova + group: nova + mode: '0755' + become: true + +- name: Configure Nova (nova.conf) + ansible.builtin.template: + src: nova.conf.j2 + dest: /etc/nova/nova.conf + owner: nova + group: nova + mode: '0640' + become: true + notify: + - Restart nova-api + - Restart nova-scheduler + - Restart nova-conductor + - Restart nova-novncproxy + - Restart nova-compute + +- name: Check if Nova API database is already synced + ansible.builtin.stat: + path: /var/lib/nova/.api_db_synced + register: nova_api_db_synced + when: inventory_hostname in groups['controllers'] + +- name: Populate the Nova API database (on controller) + ansible.builtin.command: nova-manage api_db sync + become: true + become_user: nova + register: nova_api_db_sync_result + changed_when: nova_api_db_sync_result.rc == 0 + failed_when: nova_api_db_sync_result.rc != 0 + when: + - inventory_hostname in groups['controllers'] + - not nova_api_db_synced.stat.exists + +- name: Mark Nova API database as synced + ansible.builtin.file: + path: /var/lib/nova/.api_db_synced + state: touch 
+ owner: nova + group: nova + mode: '0644' + become: true + when: + - inventory_hostname in groups['controllers'] + +- name: Check if Nova database is already synced + ansible.builtin.stat: + path: /var/lib/nova/.db_synced + register: nova_db_synced + when: inventory_hostname in groups['controllers'] + +- name: Populate the Nova database (on controller) + ansible.builtin.command: nova-manage db sync + become: true + become_user: nova + register: nova_db_sync_result + changed_when: nova_db_sync_result.rc == 0 + failed_when: nova_db_sync_result.rc != 0 + when: + - inventory_hostname in groups['controllers'] + - not nova_db_synced.stat.exists + +- name: Mark Nova database as synced + ansible.builtin.file: + path: /var/lib/nova/.db_synced + state: touch + owner: nova + group: nova + mode: '0644' + become: true + when: + - inventory_hostname in groups['controllers'] diff --git a/playbooks/ansible-openstack-nova/roles/nova/tasks/_db_setup.yml b/playbooks/ansible-openstack-nova/roles/nova/tasks/_db_setup.yml new file mode 100644 index 00000000..a154baa8 --- /dev/null +++ b/playbooks/ansible-openstack-nova/roles/nova/tasks/_db_setup.yml @@ -0,0 +1,40 @@ +--- +# Tasks for setting up the Nova database on the controller. 
+ +- name: Create Nova database + community.mysql.mysql_db: + name: "{{ nova_db_name }}" + state: present + login_user: root + login_unix_socket: /var/run/mysqld/mysqld.sock + delegate_to: "{{ inventory_hostname }}" + +- name: Create Nova API database + community.mysql.mysql_db: + name: "{{ nova_api_db_name }}" + state: present + login_user: root + login_unix_socket: /var/run/mysqld/mysqld.sock + delegate_to: "{{ inventory_hostname }}" + +- name: Grant privileges to Nova database user + community.mysql.mysql_user: + name: "{{ openstack_db_user }}" + password: "{{ openstack_db_password }}" + host: "%" + priv: "{{ nova_db_name }}.*:ALL" + state: present + login_user: root + login_unix_socket: /var/run/mysqld/mysqld.sock + delegate_to: "{{ inventory_hostname }}" + +- name: Grant privileges to Nova API database user + community.mysql.mysql_user: + name: "{{ openstack_db_user }}" + password: "{{ openstack_db_password }}" + host: "%" + priv: "{{ nova_api_db_name }}.*:ALL" + state: present + login_user: root + login_unix_socket: /var/run/mysqld/mysqld.sock + delegate_to: "{{ inventory_hostname }}" diff --git a/playbooks/ansible-openstack-nova/roles/nova/tasks/_install_compute.yml b/playbooks/ansible-openstack-nova/roles/nova/tasks/_install_compute.yml new file mode 100644 index 00000000..234c4dae --- /dev/null +++ b/playbooks/ansible-openstack-nova/roles/nova/tasks/_install_compute.yml @@ -0,0 +1,105 @@ +--- +# Tasks for installing and configuring Nova components on the compute node. 
+ +- name: Install Nova compute packages + ansible.builtin.apt: + name: + - nova-compute + - qemu-kvm + - libvirt-daemon-system + - libvirt-clients + - bridge-utils + - virtinst # For virt-install, useful for testing + state: present + notify: + - Restart nova-compute + - Restart libvirtd + +- name: Ensure libvirtd service is running and enabled + ansible.builtin.service: + name: libvirtd + state: started + enabled: yes + +- name: Ensure Nova Compute service is running and enabled + ansible.builtin.service: + name: nova-compute + state: started + enabled: yes + +- name: Configure Libvirt to listen on all interfaces for VNC + ansible.builtin.lineinfile: + path: /etc/libvirt/qemu.conf + regexp: '^#vnc_listen = "0.0.0.0"$' + line: 'vnc_listen = "0.0.0.0"' + state: present + notify: Restart libvirtd + +- name: Configure Libvirt to allow VNC connections from any address + ansible.builtin.lineinfile: + path: /etc/libvirt/qemu.conf + regexp: '^#vnc_allow_host_auto = 1$' + line: 'vnc_allow_host_auto = 1' + state: present + notify: Restart libvirtd + +- name: Add nova user to libvirt group + ansible.builtin.user: + name: nova + groups: libvirt + append: yes + +- name: Add libvirt user to kvm group + ansible.builtin.user: + name: libvirt-qemu + groups: kvm + append: yes + +- name: Ensure KVM module is loaded + ansible.builtin.modprobe: + name: kvm + state: present + +- name: Check CPU virtualization support + ansible.builtin.shell: | + if grep -q vmx /proc/cpuinfo; then + echo "intel" + elif grep -q svm /proc/cpuinfo; then + echo "amd" + else + echo "none" + fi + register: cpu_virt_support + changed_when: false + +- name: Ensure KVM_intel module is loaded with nested virtualization (if Intel CPU) + ansible.builtin.modprobe: + name: kvm_intel + state: present + params: nested=1 + when: cpu_virt_support.stdout == "intel" + ignore_errors: true # May not be Intel, or nested already enabled + +- name: Ensure KVM_amd module is loaded with nested virtualization (if AMD CPU) + 
ansible.builtin.modprobe: + name: kvm_amd + state: present + params: nested=1 + when: cpu_virt_support.stdout == "amd" + ignore_errors: true # May not be AMD, or nested already enabled + +- name: Create a bridge for instances (br-ex) + ansible.builtin.template: + src: interfaces.j2 + dest: /etc/network/interfaces.d/br-ex.cfg + owner: root + group: root + mode: '0644' + notify: Restart networking + +- name: Bring up the br-ex bridge + ansible.builtin.command: ifup br-ex + args: + creates: /sys/class/net/br-ex # Check if bridge exists + changed_when: true # Always report as changed if ifup runs + failed_when: false # Don't fail if bridge is already up diff --git a/playbooks/ansible-openstack-nova/roles/nova/tasks/_install_controller.yml b/playbooks/ansible-openstack-nova/roles/nova/tasks/_install_controller.yml new file mode 100644 index 00000000..dbfc54cf --- /dev/null +++ b/playbooks/ansible-openstack-nova/roles/nova/tasks/_install_controller.yml @@ -0,0 +1,61 @@ +--- +# Tasks for installing and configuring Nova components on the controller node. 
+ +- name: Install Nova controller packages + ansible.builtin.apt: + name: + - nova-api + - nova-scheduler + - nova-conductor + - nova-novncproxy + - nova-consoleproxy # For VNC console support + state: present + notify: + - Restart nova-api + - Restart nova-scheduler + - Restart nova-conductor + - Restart nova-novncproxy + - Restart nova-consoleproxy # ADDED: Notification for nova-consoleproxy restart + +- name: Ensure Nova API service is running and enabled + ansible.builtin.service: + name: nova-api + state: started + enabled: yes + +- name: Ensure Nova Scheduler service is running and enabled + ansible.builtin.service: + name: nova-scheduler + state: started + enabled: yes + +- name: Ensure Nova Conductor service is running and enabled + ansible.builtin.service: + name: nova-conductor + state: started + enabled: yes + +- name: Ensure Nova NoVNC Proxy service is running and enabled + ansible.builtin.service: + name: nova-novncproxy + state: started + enabled: yes + +- name: Ensure Nova Console Proxy service is running and enabled + ansible.builtin.service: + name: nova-consoleproxy + state: started + enabled: yes + +- name: Enable Nova API WSGI in Apache2 + ansible.builtin.file: + src: /usr/share/nova/wsgi-api.conf + dest: /etc/apache2/conf-enabled/wsgi-nova-api.conf + state: link + notify: Restart apache2 + +- name: Ensure Apache2 is running for Nova API WSGI + ansible.builtin.service: + name: apache2 + state: started + enabled: yes diff --git a/playbooks/ansible-openstack-nova/roles/nova/tasks/_keystone_registration.yml b/playbooks/ansible-openstack-nova/roles/nova/tasks/_keystone_registration.yml new file mode 100644 index 00000000..0c1ec5d8 --- /dev/null +++ b/playbooks/ansible-openstack-nova/roles/nova/tasks/_keystone_registration.yml @@ -0,0 +1,136 @@ +--- +# Tasks for registering Nova with Keystone on the controller. 
+
+- name: Check if Nova service user exists
+  ansible.builtin.shell: |
+    source {{ keystone_rc_file }}
+    openstack user show nova --domain Default
+  args:
+    executable: /bin/bash
+  register: nova_user_check
+  failed_when: nova_user_check.rc not in [0, 1]
+  changed_when: false
+  environment:
+    OS_CLOUD: ""
+
+- name: Create Nova service user in Keystone
+  ansible.builtin.shell: |
+    source {{ keystone_rc_file }}
+    openstack user create \
+      --domain Default \
+      --password "{{ nova_user_password }}" \
+      nova 2>&1 | tee /var/log/ansible-nova-user.log
+  args:
+    executable: /bin/bash
+  when: nova_user_check.rc == 1
+  register: nova_user_create
+  retries: 3
+  delay: 5
+  until: nova_user_create.rc == 0 or 'already exists' in nova_user_create.stdout
+  changed_when: nova_user_create.rc == 0
+  failed_when: nova_user_create.rc != 0 and 'already exists' not in nova_user_create.stdout
+  environment:
+    OS_CLOUD: ""
+
+- name: Check if Nova user has admin role in service project
+  ansible.builtin.shell: |
+    source {{ keystone_rc_file }}
+    openstack role assignment list \
+      --user nova \
+      --project service \
+      --role admin \
+      --user-domain Default \
+      --project-domain Default --format value
+  args:
+    executable: /bin/bash
+  register: nova_role_assignment_check
+  failed_when: nova_role_assignment_check.rc != 0
+  changed_when: false
+  environment:
+    OS_CLOUD: ""
+
+- name: Add admin role to Nova user in service project
+  ansible.builtin.shell: |
+    source {{ keystone_rc_file }}
+    openstack role add \
+      --user nova \
+      --project service \
+      admin \
+      --user-domain Default \
+      --project-domain Default 2>&1 | tee /var/log/ansible-nova-role.log
+  args:
+    executable: /bin/bash
+  when: nova_role_assignment_check.stdout == "" # Only add if assignment not found
+  register: nova_role_add
+  retries: 3
+  delay: 5
+  until: nova_role_add.rc == 0 or 'already has role' in nova_role_add.stdout
+  changed_when: nova_role_add.rc == 0
+  failed_when: nova_role_add.rc != 0 and 'already has role' not in
 nova_role_add.stderr
+  environment:
+    OS_CLOUD: ""
+
+- name: Check if Nova service exists
+  ansible.builtin.shell: |
+    source {{ keystone_rc_file }}
+    openstack service show nova
+  args:
+    executable: /bin/bash
+  register: nova_service_check
+  failed_when: nova_service_check.rc not in [0, 1]
+  changed_when: false
+  environment:
+    OS_CLOUD: ""
+
+- name: Create Nova service in Keystone
+  ansible.builtin.shell: |
+    source {{ keystone_rc_file }}
+    openstack service create \
+      --name nova \
+      --description "OpenStack Compute service" \
+      compute 2>&1 | tee /var/log/ansible-nova-service.log
+  args:
+    executable: /bin/bash
+  when: nova_service_check.rc == 1
+  register: nova_service_create
+  retries: 3
+  delay: 5
+  until: nova_service_create.rc == 0 or 'already exists' in nova_service_create.stdout
+  changed_when: nova_service_create.rc == 0
+  failed_when: nova_service_create.rc != 0 and 'already exists' not in nova_service_create.stdout
+  environment:
+    OS_CLOUD: ""
+
+- name: Create or Update Nova endpoints in Keystone
+  ansible.builtin.shell: |
+    source {{ keystone_rc_file }}
+    # Check if endpoint exists. If not, create it. If it exists, ensure URL is correct.
+    if ! openstack endpoint show nova {{ item.interface }} &>/dev/null; then
+      echo "Creating Nova {{ item.interface }} endpoint..."
+      openstack endpoint create \
+        --region "{{ openstack_region_name }}" \
+        {{ item.interface }} \
+        compute \
+        "{{ item.url }}" 2>&1 | tee /var/log/ansible-nova-endpoint-{{ item.interface }}.log
+    else
+      echo "Updating Nova {{ item.interface }} endpoint..."
+ openstack endpoint set \ + --region "{{ openstack_region_name }}" \ + --url "{{ item.url }}" \ + {{ item.interface }} \ + compute 2>&1 | tee /var/log/ansible-nova-endpoint-{{ item.interface }}.log + fi + args: + executable: /bin/bash + loop: + - { interface: 'public', url: "{{ nova_public_url }}" } + - { interface: 'internal', url: "{{ nova_internal_url }}" } + - { interface: 'admin', url: "{{ nova_admin_url }}" } + register: nova_endpoint_result + retries: 3 + delay: 5 + until: nova_endpoint_result.rc == 0 + changed_when: "nova_endpoint_result.rc == 0 and ('created' in nova_endpoint_result.stdout or 'updated' in nova_endpoint_result.stdout)" + failed_when: nova_endpoint_result.rc != 0 + environment: + OS_CLOUD: "" diff --git a/playbooks/ansible-openstack-nova/roles/nova/tasks/main.yml b/playbooks/ansible-openstack-nova/roles/nova/tasks/main.yml new file mode 100644 index 00000000..59d84d15 --- /dev/null +++ b/playbooks/ansible-openstack-nova/roles/nova/tasks/main.yml @@ -0,0 +1,21 @@ +--- +# Main entry point for the Nova installation role. 
+ +- name: Include Nova database setup tasks + ansible.builtin.include_tasks: _db_setup.yml + when: inventory_hostname in groups['controllers'] + +- name: Include Nova Keystone registration tasks + ansible.builtin.include_tasks: _keystone_registration.yml + when: inventory_hostname in groups['controllers'] + +- name: Include Nova configuration tasks + ansible.builtin.include_tasks: _config.yml + +- name: Include Nova controller installation tasks + ansible.builtin.include_tasks: _install_controller.yml + when: inventory_hostname in groups['controllers'] + +- name: Include Nova compute installation tasks + ansible.builtin.include_tasks: _install_compute.yml + when: inventory_hostname in groups['computes'] diff --git a/playbooks/ansible-openstack-nova/roles/nova/templates/interfaces.j2 b/playbooks/ansible-openstack-nova/roles/nova/templates/interfaces.j2 new file mode 100644 index 00000000..6357de3f --- /dev/null +++ b/playbooks/ansible-openstack-nova/roles/nova/templates/interfaces.j2 @@ -0,0 +1,12 @@ +# Network configuration for br-ex bridge on compute node + +auto br-ex +iface br-ex inet static + address {{ compute_ip }} # Or an IP from your instance network if different + netmask 255.255.255.0 # Adjust as per your network + gateway 192.168.56.1 # Assuming this is your gateway for the private network + bridge_ports none + bridge_fd 9 + bridge_hello 2 + bridge_maxwait 20 + bridge_stp off \ No newline at end of file diff --git a/playbooks/ansible-openstack-nova/roles/nova/templates/nova.conf.j2 b/playbooks/ansible-openstack-nova/roles/nova/templates/nova.conf.j2 new file mode 100644 index 00000000..89bfc552 --- /dev/null +++ b/playbooks/ansible-openstack-nova/roles/nova/templates/nova.conf.j2 @@ -0,0 +1,65 @@ +# roles/nova/templates/nova.conf.j2 +# Nova configuration file for OpenStack + +[DEFAULT] +# General options +transport_url = rabbit://openstack:{{ rabbitmq_password }}@{{ rabbitmq_host }} +auth_strategy = keystone +use_neutron = True +firewall_driver = 
nova.virt.firewall.NoopFirewallDriver
+my_ip = {{ ansible_host }}
+
+# Logging options
+# verbose = true
+# debug = true
+
+[api]
+auth_strategy = keystone
+
+[api_database]
+connection = {{ database_connection_base }}/{{ nova_api_db_name }}
+
+[database]
+connection = {{ database_connection_base }}/{{ nova_db_name }}
+
+[glance]
+api_servers = {{ glance_api_url }}
+
+[keystone_authtoken]
+www_authenticate_uri = {{ keystone_public_url }}
+auth_url = {{ keystone_admin_url }}
+memcached_servers = localhost:11211
+auth_type = password
+project_domain_name = Default
+user_domain_name = Default
+project_name = service
+username = nova
+password = {{ nova_user_password }}
+
+[oslo_concurrency]
+lock_path = /var/lib/nova/tmp
+
+[placement]
+region_name = {{ openstack_region_name }}
+project_domain_name = Default
+project_name = service
+auth_type = password
+user_domain_name = Default
+username = placement
+password = {{ placement_user_password }}
+auth_url = {{ keystone_admin_url }}
+# Explicitly tell Nova where the Placement API lives.
+# This overrides service catalog discovery if there are issues or specific requirements.
+endpoint_override = {{ placement_api_url }}
+
+[vnc]
+enabled = true
+# The IP address of the controller node where the noVNC proxy runs
+server_listen = 0.0.0.0
+server_proxyclient_address = {{ controller_ip }}
+# The base URL for the noVNC proxy, accessible from client browsers
+# This should be the public IP of the controller
+novncproxy_base_url = http://{{ controller_ip }}:6080/vnc_auto.html
+
+[wsgi]
+api_paste_config = /etc/nova/api-paste.ini
\ No newline at end of file
diff --git a/playbooks/ansible-openstack-nova/roles/nova/vars/main.yml b/playbooks/ansible-openstack-nova/roles/nova/vars/main.yml
new file mode 100644
index 00000000..3886205d
--- /dev/null
+++ b/playbooks/ansible-openstack-nova/roles/nova/vars/main.yml
@@ -0,0 +1,6 @@
+---
+# Role-specific variables for Nova.
+
+# Nova database names
+nova_db_name: nova
+nova_api_db_name: nova_api
\ No newline at end of file
diff --git a/playbooks/ansible-openstack-nova/roles/nova_validation/tasks/main.yml b/playbooks/ansible-openstack-nova/roles/nova_validation/tasks/main.yml
new file mode 100644
index 00000000..2effe8f2
--- /dev/null
+++ b/playbooks/ansible-openstack-nova/roles/nova_validation/tasks/main.yml
@@ -0,0 +1,513 @@
+---
+# Tasks for validating the Nova deployment.
+# These tasks will primarily run on the controller node, as it has access to the OpenStack CLI.
+
+- name: Source admin-openrc.sh for OpenStack CLI environment
+  ansible.builtin.shell: |
+    source {{ keystone_rc_file }}
+    echo "OS_AUTH_URL=$OS_AUTH_URL"
+    echo "OS_USERNAME=$OS_USERNAME"
+    echo "OS_PROJECT_NAME=$OS_PROJECT_NAME"
+  args:
+    executable: /bin/bash
+  register: openrc_output
+  changed_when: false
+  when: inventory_hostname in groups['controllers']
+  # This task is primarily for debugging and ensuring the environment variables are set.
+ +# --- Verify Nova services are running on controller --- +- name: Ensure Nova API service is running on controller + ansible.builtin.service: + name: nova-api + state: started + when: inventory_hostname in groups['controllers'] + changed_when: false # This is a check, not an intended change + +- name: Ensure Nova Scheduler service is running on controller + ansible.builtin.service: + name: nova-scheduler + state: started + when: inventory_hostname in groups['controllers'] + changed_when: false + +- name: Ensure Nova Conductor service is running on controller + ansible.builtin.service: + name: nova-conductor + state: started + when: inventory_hostname in groups['controllers'] + changed_when: false + +- name: Ensure Nova NoVNC Proxy service is running on controller + ansible.builtin.service: + name: nova-novncproxy + state: started + when: inventory_hostname in groups['controllers'] + changed_when: false + +- name: Ensure Nova Console Proxy service is running on controller + ansible.builtin.service: + name: nova-consoleproxy + state: started + when: inventory_hostname in groups['controllers'] + changed_when: false + +# --- Verify Nova compute service and libvirtd are running on compute node --- +- name: Ensure Nova Compute service is running on compute node + ansible.builtin.service: + name: nova-compute + state: started + when: inventory_hostname in groups['computes'] + changed_when: false + +- name: Ensure libvirtd service is running on compute node + ansible.builtin.service: + name: libvirtd + state: started + when: inventory_hostname in groups['computes'] + changed_when: false + +- name: Verify Nova endpoints are registered in Keystone + ansible.builtin.shell: | + source {{ keystone_rc_file }} + openstack endpoint list --service compute --format json + args: + executable: /bin/bash + register: nova_endpoints_list + changed_when: false + when: inventory_hostname in groups['controllers'] + failed_when: nova_endpoints_list.rc != 0 or (nova_endpoints_list.stdout | 
from_json | length) == 0 + environment: + OS_CLOUD: "" + +- name: Display Nova service and endpoint info (from CLI output) + ansible.builtin.debug: + msg: "Nova endpoints: {{ nova_endpoints_list.stdout | from_json }}" + when: inventory_hostname in groups['controllers'] and nova_endpoints_list is defined + +- name: Check OpenStack service list (general health check) + ansible.builtin.command: openstack service list + register: service_list_output + changed_when: false + when: inventory_hostname in groups['controllers'] + environment: + OS_CLOUD: "{{ openstack_cloud_config }}" + +- name: Display OpenStack service list + ansible.builtin.debug: + msg: "{{ service_list_output.stdout }}" + when: inventory_hostname in groups['controllers'] + +- name: Check OpenStack endpoint list (general health check) + ansible.builtin.command: openstack endpoint list + register: endpoint_list_output + changed_when: false + when: inventory_hostname in groups['controllers'] + environment: + OS_CLOUD: "{{ openstack_cloud_config }}" + +- name: Display OpenStack endpoint list + ansible.builtin.debug: + msg: "{{ endpoint_list_output.stdout }}" + when: inventory_hostname in groups['controllers'] + +- name: Check Nova service status (nova service-list) + ansible.builtin.command: openstack compute service list + register: nova_service_status + changed_when: false + when: inventory_hostname in groups['controllers'] + environment: + OS_CLOUD: "{{ openstack_cloud_config }}" + +- name: Display Nova service status + ansible.builtin.debug: + msg: "{{ nova_service_status.stdout }}" + when: inventory_hostname in groups['controllers'] + +- name: Ensure all Nova services are 'up' + ansible.builtin.assert: + that: + - "' down ' not in nova_service_status.stdout" + - "'XXX' not in nova_service_status.stdout" + fail_msg: "One or more Nova services are down or disabled!" + success_msg: "All Nova services are up and enabled." 
+ when: inventory_hostname in groups['controllers'] + +- name: Download CirrOS image (if not already present) + ansible.builtin.get_url: + url: "{{ cirros_image_url }}" + dest: "/tmp/{{ cirros_image_name }}" + mode: '0644' + register: cirros_download + until: cirros_download is success + retries: 5 + delay: 10 + when: inventory_hostname in groups['controllers'] + +- name: Check if CirrOS image exists in Glance + ansible.builtin.shell: | + source {{ keystone_rc_file }} + openstack image show "{{ cirros_image_glance_name }}" --format value -c id + args: + executable: /bin/bash + register: cirros_image_check + failed_when: cirros_image_check.rc not in [0, 1] + changed_when: false + environment: + OS_CLOUD: "" + +- name: Upload CirrOS image to Glance + ansible.builtin.shell: | + source {{ keystone_rc_file }} + openstack image create "{{ cirros_image_glance_name }}" \ + --file "/tmp/{{ cirros_image_name }}" \ + --disk-format qcow2 \ + --container-format bare \ + --public 2>&1 | tee /var/log/ansible-glance-image-upload.log + args: + executable: /bin/bash + when: cirros_image_check.rc == 1 + register: cirros_upload_result + retries: 3 + delay: 10 + until: cirros_upload_result.rc == 0 or 'already exists' in cirros_upload_result.stderr + changed_when: cirros_upload_result.rc == 0 + failed_when: cirros_upload_result.rc != 0 and 'already exists' not in cirros_upload_result.stderr + environment: + OS_CLOUD: "" + +- name: Check if test network exists + ansible.builtin.shell: | + source {{ keystone_rc_file }} + openstack network show "{{ test_network_name }}" --format value -c id + args: + executable: /bin/bash + register: test_network_check + failed_when: test_network_check.rc not in [0, 1] + changed_when: false + environment: + OS_CLOUD: "" + +- name: Create a test network + ansible.builtin.shell: | + source {{ keystone_rc_file }} + openstack network create "{{ test_network_name }}" \ + --provider-physical-network "{{ test_physical_network }}" \ + --provider-network-type flat 
\ + --share 2>&1 | tee /var/log/ansible-network-create.log + args: + executable: /bin/bash + when: test_network_check.rc == 1 + register: test_network_create_result + retries: 3 + delay: 5 + until: test_network_create_result.rc == 0 or 'already exists' in test_network_create_result.stderr + changed_when: test_network_create_result.rc == 0 + failed_when: test_network_create_result.rc != 0 and 'already exists' not in test_network_create_result.stderr + environment: + OS_CLOUD: "" + +- name: Check if test subnet exists + ansible.builtin.shell: | + source {{ keystone_rc_file }} + openstack subnet show "{{ test_subnet_name }}" --format value -c id + args: + executable: /bin/bash + register: test_subnet_check + failed_when: test_subnet_check.rc not in [0, 1] + changed_when: false + environment: + OS_CLOUD: "" + +- name: Create a test subnet + ansible.builtin.shell: | + source {{ keystone_rc_file }} + openstack subnet create "{{ test_subnet_name }}" \ + --network "{{ test_network_name }}" \ + --subnet-range "{{ test_subnet_cidr }}" \ + --gateway "{{ test_subnet_gateway }}" \ + --dns-nameserver "{{ test_dns_nameservers | join(',') }}" \ + --enable-dhcp 2>&1 | tee /var/log/ansible-subnet-create.log + args: + executable: /bin/bash + when: test_subnet_check.rc == 1 + register: test_subnet_create_result + retries: 3 + delay: 5 + until: test_subnet_create_result.rc == 0 or 'already exists' in test_subnet_create_result.stderr + changed_when: test_subnet_create_result.rc == 0 + failed_when: test_subnet_create_result.rc != 0 and 'already exists' not in test_subnet_create_result.stderr + environment: + OS_CLOUD: "" + +- name: Check if test security group exists + ansible.builtin.shell: | + source {{ keystone_rc_file }} + openstack security group show "{{ test_security_group_name }}" --format value -c id + args: + executable: /bin/bash + register: test_security_group_check + failed_when: test_security_group_check.rc not in [0, 1] + changed_when: false + environment: + OS_CLOUD: "" + 
+- name: Create a test security group to allow SSH and ICMP + ansible.builtin.shell: | + source {{ keystone_rc_file }} + openstack security group create "{{ test_security_group_name }}" \ + --description "Security group for test VMs (SSH and ICMP)" 2>&1 | tee /var/log/ansible-secgroup-create.log + args: + executable: /bin/bash + when: test_security_group_check.rc == 1 + register: test_security_group_create_result + retries: 3 + delay: 5 + until: test_security_group_create_result.rc == 0 or 'already exists' in test_security_group_create_result.stderr + changed_when: test_security_group_create_result.rc == 0 + failed_when: test_security_group_create_result.rc != 0 and 'already exists' not in test_security_group_create_result.stderr + environment: + OS_CLOUD: "" + +- name: Check if SSH rule exists in test security group + ansible.builtin.shell: | + source {{ keystone_rc_file }} + openstack security group rule list "{{ test_security_group_name }}" \ + --protocol tcp --port 22 --direction ingress --remote-ip 0.0.0.0/0 --format value -c id + args: + executable: /bin/bash + register: ssh_rule_check + failed_when: ssh_rule_check.rc != 0 + changed_when: false + environment: + OS_CLOUD: "" + +- name: Add SSH rule to test security group + ansible.builtin.shell: | + source {{ keystone_rc_file }} + openstack security group rule create "{{ test_security_group_name }}" \ + --protocol tcp --dst-port 22 --ingress --remote-ip 0.0.0.0/0 2>&1 | tee /var/log/ansible-secgroup-ssh-rule.log + args: + executable: /bin/bash + when: ssh_rule_check.stdout == "" + register: ssh_rule_create_result + retries: 3 + delay: 5 + until: ssh_rule_create_result.rc == 0 or 'already exists' in ssh_rule_create_result.stderr + changed_when: ssh_rule_create_result.rc == 0 + failed_when: ssh_rule_create_result.rc != 0 and 'already exists' not in ssh_rule_create_result.stderr + environment: + OS_CLOUD: "" + +- name: Check if ICMP rule exists in test security group + ansible.builtin.shell: | + source {{ 
keystone_rc_file }} + openstack security group rule list "{{ test_security_group_name }}" \ + --protocol icmp --direction ingress --remote-ip 0.0.0.0/0 --format value -c id + args: + executable: /bin/bash + register: icmp_rule_check + failed_when: icmp_rule_check.rc != 0 + changed_when: false + environment: + OS_CLOUD: "" + +- name: Add ICMP rule to test security group + ansible.builtin.shell: | + source {{ keystone_rc_file }} + openstack security group rule create "{{ test_security_group_name }}" \ + --protocol icmp --ingress --remote-ip 0.0.0.0/0 2>&1 | tee /var/log/ansible-secgroup-icmp-rule.log + args: + executable: /bin/bash + when: icmp_rule_check.stdout == "" + register: icmp_rule_create_result + retries: 3 + delay: 5 + until: icmp_rule_create_result.rc == 0 or 'already exists' in icmp_rule_create_result.stderr + changed_when: icmp_rule_create_result.rc == 0 + failed_when: icmp_rule_create_result.rc != 0 and 'already exists' not in icmp_rule_create_result.stderr + environment: + OS_CLOUD: "" + +- name: Check if SSH key pair exists + ansible.builtin.shell: | + source {{ keystone_rc_file }} + openstack keypair show "{{ test_keypair_name }}" --format value -c id + args: + executable: /bin/bash + register: test_keypair_check + failed_when: test_keypair_check.rc not in [0, 1] + changed_when: false + environment: + OS_CLOUD: "" + +- name: Generate SSH key pair for instance access + ansible.builtin.shell: | + source {{ keystone_rc_file }} + # Ensure public key file exists on controller + if [ ! 
-f ~/.ssh/id_rsa.pub ]; then + ssh-keygen -t rsa -f ~/.ssh/id_rsa -N "" + fi + openstack keypair create "{{ test_keypair_name }}" \ + --public-key ~/.ssh/id_rsa.pub 2>&1 | tee /var/log/ansible-keypair-create.log + args: + executable: /bin/bash + when: test_keypair_check.rc == 1 + register: test_keypair_create_result + retries: 3 + delay: 5 + until: test_keypair_create_result.rc == 0 or 'already exists' in test_keypair_create_result.stderr + changed_when: test_keypair_create_result.rc == 0 + failed_when: test_keypair_create_result.rc != 0 and 'already exists' not in test_keypair_create_result.stderr + environment: + OS_CLOUD: "" + +- name: Set permissions for private key file (on controller host) + ansible.builtin.file: + path: "/tmp/{{ test_keypair_name }}.pem" + mode: '0600' + when: inventory_hostname in groups['controllers'] + +- name: Check if test instance exists + ansible.builtin.shell: | + source {{ keystone_rc_file }} + openstack server show "{{ test_instance_name }}" --format value -c id + args: + executable: /bin/bash + register: test_instance_check + failed_when: test_instance_check.rc not in [0, 1] + changed_when: false + environment: + OS_CLOUD: "" + +- name: Launch a test instance + ansible.builtin.shell: | + source {{ keystone_rc_file }} + openstack server create "{{ test_instance_name }}" \ + --image "{{ cirros_image_glance_name }}" \ + --flavor "{{ test_flavor_name }}" \ + --network "{{ test_network_name }}" \ + --security-group "{{ test_security_group_name }}" \ + --key-name "{{ test_keypair_name }}" \ + --wait \ + --timeout 300 2>&1 | tee /var/log/ansible-instance-launch.log + args: + executable: /bin/bash + when: test_instance_check.rc == 1 + register: test_instance_launch_result + retries: 3 + delay: 10 + until: test_instance_launch_result.rc == 0 or 'already exists' in test_instance_launch_result.stderr or 'Build of instance' in test_instance_launch_result.stderr + changed_when: test_instance_launch_result.rc == 0 + failed_when: 
test_instance_launch_result.rc != 0 and 'already exists' not in test_instance_launch_result.stderr + environment: + OS_CLOUD: "" + +- name: Get instance details for IP address + ansible.builtin.shell: | + source {{ keystone_rc_file }} + openstack server show "{{ test_instance_name }}" --format json + args: + executable: /bin/bash + register: instance_details_raw + changed_when: false + when: inventory_hostname in groups['controllers'] + environment: + OS_CLOUD: "" + +- name: Extract instance IP address for ping test + ansible.builtin.set_fact: + instance_ip: "{{ (instance_details_raw.stdout | from_json).addresses[test_network_name][0].addr }}" + when: inventory_hostname in groups['controllers'] and instance_details_raw.stdout is defined and (instance_details_raw.stdout | from_json).addresses is defined and (instance_details_raw.stdout | from_json).addresses[test_network_name] is defined + +- name: Ping the launched instance to verify network connectivity + ansible.builtin.wait_for_connection: + host: "{{ instance_ip }}" + port: 22 + delay: 10 + timeout: 180 + when: inventory_hostname in groups['controllers'] and instance_ip is defined + +- name: Ping test successful + ansible.builtin.debug: + msg: "Successfully launched and pinged the test instance {{ test_instance_name }} at {{ instance_ip }}! Nova deployment is functional." 
+ when: inventory_hostname in groups['controllers'] + +# --- CLEANUP TASKS --- + +- name: "Clean up: Delete test instance" + ansible.builtin.shell: | + source {{ keystone_rc_file }} + openstack server delete "{{ test_instance_name }}" --wait --timeout 180 2>&1 | tee /var/log/ansible-instance-delete.log + args: + executable: /bin/bash + when: inventory_hostname in groups['controllers'] + register: instance_delete_result + failed_when: instance_delete_result.rc != 0 and 'No server with a name or ID of' not in instance_delete_result.stderr + changed_when: instance_delete_result.rc == 0 + environment: + OS_CLOUD: "" + +- name: "Clean up: Delete test keypair" + ansible.builtin.shell: | + source {{ keystone_rc_file }} + openstack keypair delete "{{ test_keypair_name }}" 2>&1 | tee /var/log/ansible-keypair-delete.log + args: + executable: /bin/bash + when: inventory_hostname in groups['controllers'] + register: keypair_delete_result + failed_when: keypair_delete_result.rc != 0 and 'No keypair with a name or ID of' not in keypair_delete_result.stderr + changed_when: keypair_delete_result.rc == 0 + environment: + OS_CLOUD: "" + +- name: "Clean up: Delete test security group" + ansible.builtin.shell: | + source {{ keystone_rc_file }} + openstack security group delete "{{ test_security_group_name }}" 2>&1 | tee /var/log/ansible-secgroup-delete.log + args: + executable: /bin/bash + when: inventory_hostname in groups['controllers'] + register: secgroup_delete_result + failed_when: secgroup_delete_result.rc != 0 and 'No security group with a name or ID of' not in secgroup_delete_result.stderr + changed_when: secgroup_delete_result.rc == 0 + environment: + OS_CLOUD: "" + +- name: "Clean up: Delete test subnet" + ansible.builtin.shell: | + source {{ keystone_rc_file }} + openstack subnet delete "{{ test_subnet_name }}" 2>&1 | tee /var/log/ansible-subnet-delete.log + args: + executable: /bin/bash + when: inventory_hostname in groups['controllers'] + register: subnet_delete_result + 
failed_when: subnet_delete_result.rc != 0 and 'No subnet with a name or ID of' not in subnet_delete_result.stderr + changed_when: subnet_delete_result.rc == 0 + environment: + OS_CLOUD: "" + +- name: "Clean up: Delete test network" + ansible.builtin.shell: | + source {{ keystone_rc_file }} + openstack network delete "{{ test_network_name }}" 2>&1 | tee /var/log/ansible-network-delete.log + args: + executable: /bin/bash + when: inventory_hostname in groups['controllers'] + register: network_delete_result + failed_when: network_delete_result.rc != 0 and 'No network with a name or ID of' not in network_delete_result.stderr + changed_when: network_delete_result.rc == 0 + environment: + OS_CLOUD: "" + +- name: "Clean up: Delete CirrOS image from Glance" + ansible.builtin.shell: | + source {{ keystone_rc_file }} + openstack image delete "{{ cirros_image_glance_name }}" 2>&1 | tee /var/log/ansible-image-delete.log + args: + executable: /bin/bash + when: inventory_hostname in groups['controllers'] + register: image_delete_result + failed_when: image_delete_result.rc != 0 and 'No image with a name or ID of' not in image_delete_result.stderr + changed_when: image_delete_result.rc == 0 + environment: + OS_CLOUD: "" diff --git a/playbooks/ansible-openstack-nova/roles/placement_minimal/handlers/main.yml b/playbooks/ansible-openstack-nova/roles/placement_minimal/handlers/main.yml new file mode 100644 index 00000000..47e50609 --- /dev/null +++ b/playbooks/ansible-openstack-nova/roles/placement_minimal/handlers/main.yml @@ -0,0 +1,5 @@ +--- +- name: Restart apache2 + ansible.builtin.service: + name: apache2 + state: restarted diff --git a/playbooks/ansible-openstack-nova/roles/placement_minimal/tasks/main.yml b/playbooks/ansible-openstack-nova/roles/placement_minimal/tasks/main.yml new file mode 100644 index 00000000..d1d8ee85 --- /dev/null +++ b/playbooks/ansible-openstack-nova/roles/placement_minimal/tasks/main.yml @@ -0,0 +1,186 @@ +--- +- name: Install Placement API packages 
and OpenStack client + ansible.builtin.apt: + name: + - placement-api + - python3-openstackclient # Ensure openstack client is available + - bash # Ensure bash is available for shell scripts + state: present + notify: Restart apache2 # Placement also runs as WSGI under Apache + become: yes # Ensure this task runs with sudo + +- name: Create Placement database + community.mysql.mysql_db: + name: "{{ placement_db_name }}" + state: present + delegate_to: "{{ inventory_hostname }}" + +- name: Grant privileges to Placement database user + community.mysql.mysql_user: + name: "{{ openstack_db_user }}" + password: "{{ openstack_db_password }}" + host: "%" + priv: "{{ placement_db_name }}.*:ALL" + state: present + delegate_to: "{{ inventory_hostname }}" + +- name: Populate the Placement database + ansible.builtin.command: su -s /bin/sh -c "placement-manage db sync" placement + args: + creates: /var/lib/placement/placement.sqlite # Prevent re-running if DB is already synced + become: yes + become_user: placement + register: placement_db_sync_result + changed_when: "'No changes to make' not in placement_db_sync_result.stderr" + +- name: Check if Placement service user exists + ansible.builtin.shell: | + source {{ keystone_rc_file }} + openstack user show placement --domain Default + args: + executable: /bin/bash + register: placement_user_check + failed_when: placement_user_check.rc not in [0, 1] # 0 if exists, 1 if not found + changed_when: false + environment: + OS_CLOUD: "" # Ensure no existing cloud env vars interfere + +- name: Create Placement service user in Keystone + ansible.builtin.shell: | + source {{ keystone_rc_file }} + openstack user create \ + --domain Default \ + --password "{{ placement_user_password }}" \ + placement 2>&1 | tee /var/log/ansible-placement-user.log + args: + executable: /bin/bash + when: placement_user_check.rc == 1 # Only create if user does not exist + register: placement_user_create + retries: 3 + delay: 5 + until: placement_user_create.rc 
== 0 or 'already exists' in placement_user_create.stderr # Robust idempotency + changed_when: placement_user_create.rc == 0 # Only changed if creation was successful + failed_when: placement_user_create.rc != 0 and 'already exists' not in placement_user_create.stderr # Fail only on true errors + environment: + OS_CLOUD: "" + +- name: Check if Placement user has admin role in service project + ansible.builtin.shell: | + source {{ keystone_rc_file }} + openstack role assignment list \ + --user placement \ + --project service \ + --role admin \ + --user-domain Default \ + --project-domain Default --format value + args: + executable: /bin/bash + register: placement_role_assignment_check + failed_when: placement_role_assignment_check.rc != 0 + changed_when: false + environment: + OS_CLOUD: "" + +- name: Add admin role to Placement user in service project + ansible.builtin.shell: | + source {{ keystone_rc_file }} + openstack role add \ + --user placement \ + --project service \ + admin \ + --user-domain Default \ + --project-domain Default 2>&1 | tee /var/log/ansible-placement-role.log + args: + executable: /bin/bash + when: placement_role_assignment_check.stdout == "" # Only add if assignment not found + register: placement_role_add + retries: 3 + delay: 5 + until: placement_role_add.rc == 0 or 'already has role' in placement_role_add.stderr + changed_when: placement_role_add.rc == 0 + failed_when: placement_role_add.rc != 0 and 'already has role' not in placement_role_add.stderr + environment: + OS_CLOUD: "" + +- name: Check if Placement service exists + ansible.builtin.shell: | + source {{ keystone_rc_file }} + openstack service show placement + args: + executable: /bin/bash + register: placement_service_check + failed_when: placement_service_check.rc not in [0, 1] + changed_when: false + environment: + OS_CLOUD: "" + +- name: Create Placement service in Keystone + ansible.builtin.shell: | + source {{ keystone_rc_file }} + openstack service create \ + --name placement 
\ + --description "OpenStack Placement service" \ + placement 2>&1 | tee /var/log/ansible-placement-service.log + args: + executable: /bin/bash + when: placement_service_check.rc == 1 # Only create if service does not exist + register: placement_service_create + retries: 3 + delay: 5 + until: placement_service_create.rc == 0 or 'already exists' in placement_service_create.stderr + changed_when: placement_service_create.rc == 0 + failed_when: placement_service_create.rc != 0 and 'already exists' not in placement_service_create.stderr + environment: + OS_CLOUD: "" + +- name: Create or Update Placement endpoints in Keystone + ansible.builtin.shell: | + source {{ keystone_rc_file }} + # Check if endpoint exists. If not, create it. If it exists, ensure URL is correct. + if ! openstack endpoint show placement {{ item.interface }} &>/dev/null; then + echo "Creating Placement {{ item.interface }} endpoint..." + openstack endpoint create \ + --region "{{ openstack_region_name }}" \ + {{ item.interface }} \ + placement \ + "{{ item.url }}" 2>&1 | tee /var/log/ansible-placement-endpoint-{{ item.interface }}.log + else + echo "Updating Placement {{ item.interface }} endpoint..." 
+ openstack endpoint set \ + --region "{{ openstack_region_name }}" \ + --url "{{ item.url }}" \ + {{ item.interface }} \ + placement 2>&1 | tee /var/log/ansible-placement-endpoint-{{ item.interface }}.log + fi + args: + executable: /bin/bash + loop: + - { interface: 'public', url: "{{ placement_api_url }}" } + - { interface: 'internal', url: "{{ placement_api_url }}" } + - { interface: 'admin', url: "{{ placement_api_url }}" } + register: placement_endpoint_result + retries: 3 + delay: 5 + until: placement_endpoint_result.rc == 0 + changed_when: "placement_endpoint_result.rc == 0 and ('created' in placement_endpoint_result.stdout or 'updated' in placement_endpoint_result.stdout)" # More precise changed_when + failed_when: placement_endpoint_result.rc != 0 + environment: + OS_CLOUD: "" + +- name: Configure Placement API (placement.conf) + ansible.builtin.template: + src: placement.conf.j2 + dest: /etc/placement/placement.conf + owner: placement + group: placement + mode: '0640' + notify: Restart apache2 + +# Note: Apache2 setup for Placement is usually handled by the package itself +# or by a common Apache role if we had one. For minimal, we assume it's linked +# by the package installation. We just need to ensure Apache is running. +- name: Ensure Apache2 is running for Placement WSGI + ansible.builtin.service: + name: apache2 + state: started + enabled: yes diff --git a/playbooks/ansible-openstack-nova/roles/placement_minimal/templates/placement.conf.j2 b/playbooks/ansible-openstack-nova/roles/placement_minimal/templates/placement.conf.j2 new file mode 100644 index 00000000..6cbbd4c1 --- /dev/null +++ b/playbooks/ansible-openstack-nova/roles/placement_minimal/templates/placement.conf.j2 @@ -0,0 +1,36 @@ +# Minimal Placement API configuration for OpenStack + +[DEFAULT] +# The verbose option will make the log output more verbose. +# verbose = true + +# The debug option will make the log output really verbose. +# debug = true + +# Connection string for the database. 
+# For Placement, the database connection is typically defined directly. + +[placement_database] +connection = {{ database_connection_base }}/{{ placement_db_name }} + +[api] +# The host and port for the Placement API to listen on. +# This should match the public/internal/admin endpoint URLs. +# bind_host = 0.0.0.0 # Not explicitly needed if running under WSGI +# bind_port = 8778 # Not explicitly needed if running under WSGI + +[keystone_authtoken] +# The URL to the Keystone authentication server. +www_authenticate_uri = {{ keystone_public_url }} +auth_url = {{ keystone_admin_url }} +memcached_servers = localhost:11211 +auth_type = password +project_domain_name = Default +user_domain_name = Default +project_name = service +username = placement +password = {{ placement_user_password }} + +[oslo_middleware] +# Enable parsing of proxy headers. +enable_proxy_headers_parsing = true diff --git a/playbooks/ansible-openstack-nova/roles/rabbitmq/handlers/main.yml b/playbooks/ansible-openstack-nova/roles/rabbitmq/handlers/main.yml new file mode 100644 index 00000000..581eb577 --- /dev/null +++ b/playbooks/ansible-openstack-nova/roles/rabbitmq/handlers/main.yml @@ -0,0 +1,5 @@ +--- +- name: Restart rabbitmq-server + ansible.builtin.service: + name: rabbitmq-server + state: restarted \ No newline at end of file diff --git a/playbooks/ansible-openstack-nova/roles/rabbitmq/tasks/main.yml b/playbooks/ansible-openstack-nova/roles/rabbitmq/tasks/main.yml new file mode 100644 index 00000000..97c90b1a --- /dev/null +++ b/playbooks/ansible-openstack-nova/roles/rabbitmq/tasks/main.yml @@ -0,0 +1,37 @@ +--- +- name: Install RabbitMQ server + ansible.builtin.apt: + name: rabbitmq-server + state: present + notify: Restart rabbitmq-server + +- name: Ensure RabbitMQ service is running and enabled + ansible.builtin.service: + name: rabbitmq-server + state: started + enabled: yes + +- name: Create RabbitMQ OpenStack user + community.rabbitmq.rabbitmq_user: + user: openstack + password: "{{ 
rabbitmq_password }}" + tags: administrator + state: present + delegate_to: "{{ inventory_hostname }}" # Ensure this runs on the RabbitMQ host + +- name: Set permissions for RabbitMQ OpenStack user on / virtual host + community.rabbitmq.rabbitmq_user: + user: openstack + vhost: / + configure_priv: ".*" + read_priv: ".*" + write_priv: ".*" + state: present + delegate_to: "{{ inventory_hostname }}" + +- name: Ensure RabbitMQ default guest user is removed (for security) + community.rabbitmq.rabbitmq_user: + user: guest + state: absent + delegate_to: "{{ inventory_hostname }}" + ignore_errors: yes diff --git a/playbooks/ansible-openstack-nova/setup.sh b/playbooks/ansible-openstack-nova/setup.sh new file mode 100755 index 00000000..5afaea38 --- /dev/null +++ b/playbooks/ansible-openstack-nova/setup.sh @@ -0,0 +1,530 @@ +#!/bin/sh +# setup.sh +# Installs Vagrant, libvirt, vagrant-libvirt, performs host checks, provisions Vagrant VMs with Ansible, and optionally triggers cleanup. +# Production-ready with robust error handling, retries, and resource validation. 
+ +# Usage: +# ./setup.sh # Basic setup +# ./setup.sh --force-provision # Force Ansible provisioning +# ./setup.sh --offline # Offline mode (requires pre-installed boxes) +# VAGRANT_BOX=ubuntu2004 ./setup.sh # Use a specific box +# +# For cleanup: ./cleanup.sh + +# Network configuration - can be overridden with environment variables +CONTROLLER_IP="${CONTROLLER_IP:-192.168.56.10}" +COMPUTE_IP="${COMPUTE_IP:-192.168.56.11}" + +set -e + +# ANSI color codes +COLOR_RED="\033[31m" +COLOR_GREEN="\033[32m" +COLOR_YELLOW="\033[33m" +COLOR_BOLD="\033[1m" +COLOR_UNDERLINE="\033[4m" +COLOR_RESET="\033[0m" + +# Logging functions +log_section() { + echo "${COLOR_BOLD}${COLOR_UNDERLINE}===== $1 =====${COLOR_RESET}" +} + +log_info() { + echo "${COLOR_GREEN}[INFO] $(date '+%Y-%m-%d %H:%M:%S') - $1${COLOR_RESET}" +} + +log_warning() { + echo "${COLOR_YELLOW}[WARNING] $(date '+%Y-%m-%d %H:%M:%S') - $1${COLOR_RESET}" +} + +log_error() { + echo "${COLOR_RED}[ERROR] $(date '+%Y-%m-%d %H:%M:%S') - $1${COLOR_RESET}" >&2 + exit 1 +} + +# Parse arguments +CLEANUP=false +FORCE_PROVISION=false +OFFLINE_MODE=false +TIMEOUT=3600 # 1 hour default timeout +while [ $# -gt 0 ]; do + case "$1" in + --cleanup) CLEANUP=true; shift ;; + --force-provision) FORCE_PROVISION=true; shift ;; + --offline) OFFLINE_MODE=true; shift ;; + --timeout=*) + TIMEOUT=$(echo "$1" | cut -d= -f2) + shift + ;; + *) log_error "Unknown argument: $1" ;; + esac +done + +log_section "Starting Setup" + +# Ensure USER is set +USER="${USER:-$(whoami)}" +[ -z "$USER" ] && log_error "Cannot determine user." +if [ "$USER" = "root" ]; then + log_warning "Running as root is not recommended. Consider using a non-root user (e.g., 'ubuntu') for better security." 
+fi + +# Check host resources +log_section "Checking Host Resources" +MIN_MEMORY_MB=8192 +MIN_CPUS=2 +AVAILABLE_MEMORY_MB=$(free -m | awk '/Mem:/ {print $2}') +AVAILABLE_CPUS=$(lscpu | grep "^CPU(s):" | awk '{print $2}') +if [ "$AVAILABLE_MEMORY_MB" -lt "$MIN_MEMORY_MB" ]; then + log_warning "Host memory ($AVAILABLE_MEMORY_MB MB) is below recommended $MIN_MEMORY_MB MB. Provisioning may be slow or fail." +fi +if [ "$AVAILABLE_CPUS" -lt "$MIN_CPUS" ]; then + log_warning "Host CPUs ($AVAILABLE_CPUS) are below recommended $MIN_CPUS. Provisioning may be slow or fail." +fi +log_info "Host resources: $AVAILABLE_MEMORY_MB MB memory, $AVAILABLE_CPUS CPUs." + +# Detect operating system +log_section "Detecting Operating System" +if [ -f /etc/debian_version ]; then + DISTRO=debian +elif [ -f /etc/redhat-release ]; then + DISTRO=rhel +else + log_error "Unsupported OS. This script supports Debian/Ubuntu and RHEL/CentOS." +fi +log_info "Detected OS: $DISTRO." + +# Check for package manager lock +log_section "Checking Package Manager Lock" +if [ "$DISTRO" = debian ]; then + for lock in /var/lib/dpkg/lock-frontend /var/lib/apt/lists/lock /var/cache/apt/archives/lock; do + if sudo fuser "$lock" >/dev/null 2>&1; then + log_error "apt lock detected at $lock. Please wait or resolve manually." + fi + done +elif [ "$DISTRO" = rhel ]; then + if sudo fuser /var/run/dnf.pid >/dev/null 2>&1; then + log_error "dnf is locked by another process. Please wait or resolve manually." + fi +fi +log_info "No package manager lock detected." + +# Network diagnostics +log_section "Network Diagnostics" +if [ "$OFFLINE_MODE" = false ]; then + log_info "Checking network connectivity..." + if ! ping -c 1 8.8.8.8 >/dev/null 2>&1; then + log_warning "Cannot ping 8.8.8.8. Network connectivity may be limited." + else + log_info "Basic network connectivity is working." + fi + + if ! nslookup google.com >/dev/null 2>&1; then + log_warning "DNS resolution failed. This may cause issues with downloading resources." 
+ else + log_info "DNS resolution is working." + fi +else + log_info "Offline mode enabled. Skipping network checks." +fi + +# Install host system dependencies +log_section "Installing Host System Dependencies" +if [ "$DISTRO" = debian ]; then + i=1 + while [ "$i" -le 3 ]; do + if stdbuf -oL sudo apt-get update -q; then + break + else + log_warning "Retry $i: apt-get update failed. Retrying in 5 seconds..." + sleep 5 + i=$((i + 1)) + fi + done + [ "$i" -gt 3 ] && log_error "Failed to update apt after 3 retries." + stdbuf -oL sudo apt-get install -y -q wget lsb-release qemu-kvm libvirt-daemon-system libvirt-clients bridge-utils virt-manager dnsmasq-base ruby-full build-essential libxml2-dev libxslt1-dev libvirt-dev zlib1g-dev python3-venv python3-pip || \ + log_error "Failed to install Debian/Ubuntu host dependencies." +elif [ "$DISTRO" = rhel ]; then + i=1 + while [ "$i" -le 3 ]; do + if stdbuf -oL sudo dnf install -y -q dnf-utils qemu-kvm libvirt virt-install bridge-utils virt-manager libguestfs-tools ruby-devel gcc libxml2-devel libxslt-devel libvirt-devel zlib-devel make python3-virtualenv python3-pip; then + break + else + log_warning "Retry $i: dnf install failed. Retrying in 5 seconds..." + sleep 5 + i=$((i + 1)) + fi + done + [ "$i" -gt 3 ] && log_error "Failed to install RHEL dependencies after 3 retries." +fi +log_info "Host dependencies installed." + +# Install Vagrant +log_section "Installing Vagrant" +VAGRANT_MIN_VERSION="2.4.1" +if ! command -v vagrant >/dev/null 2>&1; then + log_info "Vagrant not found. Installing Vagrant..." + if [ "$DISTRO" = debian ]; then + # Ensure lsb-release is installed + if ! command -v lsb_release >/dev/null 2>&1; then + log_info "Installing lsb-release..." + stdbuf -oL sudo apt-get install -y -q lsb-release || log_error "Failed to install lsb-release." 
+ fi + # Get codename from /etc/os-release or lsb_release + UBUNTU_CODENAME="" + if [ -f /etc/os-release ]; then + UBUNTU_CODENAME=$(grep -E "^(UBUNTU_CODENAME|VERSION_CODENAME)=" /etc/os-release | cut -d= -f2 | tr -d '\r' | head -n1) + fi + [ -z "$UBUNTU_CODENAME" ] && UBUNTU_CODENAME=$(lsb_release -cs 2>/dev/null | tr -d '\r') + [ -z "$UBUNTU_CODENAME" ] && UBUNTU_CODENAME="noble" # Fallback for minimal images (e.g., Ubuntu 24.04) + log_info "Using Ubuntu codename: $UBUNTU_CODENAME" + wget -q -O - https://apt.releases.hashicorp.com/gpg | sudo gpg --dearmor -o /usr/share/keyrings/hashicorp-archive-keyring.gpg || \ + log_error "Failed to download HashiCorp GPG key." + echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com $UBUNTU_CODENAME main" | \ + sudo tee /etc/apt/sources.list.d/hashicorp.list || log_error "Failed to add HashiCorp APT repository." + stdbuf -oL sudo apt-get update -q || log_error "Failed to update APT after adding HashiCorp repository." + stdbuf -oL sudo apt-get install -y -q vagrant || log_error "Failed to install Vagrant." + elif [ "$DISTRO" = rhel ]; then + sudo dnf config-manager --add-repo https://rpm.releases.hashicorp.com/RHEL/hashicorp.repo || log_error "Failed to add HashiCorp DNF repository." + stdbuf -oL sudo dnf install -y -q vagrant || log_error "Failed to install Vagrant." + fi +else + log_info "Vagrant found. Checking version..." + VAGRANT_VERSION=$(vagrant --version | awk '{print $2}') + if [ "$(printf '%s\n%s' "$VAGRANT_VERSION" "$VAGRANT_MIN_VERSION" | sort -V | head -n1)" != "$VAGRANT_MIN_VERSION" ]; then + log_warning "Vagrant version $VAGRANT_VERSION is older than recommended $VAGRANT_MIN_VERSION. Consider upgrading." + else + log_info "Vagrant version $VAGRANT_VERSION meets minimum requirements." + fi +fi +log_info "Vagrant installed/verified (version: $(vagrant --version | awk '{print $2}'))." 
+
+# Ensure libvirt default network exists and is active
+# NOTE(review): these virsh calls run unprivileged; they may fail until the
+# current user is in the 'libvirt' group (configured later in this script).
+log_section "Configuring libvirt Default Network"
+if ! virsh net-list --all | grep -q " default"; then
+    log_info "libvirt default network not found. Creating it..."
+    # Create default network XML
+    cat > /tmp/default_network.xml << 'EOF'
+<network>
+  <name>default</name>
+  <uuid>9a05da11-e96b-47f3-8253-a3a482e445f5</uuid>
+  <forward mode='nat'>
+    <nat>
+      <port start='1024' end='65535'/>
+    </nat>
+  </forward>
+  <bridge name='virbr0' stp='on' delay='0'/>
+  <ip address='192.168.122.1' netmask='255.255.255.0'>
+    <dhcp>
+      <range start='192.168.122.2' end='192.168.122.254'/>
+    </dhcp>
+  </ip>
+</network>
+EOF
+    virsh net-define /tmp/default_network.xml || log_error "Failed to define libvirt default network."
+    rm -f /tmp/default_network.xml
+    log_info "libvirt default network created."
+fi
+
+if ! virsh net-list --all | grep -q " default.*active"; then
+    log_info "Starting libvirt default network..."
+    virsh net-start default || log_error "Failed to start libvirt default network."
+    virsh net-autostart default || log_warning "Failed to set libvirt default network to autostart."
+fi
+log_info "libvirt default network is active."
+
+# Add user to libvirt group and ensure libvirtd is running
+log_section "Configuring User Permissions"
+getent group libvirt >/dev/null || log_error "'libvirt' group does not exist."
+
+# Ensure libvirtd service is running
+log_info "Ensuring libvirtd service is running..."
+if ! sudo systemctl is-active --quiet libvirtd; then
+    log_info "Starting libvirtd service..."
+    sudo systemctl start libvirtd || log_error "Failed to start libvirtd service."
+fi
+sudo systemctl enable libvirtd || log_warning "Failed to enable libvirtd service."
+
+if [ "$USER" = "root" ]; then
+    log_info "Running as root; skipping libvirt group check, as root has full access."
+elif id -nG "$USER" | grep -q libvirt; then
+    log_info "User '$USER' is already in 'libvirt' group."
+else
+    log_info "Adding user '$USER' to 'libvirt' group..."
+    sudo usermod -aG libvirt "$USER" || log_error "Failed to add user '$USER' to 'libvirt' group."
+    log_info "User '$USER' added to 'libvirt' group. Group change will take effect after re-login."
+ log_warning "You may need to log out and back in, or run 'newgrp libvirt' for group changes to take effect." + + # Try to apply group change in current session + if command -v sg >/dev/null 2>&1; then + log_info "Attempting to apply group change in current session..." + exec sg libvirt -c "$0 $*" + else + log_warning "sg command not found. Group changes will take effect after re-login." + fi +fi + +# Additional permission fix for libvirt socket +log_info "Ensuring proper libvirt socket permissions..." +if [ -S /var/run/libvirt/libvirt-sock ]; then + sudo chmod 666 /var/run/libvirt/libvirt-sock || log_warning "Failed to set libvirt socket permissions." +fi + +# Install/Update vagrant-libvirt plugin +log_section "Configuring vagrant-libvirt Plugin" +VAGRANT_LIBVIRT_MIN_VERSION="0.12.2" +if vagrant plugin list | grep -q vagrant-libvirt; then + log_info "vagrant-libvirt plugin found. Checking version and updating if needed..." + VAGRANT_LIBVIRT_VERSION=$(vagrant plugin list | grep vagrant-libvirt | awk '{print $2}' | tr -d '()') + if [ "$(printf '%s\n%s' "$VAGRANT_LIBVIRT_VERSION" "$VAGRANT_LIBVIRT_MIN_VERSION" | sort -V | head -n1)" != "$VAGRANT_LIBVIRT_MIN_VERSION" ]; then + log_warning "vagrant-libvirt version $VAGRANT_LIBVIRT_VERSION is older than recommended $VAGRANT_LIBVIRT_MIN_VERSION. Updating..." + stdbuf -oL vagrant plugin update vagrant-libvirt || log_warning "Failed to update vagrant-libvirt plugin. Proceeding with existing version." + fi +else + log_info "Installing vagrant-libvirt plugin (this may take a moment)..." + i=1 + while [ "$i" -le 3 ]; do + if stdbuf -oL vagrant plugin install vagrant-libvirt; then + break + else + log_warning "Retry $i: vagrant-libvirt plugin install failed. Retrying in 5 seconds..." + sleep 5 + i=$((i + 1)) + fi + done + vagrant plugin list | grep -q vagrant-libvirt || log_error "Failed to install vagrant-libvirt plugin after 3 retries." 
+fi +log_info "vagrant-libvirt plugin installed/updated (version: $(vagrant plugin list | grep vagrant-libvirt | awk '{print $2}' | tr -d '()'))." + +# Verify libvirt connectivity +log_section "Verifying libvirt Connectivity" +if ! virsh -c qemu:///system list --all >/dev/null 2>virsh_error.log; then + log_error "virsh cannot connect to libvirt. Check permissions (id -nG $USER) or libvirtd issues.\n$(cat virsh_error.log)" +fi +rm -f virsh_error.log +log_info "libvirt is accessible via virsh." + +# Check nested virtualization +log_section "Checking Nested Virtualization" +if ! lscpu | grep -qE "Virtualization:.*VT-x|AMD-V"; then + log_error "Host CPU does not support virtualization (VT-x/AMD-V). Enable in BIOS/UEFI." +fi +KVM_NESTED_ENABLED=false +if [ -f /sys/module/kvm_intel/parameters/nested ]; then + if [ "$(cat /sys/module/kvm_intel/parameters/nested)" = Y ]; then + KVM_NESTED_ENABLED=true + log_info "Intel KVM nested virtualization is enabled." + else + log_warning "Intel KVM nested virtualization is supported but not enabled. Enabling..." + sudo modprobe -r kvm_intel || log_warning "Failed to unload kvm_intel module." + sudo modprobe kvm_intel nested=1 || log_warning "Failed to enable nested virtualization for kvm_intel." + [ "$(cat /sys/module/kvm_intel/parameters/nested)" = Y ] && KVM_NESTED_ENABLED=true + fi +elif [ -f /sys/module/kvm_amd/parameters/nested ]; then + if [ "$(cat /sys/module/kvm_amd/parameters/nested)" = 1 ]; then + KVM_NESTED_ENABLED=true + log_info "AMD KVM nested virtualization is enabled." + else + log_warning "AMD KVM nested virtualization is supported but not enabled. Enabling..." + sudo modprobe -r kvm_amd || log_warning "Failed to unload kvm_amd module." + sudo modprobe kvm_amd nested=1 || log_warning "Failed to enable nested virtualization for kvm_amd." + [ "$(cat /sys/module/kvm_amd/parameters/nested)" = 1 ] && KVM_NESTED_ENABLED=true + fi +else + log_error "KVM module parameters for nested virtualization not found. 
Ensure KVM is installed and loaded." +fi +if [ "$KVM_NESTED_ENABLED" = false ]; then + log_error "Nested virtualization could not be enabled. Required for OpenStack instances in VMs." +fi +log_info "Nested virtualization enabled." + +# Install Ansible in Virtual Environment +log_section "Setting Up Ansible Environment" +PYTHON_VENV_DIR="$HOME/venv" +if [ ! -d "$PYTHON_VENV_DIR" ]; then + PYTHONUNBUFFERED=1 python3 -m venv "$PYTHON_VENV_DIR" || log_error "Failed to create Python virtual environment. Ensure python3-venv is installed." + log_info "Virtual environment created at $PYTHON_VENV_DIR." +fi +. "$PYTHON_VENV_DIR/bin/activate" || log_error "Failed to activate virtual environment." +log_info "Virtual environment activated." +log_info "Installing Ansible and OpenStackSDK in virtual environment..." +PYTHONUNBUFFERED=1 stdbuf -oL pip install --upgrade pip setuptools wheel || log_warning "Failed to upgrade pip/setuptools/wheel. Continuing..." +PYTHONUNBUFFERED=1 stdbuf -oL pip install ansible==9.1.0 openstacksdk==4.6.0 || log_error "Failed to install Ansible and OpenStackSDK." +log_info "Ansible and OpenStackSDK installed (Ansible: $(ansible --version | head -n1), OpenStackSDK: $(pip show openstacksdk | grep Version))." + +# Verify project files +log_section "Verifying Project Files" +for file in Vagrantfile playbooks/site.yml inventory/hosts.ini requirements.yml; do + [ -f "$file" ] || log_error "Required file $file not found." +done +log_info "All essential project files found." + +# Validate requirements.yml +log_section "Validating Ansible Collections Requirements" +if grep -qE "collections:|^ *- name:.*version:.*$" requirements.yml; then + log_info "requirements.yml appears valid." +else + log_warning "requirements.yml may be malformed. Ensure it contains 'collections:' with valid entries." 
+fi + +# Install Ansible Collections +log_section "Installing Ansible Collections" +ANSIBLE_COLLECTIONS_PATH_ENV="$(pwd)/collections" +log_info "Creating collections directory structure at $ANSIBLE_COLLECTIONS_PATH_ENV..." +# Remove existing collections directory if it has permission issues +if [ -d "$ANSIBLE_COLLECTIONS_PATH_ENV" ]; then + rm -rf "$ANSIBLE_COLLECTIONS_PATH_ENV" || log_warning "Failed to remove existing collections directory." +fi +# Create directory structure with proper ownership +mkdir -p "$ANSIBLE_COLLECTIONS_PATH_ENV/ansible_collections/community" || log_error "Failed to create collections directory structure." +mkdir -p "$ANSIBLE_COLLECTIONS_PATH_ENV/ansible_collections/ansible" || log_error "Failed to create ansible collections directory." +# Ensure proper ownership +chown -R "$USER:$USER" "$ANSIBLE_COLLECTIONS_PATH_ENV" || log_warning "Failed to set ownership of collections directory." +chmod -R 755 "$ANSIBLE_COLLECTIONS_PATH_ENV" || log_warning "Failed to set permissions on collections directory." +if [ ! -d "$ANSIBLE_COLLECTIONS_PATH_ENV/ansible_collections" ]; then + log_error "Collections directory structure $ANSIBLE_COLLECTIONS_PATH_ENV/ansible_collections does not exist after creation attempt." +fi +log_info "Collections directory structure created with proper permissions at $ANSIBLE_COLLECTIONS_PATH_ENV." + +# Ensure we're using the virtual environment ansible-galaxy +log_info "Using virtual environment ansible-galaxy: $(which ansible-galaxy)" +log_info "Ansible version in virtual environment: $(ansible --version | head -n1)" + +i=1 +while [ "$i" -le 3 ]; do + if PYTHONUNBUFFERED=1 stdbuf -oL ansible-galaxy collection install -r requirements.yml -p "$ANSIBLE_COLLECTIONS_PATH_ENV" --force; then + log_info "Ansible Collections installed successfully." + break + else + log_warning "Retry $i: Failed to install Ansible collections. Retrying in 5 seconds..." 
+ sleep 5 + i=$((i + 1)) + fi +done +[ "$i" -gt 3 ] && log_error "Failed to install Ansible collections after 3 retries. Check requirements.yml and network connectivity." + +# Start Vagrant VMs and ensure provisioning +log_section "Starting Vagrant VMs" +if stdbuf -oL vagrant status | grep -E "controller.*running|compute.*running" | wc -l | grep -q "^2$"; then + log_info "Both controller and compute VMs are running." + if [ "$FORCE_PROVISION" = true ]; then + log_info "Forcing Ansible provisioning..." + CONTROLLER_IP="$CONTROLLER_IP" COMPUTE_IP="$COMPUTE_IP" stdbuf -oL vagrant provision >vagrant_up.log 2>&1 || { + log_error "Vagrant provision failed. Check vagrant_up.log for details:\n$(cat vagrant_up.log)" + } + else + log_info "Skipping provisioning as VMs are already running. Use --force-provision to re-run Ansible." + fi +else + log_info "Starting and provisioning Vagrant VMs..." + # Check if the box is available locally before trying to download + BOX_NAME="${VAGRANT_BOX:-generic/ubuntu2004}" + if ! vagrant box list | grep -q "$BOX_NAME"; then + log_warning "Box '$BOX_NAME' not found locally. Attempting to download..." + if [ "$OFFLINE_MODE" = true ]; then + log_error "Offline mode enabled but box '$BOX_NAME' not found locally. Please add the box manually or disable offline mode." + fi + fi + + CONTROLLER_IP="$CONTROLLER_IP" COMPUTE_IP="$COMPUTE_IP" stdbuf -oL vagrant up --provider=libvirt --no-tty >vagrant_up.log 2>&1 || { + # Check if the error is related to box download + if grep -q "Could not resolve host\|Failed to download\|not found or could not be accessed" vagrant_up.log; then + log_warning "Vagrant up failed due to box download issues." + # Check if add-local-box.sh exists and is executable + if [ -f add-local-box.sh ] && [ -x add-local-box.sh ]; then + log_info "Attempting to add local box with add-local-box.sh..." + if ./add-local-box.sh --box-name="$BOX_NAME"; then + log_info "Local box added successfully. Retrying vagrant up..." 
+ CONTROLLER_IP="$CONTROLLER_IP" COMPUTE_IP="$COMPUTE_IP" stdbuf -oL vagrant up --provider=libvirt --no-tty >vagrant_up.log 2>&1 || { + log_error "Vagrant up still failed after adding local box. Check vagrant_up.log for details:\n$(cat vagrant_up.log)" + } + else + log_error "Failed to add local box. Try:\n1. Check network connectivity\n2. Manually add a local box with: vagrant box add $BOX_NAME /path/to/box/file\n3. Use a different box by setting VAGRANT_BOX environment variable\n\nCheck vagrant_up.log for details:\n$(cat vagrant_up.log)" + fi + else + log_error "Vagrant up failed due to box download issues. Try:\n1. Check network connectivity\n2. Manually add a local box with: vagrant box add $BOX_NAME /path/to/box/file\n3. Use a different box by setting VAGRANT_BOX environment variable\n\nCheck vagrant_up.log for details:\n$(cat vagrant_up.log)" + fi + else + log_error "Vagrant up failed. Check vagrant_up.log for details:\n$(cat vagrant_up.log)" + fi + } +fi + +# Verify machines are running and SSH is accessible +log_section "Verifying VM Status and SSH Connectivity" +if stdbuf -oL vagrant status | grep -E "controller.*running|compute.*running" | wc -l | grep -q "^2$"; then + log_info "Both controller and compute VMs are running." + # Fix SSH private key ownership + for vm in controller compute; do + key_file=".vagrant/machines/$vm/libvirt/private_key" + if [ -f "$key_file" ]; then + sudo chown "$USER:$USER" "$key_file" || log_error "Failed to change ownership of $key_file to $USER." + chmod 600 "$key_file" || log_error "Failed to set permissions on $key_file." + log_info "Fixed ownership and permissions for $key_file." + else + log_error "Private key $key_file not found after VM start." + fi + done + # Test SSH configuration + if stdbuf -oL vagrant ssh-config >/dev/null 2>&1; then + log_info "SSH configuration is valid." + else + log_error "Vagrant SSH configuration is invalid after VM start. 
 Check .vagrant/machines/*/libvirt/private_key permissions and Vagrantfile."
+    fi
+else
+    log_error "VMs are not both running. Check vagrant_up.log for details:\n$(cat vagrant_up.log)"
+fi
+
+# Verify Ansible playbook completion
+# NOTE(review): removed a stray pasted line ('if ... ansible-galaxy collection
+# install ...; then ]; do') that was a shell syntax error and broke this loop.
+log_section "Verifying Ansible Playbook Completion"
+i=1
+while [ "$i" -le 3 ]; do
+    if grep -q "PLAY RECAP" vagrant_up.log; then
+        log_info "Ansible playbook completed. Checking for failures..."
+        for host in controller compute; do
+            if grep -A 2 "PLAY RECAP.*$host" vagrant_up.log | grep -q "failed=0"; then
+                : # No-op
+            else
+                log_error "Ansible playbook reported failures for $host. Check vagrant_up.log (search 'PLAY RECAP')."
+            fi
+        done
+        log_info "Ansible playbook (site.yml) completed successfully with no reported failures."
+        break
+    else
+        log_warning "Retry $i: PLAY RECAP not found in vagrant_up.log. Retrying in 10 seconds..."
+        sleep 10
+        i=$((i + 1))
+    fi
+done
+[ "$i" -gt 3 ] && log_error "Ansible playbook did not complete after 3 retries. Check vagrant_up.log for details:\n$(cat vagrant_up.log)"
+
+# Trigger cleanup if requested
+log_section "Checking for Cleanup"
+if [ "$CLEANUP" = true ]; then
+    log_info "Triggering cleanup as requested..."
+    if [ -f cleanup.sh ] && [ -x cleanup.sh ]; then
+        ./cleanup.sh --timeout="$TIMEOUT" || log_error "Cleanup failed."
+        log_info "Cleanup completed."
+    else
+        log_warning "cleanup.sh not found or not executable. Skipping cleanup."
+    fi
+fi
+
+# Run test script if available
+if [ -f test-setup.sh ] && [ -x test-setup.sh ]; then
+    log_info "Running setup verification tests..."
+    if ./test-setup.sh; then
+        log_info "Setup verification tests passed."
+    else
+        log_warning "Setup verification tests failed. Check the output above for details."
+ fi +fi + +log_section "Setup Complete" +log_info "You can now SSH into your VMs:" +log_info " vagrant ssh controller" +log_info " vagrant ssh compute" +log_info "To destroy the VMs later, run: ./cleanup.sh --timeout=$TIMEOUT" diff --git a/playbooks/ansible-openstack-nova/test-setup.sh b/playbooks/ansible-openstack-nova/test-setup.sh new file mode 100755 index 00000000..3e0c16f8 --- /dev/null +++ b/playbooks/ansible-openstack-nova/test-setup.sh @@ -0,0 +1,55 @@ +#!/bin/bash +# test-setup.sh +# Test script to verify the OpenStack Nova setup + +# This script is designed to work with the OpenStack Nova setup project +# It can be called automatically by setup.sh to verify the setup + +set -e + +echo "Testing OpenStack Nova Setup" +echo "============================" + +# Check if VMs are running +echo "1. Checking VM status..." +if vagrant status | grep -E "controller.*running|compute.*running" | wc -l | grep -q "^2$"; then + echo "✓ Both controller and compute VMs are running" +else + echo "✗ VMs are not running properly" + exit 1 +fi + +# SSH into controller and check OpenStack services +echo "2. Checking OpenStack services..." +if vagrant ssh controller -c "source ~/admin-openrc.sh && openstack service list" >/dev/null 2>&1; then + echo "✓ OpenStack services are accessible" +else + echo "✗ Cannot access OpenStack services" + exit 1 +fi + +# Check if Nova services are running +echo "3. Checking Nova services..." +if vagrant ssh controller -c "source ~/admin-openrc.sh && openstack compute service list" >/dev/null 2>&1; then + echo "✓ Nova services are running" +else + echo "✗ Nova services are not running properly" + exit 1 +fi + +# Check if we can list images +echo "4. Checking Glance images..." +if vagrant ssh controller -c "source ~/admin-openrc.sh && openstack image list" >/dev/null 2>&1; then + echo "✓ Glance images are accessible" +else + echo "✗ Cannot access Glance images" + exit 1 +fi + +echo "" +echo "All tests passed! 
 The OpenStack Nova setup is working correctly."
+echo ""
+echo "You can now:"
+echo "  - SSH into the controller: vagrant ssh controller"
+echo "  - SSH into the compute node: vagrant ssh compute"
+echo "  - Access OpenStack CLI on the controller VM"
\ No newline at end of file
diff --git a/playbooks/nova/README.md b/playbooks/nova/README.md
new file mode 100644
index 00000000..f2b6a8b7
--- /dev/null
+++ b/playbooks/nova/README.md
@@ -0,0 +1,153 @@
+# OpenStack Nova Ansible Automation
+
+This project provides an **idempotent, role-based Ansible automation framework** for deploying and validating the OpenStack Nova (Compute) service across controller and compute nodes. It is designed for reproducible, production-grade deployments on Ubuntu-based systems.
+
+---
+
+## Features
+
+* Validates Keystone and Glance availability before proceeding
+* Installs and configures all core Nova components:
+
+  * `nova-api`, `nova-conductor`, `nova-scheduler`, `nova-compute`
+* Initializes and maps Nova cells (`cell0`, `cell1`)
+* Configures hypervisor support using KVM and libvirt
+* Provisions standard flavors (e.g. `m1.small`, `m1.large`)
+* (Optional) Sets project quotas
+* Deploys a test VM to validate end-to-end Nova functionality
+* Modular and inventory-scoped using best practices
+
+---
+
+## Directory Structure
+
+```
+nova/
+├── ansible.cfg
+├── inventories/
+│   └── production/
+│       ├── hosts.yml
+│       └── group_vars/
+│           └── all.yml
+├── playbooks/
+│   └── site.yml
+├── requirements.yml
+├── README.md
+└── roles/
+    ├── cell_discovery/
+    ├── check_dependencies/
+    ├── flavors/
+    ├── kvm_config/
+    ├── nova_compute/
+    ├── nova_controller/
+    └── test_vm_launch/
+```
+
+---
+
+## Usage
+
+### 1. 
Prerequisites + +* Target hosts should be Ubuntu 20.04+ with root SSH access +* OpenStack packages should already be installed (or provisioned via roles) +* A working Keystone + Glance setup +* The file `/root/admin-openrc.sh` must exist on the controller with valid OpenStack credentials + +### 2. Install Ansible Collections + +Collections are declared in `requirements.yml`: + +```yaml +# requirements.yml +collections: + - name: openstack.cloud + - name: community.general +``` + +Install them using: + +```bash +ansible-galaxy collection install -r requirements.yml +``` + +### 3. Source Keystone Credentials + +```bash +source /root/admin-openrc.sh +``` + +### 4. Vaulted Secrets + +The following sensitive variables are defined in `inventories/production/group_vars/all.yml`: + +```yaml +nova_db_password: "nova_db_pass" +nova_user_password: "nova_user_pass" +``` + +These values should be encrypted using [Ansible Vault](https://docs.ansible.com/ansible/latest/vault_guide/index.html) to prevent exposure in version control: + +```bash +ansible-vault encrypt inventories/production/group_vars/all.yml +``` + +They are securely used throughout all relevant roles (e.g. `nova_controller`, `nova_compute`). + +### 5. Run the Full Deployment + +```bash +ansible-playbook -i inventories/production/ playbooks/site.yml +``` + +### 6. Run by Component (Optional) + +* Controller node only: + + ```bash + ansible-playbook -i inventories/production/ playbooks/controller.yml + ``` + +* Compute node(s) only: + + ```bash + ansible-playbook -i inventories/production/ playbooks/compute.yml + ``` + +--- + +## Post-Deployment Validation + +Confirm Nova is functional: + +```bash +openstack compute service list +openstack flavor list +openstack server list +nova-status upgrade check +``` + +--- + +## Notes + +* The `test_vm_launch` role ensures Nova is functional by booting a temporary VM and validating its state. +* All roles are idempotent and fail gracefully when misconfigured. 
+* Group-scoped configuration (e.g. Keystone auth, DB credentials) is in:
+
+  * `inventories/production/group_vars/controller.yml`
+  * `inventories/production/group_vars/compute.yml`
+  * Common credentials are shared in `group_vars/all.yml` and should be encrypted using Ansible Vault.
+
+---
+
+## Requirements
+
+* Ansible ≥ 2.9 (2.12+ recommended)
+* Required collections (installed via `requirements.yml`):
+
+  * `openstack.cloud`
+  * `community.general`
+* Functional DNS or `/etc/hosts` entries so compute nodes can resolve `controller`
+* SSH key-based access to all nodes
+* MySQL backend and RabbitMQ running if needed by Nova
diff --git a/playbooks/nova/ansible.cfg b/playbooks/nova/ansible.cfg
new file mode 100644
index 00000000..a6210562
--- /dev/null
+++ b/playbooks/nova/ansible.cfg
@@ -0,0 +1,12 @@
+[defaults]
+inventory = inventories/production/hosts.yml
+roles_path = ./roles
+retry_files_enabled = false
+host_key_checking = false
+timeout = 30
+deprecation_warnings = false
+interpreter_python = auto_silent
+
+[privilege_escalation]
+become = true
+become_method = sudo
diff --git a/playbooks/nova/inventories/production/group_vars/all.yml b/playbooks/nova/inventories/production/group_vars/all.yml
new file mode 100644
index 00000000..8e1b8b1d
--- /dev/null
+++ b/playbooks/nova/inventories/production/group_vars/all.yml
@@ -0,0 +1,3 @@
+nova_db_password: "nova_db_pass"
+nova_user_password: "nova_user_pass"
+
diff --git a/playbooks/nova/inventories/production/hosts.yml b/playbooks/nova/inventories/production/hosts.yml
new file mode 100644
index 00000000..f72fa73e
--- /dev/null
+++ b/playbooks/nova/inventories/production/hosts.yml
@@ -0,0 +1,15 @@
+all:
+  vars:
+    ansible_ssh_private_key_file: ~/.ssh/ansible_key
+    ansible_password: hellon653
+  children:
+    controller:
+      hosts:
+        controller:
+          ansible_host: 192.168.121.245
+          ansible_user: vagrant
+    compute:
+      hosts:
+        compute1:
+          ansible_host: 192.168.121.245
+          ansible_user: vagrant
diff --git 
a/playbooks/nova/playbooks/site.yml b/playbooks/nova/playbooks/site.yml new file mode 100644 index 00000000..4f31d05b --- /dev/null +++ b/playbooks/nova/playbooks/site.yml @@ -0,0 +1,22 @@ +--- +- hosts: controller + gather_facts: true + become: true + roles: + - check_dependencies + - nova_controller + - cell_discovery + - flavors + +- hosts: compute + gather_facts: true + become: true + roles: + - nova_compute + - kvm_config + +- hosts: controller + gather_facts: true + become: true + roles: + - test_vm_launch diff --git a/playbooks/nova/requirements.yml b/playbooks/nova/requirements.yml new file mode 100644 index 00000000..1d2999e5 --- /dev/null +++ b/playbooks/nova/requirements.yml @@ -0,0 +1,4 @@ +collections: + - name: openstack.cloud + - name: community.general + - name: community.mysql \ No newline at end of file diff --git a/playbooks/nova/roles/cell_discovery/README.md b/playbooks/nova/roles/cell_discovery/README.md new file mode 100644 index 00000000..a787ec05 --- /dev/null +++ b/playbooks/nova/roles/cell_discovery/README.md @@ -0,0 +1,26 @@ +# cell_discovery + +This role handles the creation and discovery of Nova cells (cell0 and cell1), required for scaling out Nova compute services in an OpenStack deployment. 
+ +## Features + +- Maps `cell0` (idempotent) +- Creates `cell1` if not present +- Discovers and registers compute hosts + +## Variables + +| Variable | Description | Default | +|------------------|--------------------------------------|--------------| +| `nova_manage_cmd` | Path to the `nova-manage` binary | `/usr/bin/nova-manage` | +| `nova_user` | System user that owns Nova services | `nova` | +| `nova_group` | System group for Nova | `nova` | + +## Usage + +Include this role after `nova_controller` and `nova_compute` roles are complete: + +```yaml +- hosts: controller + roles: + - cell_discovery diff --git a/playbooks/nova/roles/cell_discovery/defaults/main.yml b/playbooks/nova/roles/cell_discovery/defaults/main.yml new file mode 100644 index 00000000..ac111402 --- /dev/null +++ b/playbooks/nova/roles/cell_discovery/defaults/main.yml @@ -0,0 +1,4 @@ +--- +nova_manage_cmd: /usr/bin/nova-manage +nova_user: nova +nova_group: nova diff --git a/playbooks/nova/roles/cell_discovery/meta/main.yml b/playbooks/nova/roles/cell_discovery/meta/main.yml new file mode 100644 index 00000000..c889d1ab --- /dev/null +++ b/playbooks/nova/roles/cell_discovery/meta/main.yml @@ -0,0 +1,8 @@ +--- +galaxy_info: + author: onelrian + description: Register Nova cells and map compute hosts. 
+ min_ansible_version: "2.16" + platforms: + - name: Ubuntu + diff --git a/playbooks/nova/roles/cell_discovery/tasks/main.yml b/playbooks/nova/roles/cell_discovery/tasks/main.yml new file mode 100644 index 00000000..290a07c2 --- /dev/null +++ b/playbooks/nova/roles/cell_discovery/tasks/main.yml @@ -0,0 +1,23 @@ +--- +- name: Map cell0 (idempotent) + become: true + command: "{{ nova_manage_cmd }} cell_v2 map_cell0" + register: map_cell0_result + changed_when: "'Cell0 is already setup' not in map_cell0_result.stdout" + failed_when: map_cell0_result.rc != 0 and 'Cell0 is already setup' not in map_cell0_result.stdout + +- name: Create cell1 (if not exists) + become: true + command: > + {{ nova_manage_cmd }} cell_v2 create_cell --name=cell1 --verbose + register: create_cell1_result + changed_when: "'already exists' not in create_cell1_result.stderr" + failed_when: create_cell1_result.rc != 0 and 'already exists' not in create_cell1_result.stderr + +- name: Discover compute hosts + become: true + command: > + {{ nova_manage_cmd }} cell_v2 discover_hosts --verbose + register: discover_hosts_result + changed_when: "'0 hosts' not in discover_hosts_result.stdout" + failed_when: discover_hosts_result.rc != 0 diff --git a/playbooks/nova/roles/check_dependencies/README.md b/playbooks/nova/roles/check_dependencies/README.md new file mode 100644 index 00000000..9287dd5a --- /dev/null +++ b/playbooks/nova/roles/check_dependencies/README.md @@ -0,0 +1,18 @@ +# Role: check_dependencies + +This role ensures that Keystone and Glance services are available and responsive before Nova installation proceeds. + +## Tasks: +- Authenticates against Keystone v3 +- Extracts token +- Uses token to verify Glance service availability + +## Variables: +Override via `group_vars/controller.yml` or env vars: +- `keystone_url` +- `keystone_user` +- `keystone_password` +- `keystone_project` + +## Failures: +- Aborts play early if services are unavailable. 
diff --git a/playbooks/nova/roles/check_dependencies/defaults/main.yml b/playbooks/nova/roles/check_dependencies/defaults/main.yml new file mode 100644 index 00000000..47104b96 --- /dev/null +++ b/playbooks/nova/roles/check_dependencies/defaults/main.yml @@ -0,0 +1,6 @@ +--- +keystone_url: "{{ lookup('env', 'OS_AUTH_URL') }}" +keystone_user: "{{ lookup('env', 'OS_USERNAME') }}" +keystone_password: "{{ lookup('env', 'OS_PASSWORD') }}" +keystone_project: "{{ lookup('env', 'OS_PROJECT_NAME') }}" +glance_url: "{{ lookup('env', 'OS_IMAGE_API_VERSION') | default('v2') }}" \ No newline at end of file diff --git a/playbooks/nova/roles/check_dependencies/tasks/main.yml b/playbooks/nova/roles/check_dependencies/tasks/main.yml new file mode 100644 index 00000000..18a73969 --- /dev/null +++ b/playbooks/nova/roles/check_dependencies/tasks/main.yml @@ -0,0 +1,52 @@ +--- +- name: Check Keystone availability by requesting token + uri: + url: "{{ keystone_url }}{{ keystone_token_path }}" + method: POST + body_format: json + return_content: true + status_code: 201 + headers: + Content-Type: application/json + body: > + { + "auth": { + "identity": { + "methods": ["password"], + "password": { + "user": { + "name": "{{ keystone_user }}", + "domain": { "id": "default" }, + "password": "{{ keystone_password }}" + } + } + }, + "scope": { + "project": { + "name": "{{ keystone_project }}", + "domain": { "id": "default" } + } + } + } + } + register: keystone_response + no_log: true + +- name: Extract Keystone token + set_fact: + keystone_token: "{{ keystone_response['headers']['X-Subject-Token'] }}" + +- name: Check Glance service availability + uri: + url: "{{ keystone_url | regex_replace('/v3$', '') }}/{{ glance_check_endpoint }}" + method: GET + headers: + X-Auth-Token: "{{ keystone_token }}" + status_code: 200 + register: glance_response + +- name: Assert Glance is reachable + assert: + that: + - glance_response.status == 200 + fail_msg: "Glance is not responding. 
Ensure Glance (US2.4) is installed and accessible." diff --git a/playbooks/nova/roles/check_dependencies/vars/main.yml b/playbooks/nova/roles/check_dependencies/vars/main.yml new file mode 100644 index 00000000..3de45284 --- /dev/null +++ b/playbooks/nova/roles/check_dependencies/vars/main.yml @@ -0,0 +1,3 @@ +--- +keystone_token_path: "/v3/auth/tokens" +glance_check_endpoint: "/v2/images" diff --git a/playbooks/nova/roles/flavors/README.md b/playbooks/nova/roles/flavors/README.md new file mode 100644 index 00000000..7227e340 --- /dev/null +++ b/playbooks/nova/roles/flavors/README.md @@ -0,0 +1,24 @@ +# flavors + +This role creates standard VM flavor definitions in OpenStack using the `openstack.cloud.compute_flavor` module. + +## Features + +- Creates standard instance types (`m1.tiny`, `m1.small`, etc.) +- Fully idempotent +- Uses Keystone credentials from the environment (`admin-openrc.sh`) + +## Variables + +| Variable | Description | Default | +|---------------------|--------------------------------------|--------------| +| `openstack_flavors` | List of flavor definitions | See defaults | +| `flavor_project` | Project under which to create flavors| `admin` | +| `flavor_region` | Target region name | `RegionOne` | + +## Usage + +```yaml +- hosts: controller + roles: + - flavors diff --git a/playbooks/nova/roles/flavors/defaults/main.yml b/playbooks/nova/roles/flavors/defaults/main.yml new file mode 100644 index 00000000..8c8c4bbf --- /dev/null +++ b/playbooks/nova/roles/flavors/defaults/main.yml @@ -0,0 +1,29 @@ +--- +# Define standard flavors to be created +openstack_flavors: + - name: m1.tiny + ram: 512 + vcpus: 1 + disk: 1 + - name: m1.small + ram: 2048 + vcpus: 1 + disk: 20 + - name: m1.medium + ram: 4096 + vcpus: 2 + disk: 40 + - name: m1.large + ram: 8192 + vcpus: 4 + disk: 80 + - name: m1.xlarge + ram: 16384 + vcpus: 8 + disk: 160 + +# Project scope (usually "admin") +flavor_project: admin + +# Region name (optional) +flavor_region: RegionOne diff --git 
a/playbooks/nova/roles/flavors/meta/main.yml b/playbooks/nova/roles/flavors/meta/main.yml new file mode 100644 index 00000000..6fac428b --- /dev/null +++ b/playbooks/nova/roles/flavors/meta/main.yml @@ -0,0 +1,7 @@ +--- +galaxy_info: + author: onelrian + description: Create standard VM flavors for OpenStack compute. + min_ansible_version: "2.16" + platforms: + - name: Ubuntu \ No newline at end of file diff --git a/playbooks/nova/roles/flavors/tasks/main.yml b/playbooks/nova/roles/flavors/tasks/main.yml new file mode 100644 index 00000000..11164a95 --- /dev/null +++ b/playbooks/nova/roles/flavors/tasks/main.yml @@ -0,0 +1,26 @@ +--- +- name: Ensure required OpenStack credentials are sourced + assert: + that: + - lookup('env', 'OS_AUTH_URL') != '' + - lookup('env', 'OS_USERNAME') != '' + - lookup('env', 'OS_PASSWORD') != '' + - lookup('env', 'OS_PROJECT_NAME') != '' + fail_msg: "OpenStack credentials are not set. Source admin-openrc before running this role." + +- name: Create OpenStack flavors + become: true + vars: + flavor_extra_specs: {} + openstack.cloud.compute_flavor: + cloud: null # Use environment variables instead of clouds.yaml + name: "{{ item.name }}" + ram: "{{ item.ram }}" + vcpus: "{{ item.vcpus }}" + disk: "{{ item.disk }}" + region_name: "{{ flavor_region }}" + project: "{{ flavor_project }}" + is_public: true + state: present + extra_specs: "{{ flavor_extra_specs }}" + loop: "{{ openstack_flavors }}" diff --git a/playbooks/nova/roles/kvm_config/README.md b/playbooks/nova/roles/kvm_config/README.md new file mode 100644 index 00000000..5043db60 --- /dev/null +++ b/playbooks/nova/roles/kvm_config/README.md @@ -0,0 +1,17 @@ +# Role: kvm_config + +Configures KVM virtualization and libvirt on OpenStack compute nodes. 
+ +## Responsibilities: +- Installs KVM, QEMU, and libvirt packages +- Checks for virtualization hardware support (VT-x/AMD-V) +- Starts and enables libvirt services +- Ensures 'nova' user is in 'libvirt' group + +## Variables: +- `kvm_packages`: List of required virtualization packages +- `libvirt_services`: Libvirt-related systemd units to enable + +## Notes: +- Uses `kvm-ok` on Ubuntu/Debian to validate CPU support +- For RHEL/CentOS, consider using `virt-host-validate` diff --git a/playbooks/nova/roles/kvm_config/defaults/main.yml b/playbooks/nova/roles/kvm_config/defaults/main.yml new file mode 100644 index 00000000..62899f2a --- /dev/null +++ b/playbooks/nova/roles/kvm_config/defaults/main.yml @@ -0,0 +1,9 @@ +--- +kvm_packages: + - qemu-kvm + - libvirt-daemon-system + - libvirt-clients + - bridge-utils + - virtinst + - virt-top + - cpu-checker diff --git a/playbooks/nova/roles/kvm_config/meta/main.yml b/playbooks/nova/roles/kvm_config/meta/main.yml new file mode 100644 index 00000000..058a1766 --- /dev/null +++ b/playbooks/nova/roles/kvm_config/meta/main.yml @@ -0,0 +1,5 @@ +--- +galaxy_info: + author: onelrian + description: Configure libvirt and KVM for OpenStack compute nodes + min_ansible_version: "2.16" diff --git a/playbooks/nova/roles/kvm_config/tasks/main.yml b/playbooks/nova/roles/kvm_config/tasks/main.yml new file mode 100644 index 00000000..0c7eced1 --- /dev/null +++ b/playbooks/nova/roles/kvm_config/tasks/main.yml @@ -0,0 +1,26 @@ +--- +- name: Install KVM and virtualization packages + package: + name: "{{ kvm_packages }}" + state: present + +- name: Check if CPU supports virtualization + command: kvm-ok + register: kvm_check + # kvm-ok is a read-only probe; it never changes the host. + changed_when: false + failed_when: kvm_check.rc != 0 or 'KVM acceleration can be used' not in kvm_check.stdout + +- name: Ensure nova user is in libvirt group + user: + name: nova + groups: libvirt + append: true + state: present + +- name: Enable and start libvirt-related services + service: + name: "{{ item }}" + state: started + enabled: true
+ loop: "{{ libvirt_services }}" diff --git a/playbooks/nova/roles/kvm_config/vars/main.yml b/playbooks/nova/roles/kvm_config/vars/main.yml new file mode 100644 index 00000000..229dd393 --- /dev/null +++ b/playbooks/nova/roles/kvm_config/vars/main.yml @@ -0,0 +1,4 @@ +--- +libvirt_services: + - libvirtd + - virtlogd diff --git a/playbooks/nova/roles/nova_compute/README.md b/playbooks/nova/roles/nova_compute/README.md new file mode 100644 index 00000000..a16d006b --- /dev/null +++ b/playbooks/nova/roles/nova_compute/README.md @@ -0,0 +1,16 @@ +# Role: nova_compute + +Installs and configures Nova Compute service on compute nodes. + +## Responsibilities: +- Install nova-compute package +- Render /etc/nova/nova.conf with controller integration +- Ensure nova-compute service is enabled and running + +## Variables: +- `nova_user_password`: Keystone password for nova user +- `controller_host`: Hostname or IP of controller +- `virt_type`: Hypervisor type (e.g., kvm or qemu) + +## Notes: +- Assumes libvirt and KVM are configured (via `kvm_config` role) diff --git a/playbooks/nova/roles/nova_compute/defaults/main.yml b/playbooks/nova/roles/nova_compute/defaults/main.yml new file mode 100644 index 00000000..68a0167a --- /dev/null +++ b/playbooks/nova/roles/nova_compute/defaults/main.yml @@ -0,0 +1,3 @@ +--- +controller_host: "controller" +virt_type: "kvm" diff --git a/playbooks/nova/roles/nova_compute/handlers/main.yml b/playbooks/nova/roles/nova_compute/handlers/main.yml new file mode 100644 index 00000000..ab5146f1 --- /dev/null +++ b/playbooks/nova/roles/nova_compute/handlers/main.yml @@ -0,0 +1,5 @@ +--- +- name: restart nova-compute + service: + name: nova-compute + state: restarted diff --git a/playbooks/nova/roles/nova_compute/meta/main.yml b/playbooks/nova/roles/nova_compute/meta/main.yml new file mode 100644 index 00000000..76e24cf8 --- /dev/null +++ b/playbooks/nova/roles/nova_compute/meta/main.yml @@ -0,0 +1,7 @@ +--- +galaxy_info: + author: onel + description: 
Nova Compute installation and configuration + min_ansible_version: "2.16" + platforms: + - name: Ubuntu \ No newline at end of file diff --git a/playbooks/nova/roles/nova_compute/tasks/main.yml b/playbooks/nova/roles/nova_compute/tasks/main.yml new file mode 100644 index 00000000..d42d3177 --- /dev/null +++ b/playbooks/nova/roles/nova_compute/tasks/main.yml @@ -0,0 +1,20 @@ +--- +- name: Install Nova Compute packages + package: + name: "{{ compute_packages }}" + state: present + +- name: Configure nova.conf for compute node + template: + src: nova.conf.j2 + dest: /etc/nova/nova.conf + owner: root + group: root + mode: '0644' + notify: restart nova-compute + +- name: Ensure nova-compute service is enabled and started + service: + name: nova-compute + state: started + enabled: true diff --git a/playbooks/nova/roles/nova_compute/templates/nova.conf.j2 b/playbooks/nova/roles/nova_compute/templates/nova.conf.j2 new file mode 100644 index 00000000..30175165 --- /dev/null +++ b/playbooks/nova/roles/nova_compute/templates/nova.conf.j2 @@ -0,0 +1,44 @@ +[DEFAULT] +enabled_apis = osapi_compute,metadata +transport_url = rabbit://openstack:password@{{ controller_host }} +my_ip = {{ ansible_default_ipv4.address }} +use_neutron = true +firewall_driver = nova.virt.firewall.NoopFirewallDriver + +[api] +auth_strategy = keystone + +[keystone_authtoken] +auth_url = http://{{ controller_host }}:5000/v3 +memcached_servers = {{ controller_host }}:11211 +auth_type = password +project_domain_name = Default +user_domain_name = Default +project_name = service +username = nova +password = {{ nova_user_password }} + +[vnc] +enabled = true +vncserver_listen = 0.0.0.0 +vncserver_proxyclient_address = {{ ansible_default_ipv4.address }} +novncproxy_base_url = http://{{ controller_host }}:6080/vnc_auto.html + +[glance] +api_servers = http://{{ controller_host }}:9292 + +[oslo_concurrency] +lock_path = /var/lib/nova/tmp + +[placement] +region_name = RegionOne +project_domain_name = Default 
+project_name = service +auth_type = password +user_domain_name = Default +auth_url = http://{{ controller_host }}:5000/v3 +username = placement +password = {{ nova_user_password }} + +[libvirt] +virt_type = {{ virt_type }} diff --git a/playbooks/nova/roles/nova_compute/vars/main.yml b/playbooks/nova/roles/nova_compute/vars/main.yml new file mode 100644 index 00000000..e4ec78ef --- /dev/null +++ b/playbooks/nova/roles/nova_compute/vars/main.yml @@ -0,0 +1,4 @@ +--- +compute_packages: + - nova-compute + - python3-nova diff --git a/playbooks/nova/roles/nova_controller/README.md b/playbooks/nova/roles/nova_controller/README.md new file mode 100644 index 00000000..8411d041 --- /dev/null +++ b/playbooks/nova/roles/nova_controller/README.md @@ -0,0 +1,29 @@ +# Role: nova\_controller + +This role installs and configures the Nova controller services for OpenStack. + +## Responsibilities + +* Create `nova` and `nova_api` MySQL databases. +* Create the Keystone `nova` user and assign the `admin` role. +* Register the Nova service and API endpoints (public, internal, admin) in Keystone. +* Install and enable Nova controller components: `nova-api`, `nova-scheduler`, `nova-conductor`. +* Manage `nova.conf` configuration using a Jinja2 template. +* Synchronize Nova and Nova API database schemas. + +## Variables + +| Variable | Description | +| ---------------------------- | ---------------------------------------------------- | +| `nova_db_password` | Password for the MySQL user `nova`. | +| `nova_user_password` | Password for the Keystone user `nova`. | +| `nova_api_url` | Base URL used for registering Nova API endpoints. | +| `db_host` | Hostname or IP address of the MySQL database server. | +| `keystone_host` | Hostname or IP address of the Keystone service. | +| `memcached_host` | Hostname or IP address of the Memcached server. | +| `nova_keystone_service_name` | Keystone service name (default: `nova`). 
| +| `nova_keystone_service_type` | Keystone service type (default: `compute`). | +| `nova_keystone_description` | Description for the Keystone Nova service. | +| `nova_services` | List of Nova services to manage and start. | + +Variables should be defined in `group_vars`, `host_vars`, or passed at runtime. diff --git a/playbooks/nova/roles/nova_controller/defaults/main.yml b/playbooks/nova/roles/nova_controller/defaults/main.yml new file mode 100644 index 00000000..e4f91dcd --- /dev/null +++ b/playbooks/nova/roles/nova_controller/defaults/main.yml @@ -0,0 +1,3 @@ +--- +nova_connection: "mysql+pymysql://nova:{{ nova_db_password }}@controller/nova" +compute_host: controller \ No newline at end of file diff --git a/playbooks/nova/roles/nova_controller/handlers/main.yml b/playbooks/nova/roles/nova_controller/handlers/main.yml new file mode 100644 index 00000000..279f2eb7 --- /dev/null +++ b/playbooks/nova/roles/nova_controller/handlers/main.yml @@ -0,0 +1,6 @@ +--- +- name: restart nova services + service: + name: "{{ item.name }}" + state: restarted + loop: "{{ nova_services }}" diff --git a/playbooks/nova/roles/nova_controller/meta/main.yml b/playbooks/nova/roles/nova_controller/meta/main.yml new file mode 100644 index 00000000..d7a6a4e4 --- /dev/null +++ b/playbooks/nova/roles/nova_controller/meta/main.yml @@ -0,0 +1,7 @@ +--- +galaxy_info: + author: onelrian + description: Nova controller installation and registration + min_ansible_version: "2.16" + platforms: + - name: Ubuntu \ No newline at end of file diff --git a/playbooks/nova/roles/nova_controller/tasks/main.yml b/playbooks/nova/roles/nova_controller/tasks/main.yml new file mode 100644 index 00000000..da7f5020 --- /dev/null +++ b/playbooks/nova/roles/nova_controller/tasks/main.yml @@ -0,0 +1,86 @@ +--- +- name: Install Nova controller packages + package: + name: + - nova-api + - nova-conductor + - nova-scheduler + - python3-openstackclient + - python3-nova + state: present + +- name: Create nova databases + 
community.mysql.mysql_db: + name: "{{ item }}" + state: present + loop: + - nova + - nova_api + +- name: Grant access to nova database + community.mysql.mysql_user: + name: nova + password: "{{ nova_db_password }}" + priv: + - "nova.*:ALL" + - "nova_api.*:ALL" + host: "%" + state: present + +- name: Source admin credentials + shell: source /root/admin-openrc.sh + args: + executable: /bin/bash + +- name: Create nova user + command: openstack user create --domain default --password "{{ nova_user_password }}" nova + register: create_nova_user + failed_when: create_nova_user.rc != 0 and 'Conflict' not in create_nova_user.stderr + changed_when: "'Created' in create_nova_user.stdout" + +- name: Add admin role to nova user + command: openstack role add --project service --user nova admin + ignore_errors: yes + +- name: Register nova service + command: openstack service create --name nova --description "{{ nova_keystone_description }}" {{ nova_keystone_service_type }} + register: nova_service + failed_when: nova_service.rc != 0 and 'Conflict' not in nova_service.stderr + changed_when: "'Created' in nova_service.stdout" + +- name: Register nova endpoints + block: + - name: Create public endpoint + command: openstack endpoint create --region RegionOne {{ nova_keystone_service_type }} public {{ nova_api_url }} + register: ep1 + failed_when: ep1.rc != 0 and 'Conflict' not in ep1.stderr + - name: Create internal endpoint + command: openstack endpoint create --region RegionOne {{ nova_keystone_service_type }} internal {{ nova_api_url }} + register: ep2 + failed_when: ep2.rc != 0 and 'Conflict' not in ep2.stderr + - name: Create admin endpoint + command: openstack endpoint create --region RegionOne {{ nova_keystone_service_type }} admin {{ nova_api_url }} + register: ep3 + failed_when: ep3.rc != 0 and 'Conflict' not in ep3.stderr + +- name: Configure nova.conf + template: + src: nova.conf.j2 + dest: /etc/nova/nova.conf + owner: root + group: root + mode: '0644' + notify: restart 
+ nova services + +- name: Sync nova-api DB + command: su -s /bin/sh -c "nova-manage api_db sync" nova + +- name: Sync nova DB + command: su -s /bin/sh -c "nova-manage db sync" nova + +- name: Enable and start nova services + service: + name: "{{ item.name }}" + state: started + enabled: true + loop: "{{ nova_services }}" diff --git a/playbooks/nova/roles/nova_controller/templates/nova.conf.j2 b/playbooks/nova/roles/nova_controller/templates/nova.conf.j2 new file mode 100644 index 00000000..fc159f65 --- /dev/null +++ b/playbooks/nova/roles/nova_controller/templates/nova.conf.j2 @@ -0,0 +1,21 @@ +[DEFAULT] +enabled_apis = osapi_compute,metadata + +[api_database] +connection = mysql+pymysql://nova:{{ nova_db_password }}@{{ compute_host }}/nova_api + +[database] +connection = mysql+pymysql://nova:{{ nova_db_password }}@{{ compute_host }}/nova + +[keystone_authtoken] +auth_url = http://{{ compute_host }}:5000/v3 +memcached_servers = {{ compute_host }}:11211 +auth_type = password +project_domain_name = Default +user_domain_name = Default +project_name = service +username = nova +password = {{ nova_user_password }} + +[oslo_concurrency] +lock_path = /var/lib/nova/tmp diff --git a/playbooks/nova/roles/nova_controller/vars/main.yml b/playbooks/nova/roles/nova_controller/vars/main.yml new file mode 100644 index 00000000..07b4496c --- /dev/null +++ b/playbooks/nova/roles/nova_controller/vars/main.yml @@ -0,0 +1,10 @@ +--- +nova_services: + - name: nova-api + - name: nova-scheduler + - name: nova-conductor +nova_keystone_service_name: "nova" +nova_keystone_service_type: "compute" +nova_keystone_description: "OpenStack Compute Service" +# Endpoint host must match the compute_host default ("controller") used by nova.conf.j2. +nova_api_url: "http://{{ compute_host }}:8774/v2.1" diff --git a/playbooks/nova/roles/test_vm_launch/README.md b/playbooks/nova/roles/test_vm_launch/README.md new file mode 100644 index 00000000..fc59a9bc --- /dev/null +++ b/playbooks/nova/roles/test_vm_launch/README.md @@ -0,0 +1,31 @@ +# test_vm_launch + +This role launches a temporary test VM to
validate the correct functioning of the Nova compute stack in an OpenStack environment. + +## Features + +- Provisions a test instance using a known flavor/image/network +- Waits for VM to reach `ACTIVE` state +- Optionally cleans up VM and keypair afterward +- Fully idempotent and repeatable + +## Variables + +| Variable | Description | Default | +|------------------------|-------------------------------------|---------------| +| `test_vm_name` | Name of the test VM | `test-instance` | +| `test_vm_image` | Glance image name to use | `cirros` | +| `test_vm_flavor` | Flavor name to use | `m1.tiny` | +| `test_vm_network` | Network name to attach | `private` | +| `test_vm_keypair` | SSH keypair name | `test-key` | +| `test_vm_create_keypair` | Whether to create/delete keypair | `true` | +| `test_vm_key_path` | Path to generated local key | `/tmp/test-key.pem` | +| `test_vm_timeout` | Timeout for instance launch | `300` | +| `test_vm_cleanup` | Whether to delete VM + key after test | `true` | + +## Usage + +```yaml +- hosts: controller + roles: + - test_vm_launch diff --git a/playbooks/nova/roles/test_vm_launch/defaults/main.yml b/playbooks/nova/roles/test_vm_launch/defaults/main.yml new file mode 100644 index 00000000..8dfb3b9a --- /dev/null +++ b/playbooks/nova/roles/test_vm_launch/defaults/main.yml @@ -0,0 +1,11 @@ +--- +test_vm_name: test-instance +test_vm_image: cirros +test_vm_flavor: m1.tiny +test_vm_network: private # Must exist already +test_vm_keypair: test-key +test_vm_timeout: 300 +test_vm_cleanup: true # If false, VM will remain for manual inspection +test_vm_ssh_user: cirros +test_vm_create_keypair: true +test_vm_key_path: /tmp/test-key.pem diff --git a/playbooks/nova/roles/test_vm_launch/meta/main.yml b/playbooks/nova/roles/test_vm_launch/meta/main.yml new file mode 100644 index 00000000..9b2b90b0 --- /dev/null +++ b/playbooks/nova/roles/test_vm_launch/meta/main.yml @@ -0,0 +1,7 @@ +--- +galaxy_info: + author: onelrian + description: Launch and verify a 
test VM for Nova service integration testing. + min_ansible_version: "2.16" + platforms: + - name: Ubuntu \ No newline at end of file diff --git a/playbooks/nova/roles/test_vm_launch/tasks/main.yml b/playbooks/nova/roles/test_vm_launch/tasks/main.yml new file mode 100644 index 00000000..62620ffd --- /dev/null +++ b/playbooks/nova/roles/test_vm_launch/tasks/main.yml @@ -0,0 +1,59 @@ +--- +- name: Ensure OpenStack credentials are present + assert: + that: + - lookup('env', 'OS_AUTH_URL') != '' + fail_msg: "Keystone credentials not set — did you source admin-openrc?" + +- name: Create keypair if needed + when: test_vm_create_keypair + openstack.cloud.keypair: + name: "{{ test_vm_keypair }}" + state: present + public_key_file: "{{ test_vm_key_path }}.pub" + +- name: Boot test VM instance + openstack.cloud.server: + name: "{{ test_vm_name }}" + image: "{{ test_vm_image }}" + flavor: "{{ test_vm_flavor }}" + network: "{{ test_vm_network }}" + key_name: "{{ test_vm_keypair }}" + wait: true + timeout: "{{ test_vm_timeout }}" + auto_ip: false + state: present + register: vm_boot + +- name: Assert test VM is ACTIVE + assert: + that: + - vm_boot.server.status == "ACTIVE" + fail_msg: "Test VM failed to reach ACTIVE state. Status: {{ vm_boot.server.status }}" + +- name: Print test VM info + debug: + msg: "Test VM {{ test_vm_name }} is ACTIVE. ID: {{ vm_boot.server.id }}" + +- name: Delete test VM + when: test_vm_cleanup + openstack.cloud.server: + name: "{{ test_vm_name }}" + state: absent + wait: true + timeout: "{{ test_vm_timeout }}" + +- name: Remove keypair + when: test_vm_cleanup and test_vm_create_keypair + openstack.cloud.keypair: + name: "{{ test_vm_keypair }}" + state: absent + +- name: Remove local key files + when: test_vm_cleanup and test_vm_create_keypair + file: + path: "{{ item }}" + state: absent + loop: + - "{{ test_vm_key_path }}" + - "{{ test_vm_key_path }}.pub"