diff --git a/dev/img-mirror/.gitignore b/dev/img-mirror/.gitignore deleted file mode 100644 index 3fba00b..0000000 --- a/dev/img-mirror/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -*.img -*.qcow -log.txt -PID diff --git a/dev/img-mirror/README.md b/dev/img-mirror/README.md deleted file mode 100644 index 2c0ce9b..0000000 --- a/dev/img-mirror/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# local webserver for testing images - -`up_image_url = http://localhost:8000/Rocky-9-GenericCloud.latest.x86_64.qcow2` diff --git a/dev/img-mirror/get.sh b/dev/img-mirror/get.sh deleted file mode 100755 index 03a5301..0000000 --- a/dev/img-mirror/get.sh +++ /dev/null @@ -1,15 +0,0 @@ -# downloads image for use with serve.sh -if [ ! -f "noble-server-cloudimg-amd64.img" ] -then -wget "https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img" -fi - -# untested -if [ ! -f amzn2-kvm-2.0.20240306.2-x86_64.xfs.gpt.qcow2 ] -then -wget https://cdn.amazonlinux.com/os-images/2.0.20240306.2/kvm/amzn2-kvm-2.0.20240306.2-x86_64.xfs.gpt.qcow2 -fi -#if [ ! -f Rocky-9-GenericCloud.latest.x86_64.qcow2 ] -#then -#wget https://mirrors.vinters.com/rocky/9/images/x86_64/Rocky-9-GenericCloud.latest.x86_64.qcow2 -#fi diff --git a/dev/img-mirror/serve.sh b/dev/img-mirror/serve.sh deleted file mode 100755 index 50868bc..0000000 --- a/dev/img-mirror/serve.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/usr/bin/env bash -kill -9 $(cat PID) > /dev/null 2>&1 -python3 -m http.server -b "::" > log.txt 2>&1 & -echo $! 
> PID diff --git a/docs/SETUP.md b/docs/SETUP.md index 66a7955..cac3d0c 100644 --- a/docs/SETUP.md +++ b/docs/SETUP.md @@ -1,35 +1,28 @@ -# kopsrox setup +# :hammer_and_wrench: - Setup -## :hammer_and_wrench: requirements +## requirements -- Proxmox VE with root access or a user who can 'sudo su' without a password +- Proxmox VE with root access / a user who can 'sudo su' without a password - network with internet access configured in proxmox as a bridge or a proxmox sdn network - a range of 10 free Proxmox qm/virtual machine id 'vmids' eg 600 to 610 - a range of 10 IP's on a network with internet access for kopsrox to work with eg 192.168.0.160 to 192.168.0.170 -## :bricks: install +## install - get one of the releases or stable branches - the 'main' branch can often be a bit broken -- sudo apt install libguestfs-tools python3-termcolor python3-wget -y` -- pip3 install --break-system-packages --user -r requirements.txt` +- sudo apt install libguestfs-tools python3-termcolor python3-wget python3-proxmoxer dhcpcd -y` -## :star: generate api key +## generate api key ( to use below ) `sudo pvesh create /access/users/root@pam/token/kopsrox` `sudo pveum acl modify / --roles Administrator --user root@pam --token 'root@pam!kopsrox'` -## :construction_worker: create kopsrox.ini +## create kopsrox.ini run `./kopsrox.py` and an example _kopsrox.ini_ will be generated - you will need to edit this for your setup -Most values should be hopefully obvious and commented accordingly... - -# kopsrox.ini - -## :computer: cluster_id - -`620` - the proxmox id used for the kopsrox image/template - and the first id in the proxmox qemu virtual machinee "id" range used by kopsrox +Most values should be hopefully easy to work out hopefully. 
kopsrox uses a simple static id/ip assignment scheme based on the `[cluster] - cluster_id` and `[kopsrox] - network_ip` settings @@ -44,7 +37,7 @@ cluster_id = 620 cluster_name = kopsrox ``` -would result in this: +would result in this cluster layout: |-|vmid|ip|type|host| |--|--|--|--|--| diff --git a/kopsrox.py b/kopsrox.py index dc47310..d677645 100755 --- a/kopsrox.py +++ b/kopsrox.py @@ -49,6 +49,7 @@ "reboot" : 'hostname', "k3s-uninstall" : 'hostname', "rejoin-slave" : 'hostname', + "cluster-exec" : 'command', } } diff --git a/lib/kopsrox_config.py b/lib/kopsrox_config.py index fac309d..9383b16 100755 --- a/lib/kopsrox_config.py +++ b/lib/kopsrox_config.py @@ -77,11 +77,11 @@ def conf_check(section,value): # api connection prox = ProxmoxAPI( - conf_check('proxmox','prox_endpoint'), - port=conf_check('proxmox','port'), - user=conf_check('proxmox','user'), - token_name=conf_check('proxmox','token_name'), - token_value=conf_check('proxmox','api_key'), + conf_check('proxmox','proxmox_endpoint'), + port=conf_check('proxmox','proxmox_api_port'), + user=conf_check('proxmox','proxmox_user'), + token_name=conf_check('proxmox','proxmox_token_name'), + token_value=conf_check('proxmox','proxmox_token_value'), verify_ssl=False, timeout=5) @@ -93,7 +93,7 @@ def conf_check(section,value): exit(0) # map passed node name -node = conf_check('proxmox','node') +node = conf_check('proxmox','proxmox_node') # try k8s ping try: diff --git a/lib/kopsrox_ini.py b/lib/kopsrox_ini.py index 6ea398e..c702896 100755 --- a/lib/kopsrox_ini.py +++ b/lib/kopsrox_ini.py @@ -15,27 +15,27 @@ def init_kopsrox_ini(): # proxmox api endpoint config.set('proxmox', '; domain or IP to access proxmox') - config.set('proxmox', 'prox_endpoint', '127.0.0.1') + config.set('proxmox', 'proxmox_endpoint', '127.0.0.1') # proxmox api port config.set('proxmox', '; api port ( usually 8006 ) ') - config.set('proxmox', 'port', '8006') + config.set('proxmox', 'proxmox_api_port', '8006') # username 
config.set('proxmox', '; username to connect with / owner of the API token') - config.set('proxmox', 'user', 'root@pam') + config.set('proxmox', 'proxmox_user', 'root@pam') # api token name config.set('proxmox', '; name of api token') - config.set('proxmox', 'token_name', 'kopsrox') + config.set('proxmox', 'proxmox_token_name', 'kopsrox') # api key config.set('proxmox', '; text of api key') - config.set('proxmox', 'api_key', 'xxxxxxxxxxxxx') + config.set('proxmox', 'proxmox_token_value', 'xxxxxxxxxxxxx') # node to operate on config.set('proxmox', '; the proxmox node that you will run kopsrox on - the image and all nodes are created on this host') - config.set('proxmox', 'node', 'proxmox') + config.set('proxmox', 'proxmox_node', 'proxmox') # storage on node config.set('proxmox', '; the proxmox storage to use for kopsrox - needs to be available on the proxmox node') diff --git a/lib/kopsrox_k3s.py b/lib/kopsrox_k3s.py index f413f44..ac2a75a 100755 --- a/lib/kopsrox_k3s.py +++ b/lib/kopsrox_k3s.py @@ -39,8 +39,9 @@ def k3s_init_node(vmid: int = masterid,nodetype = 'master'): exit(0) # defines - k3s_install_master = f'cat /k3s.sh | sh -s - server --cluster-init' - k3s_install_worker = f'cat /k3s.sh | K3S_URL="https://{network_ip}:6443" ' + k3s_install_version = f'cat /k3s.sh | INSTALL_K3S_VERSION={k3s_version}' + k3s_install_master = f'{k3s_install_version} sh -s - server --cluster-init' + k3s_install_worker = f'{k3s_install_version} K3S_URL="https://{network_ip}:6443" ' master_cmd = '' token = '' @@ -290,16 +291,23 @@ def export_k3s_token(): # cluster info def cluster_info(): - + + # live nodes in cluster + cluster_info_vms = list_kopsrox_vm() + + # check m1 id exists + if not masterid in cluster_info_vms: + kmsg(kname, f'cluster {cluster_name} does not exist', 'err') + exit(0) + kmsg(f'cluster_info', '', 'sys') curr_master = get_kube_vip_master() - info_vms = list_kopsrox_vm() # for kopsrox vms - for vmid in info_vms: + for vmid in cluster_info_vms: if not cluster_id 
== vmid: hostname = vmnames[vmid] - vmstatus = f'[{info_vms[vmid]}] {vmip(vmid)}/{network_mask}' + vmstatus = f'[{cluster_info_vms[vmid]}] {vmip(vmid)}/{network_mask}' if hostname == curr_master: vmstatus += f' vip {network_ip}/{network_mask}' kmsg(f'{hostname}_{vmid}', f'{vmstatus}') diff --git a/lib/kopsrox_kmsg.py b/lib/kopsrox_kmsg.py index a0fe1e0..8b7e1db 100755 --- a/lib/kopsrox_kmsg.py +++ b/lib/kopsrox_kmsg.py @@ -11,8 +11,7 @@ def kmsg(kname = 'kopsrox',msg = 'no msg', sev = 'info'): # print cluster name cprint(knamea[0], "blue",attrs=["bold"], end='') - cprint('-', "magenta",attrs=["bold"], end='') - cprint('<:', "cyan", end='') + print('-', end='') try: if knamea[1] and sev == 'info': @@ -26,5 +25,5 @@ def kmsg(kname = 'kopsrox',msg = 'no msg', sev = 'info'): print(kname,msg) # final output - cprint(':> ', "cyan", end='') - print(msg) + print(f' - {msg}') + diff --git a/lib/verb_cluster.py b/lib/verb_cluster.py index 9d89d0d..cb5b73a 100755 --- a/lib/verb_cluster.py +++ b/lib/verb_cluster.py @@ -2,7 +2,6 @@ # functions from kopsrox_config import * -from kopsrox_proxmox import clone,qaexec from kopsrox_k3s import * # passed command diff --git a/lib/verb_image.py b/lib/verb_image.py index 6803962..99dc648 100755 --- a/lib/verb_image.py +++ b/lib/verb_image.py @@ -19,7 +19,7 @@ # check if image already exists if os.path.isfile(cloud_image): - kmsg(f'image_check', f'{cloud_image} already exists - removing', 'warn') + kmsg(f'{kname}check', f'{cloud_image} already exists - removing') try: os.remove(cloud_image) if os.path.isfile(cloud_image): @@ -56,27 +56,6 @@ # script to run in kopsrox image virtc_script = f'''\ curl -v https://get.k3s.io > /k3s.sh - -if [ ! 
-f /usr/bin/qemu-ga ] -then - if [ -f /bin/yum ] - then - yum install -y qemu-guest-agent - else - apt update && apt install qemu-guest-agent -y - fi -fi - -if [ -f /etc/selinux/config ] -then - sed -i s/enforcing/disabled/g /etc/selinux/config -fi - -if [ -f /etc/sysconfig/qemu-ga ] -then - cp /dev/null /etc/sysconfig/qemu-ga -fi - mkdir -p /var/lib/rancher/k3s/server/manifests/ echo ' apiVersion: helm.cattle.io/v1 @@ -114,9 +93,10 @@ # shouldn't really need root/sudo but run into permissions problems kmsg(f'{kname}virt-customize', 'configuring image') virtc_cmd = f''' -sudo virt-customize -a {cloud_image} \ ---run-command "{virtc_script}" \ ---copy-in {kv_yaml}:/var/lib/rancher/k3s/server/manifests/''' +sudo virt-customize -a {cloud_image} \ +--install qemu-guest-agent \ +--run-command "{virtc_script}" \ +--copy-in {kv_yaml}:/var/lib/rancher/k3s/server/manifests/ > virt-customize.log 2>&1''' local_os_process(virtc_cmd) # destroy template if it exists @@ -128,11 +108,6 @@ # define image desc img_ts = str(datetime.now()) image_desc = f'''
-▗▖ ▗▖ ▗▄▖ ▗▄▄▖  ▗▄▄▖▗▄▄▖  ▗▄▖ ▗▖  ▗▖
-▐▌▗▞▘▐▌ ▐▌▐▌ ▐▌▐▌   ▐▌ ▐▌▐▌ ▐▌ ▝▚▞▘ 
-▐▛▚▖ ▐▌ ▐▌▐▛▀▘  ▝▀▚▖▐▛▀▚▖▐▌ ▐▌  ▐▌  
-▐▌ ▐▌▝▚▄▞▘▐▌   ▗▄▄▞▘▐▌ ▐▌▝▚▄▞▘▗▞▘▝▚▖
-
 cluster_name: {cluster_name}
 cloud_img: {cloud_image}
 k3s_version: {k3s_version}
@@ -165,16 +140,16 @@
   # shell to import disk
   # import-from requires the full path os.getcwd required here
   import_cmd = f'''
-sudo qm set {cluster_id} --scsi0 {storage}:0,import-from={os.getcwd()}/{cloud_image},iothread=true,aio=io_uring
+sudo qm set {cluster_id} --scsi0 {storage}:0,import-from={os.getcwd()}/{cloud_image},iothread=true,aio=native
 mv {cloud_image} {cloud_image}.patched'''
 
   # run shell command to import
+  kmsg(f'{kname}qm-import', f'importing disk')
   local_os_process(import_cmd)
 
   # convert to template via create base disk also vm config
   prox_task(prox.nodes(node).qemu(cluster_id).template.post())
   prox_task(prox.nodes(node).qemu(cluster_id).config.post(template = 1))
-  kmsg(f'{kname}qm-import', f'done')
 
 # image info
 if cmd == 'info':
diff --git a/lib/verb_node.py b/lib/verb_node.py
index e58b937..16f98b5 100755
--- a/lib/verb_node.py
+++ b/lib/verb_node.py
@@ -17,6 +17,15 @@
 # define kname
 kname = 'node_'+cmd
 
+# cluster-exec: run the passed command on every cluster VM (the template vmid is skipped)
+if cmd == 'cluster-exec':
+  for vmid in vms:
+    if vmid != cluster_id:
+      kmsg('node_cluster-exec', f'{vmnames[vmid]} {arg}')
+      os.system(f'sudo qm guest exec {vmid} {arg}')
+  exit(1)
+
+
 # all commands aside from utility require a hostname passed - so check them here
 if cmd not in ['utility']:
 
diff --git a/requirements.txt b/requirements.txt
deleted file mode 100644
index f9b1040..0000000
--- a/requirements.txt
+++ /dev/null
@@ -1 +0,0 @@
-proxmoxer==2.1.0