diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..72da218 --- /dev/null +++ b/.gitignore @@ -0,0 +1,3 @@ +***/kubespray/ + +.server-venv diff --git a/README.md b/README.md index a4e0904..0f0de18 100644 --- a/README.md +++ b/README.md @@ -21,8 +21,6 @@ Use your own server sudo apt install openssh-server sudo systemctl enable ssh sudo systemctl start ssh - sudo ufw enable - sudo ufw allow ssh ``` - #### Disable sleep for the server @@ -45,12 +43,10 @@ Use your own server ssh-copy-id -i ~/.ssh/id_ed25519.pub -o 'IdentityFile ~/.ssh/.key' -p @ ``` -- #### Install ansible +- #### Install python and pip ``` sudo apt update - sudo apt install software-properties-common - sudo add-apt-repository --yes --update ppa:ansible/ansible - sudo apt install ansible + sudo apt install python3 pip ``` - #### Update the `group_vars/all` file to fill out the required information there @@ -95,73 +91,112 @@ Use your own server - You can add `-vvvv` to get more verbose output - #### After the installation + + **REMEMBER**: You can add additional directories for services via the `group_vars` file as well under the `persistence` section. + + ```yaml + - name: spare-disk + host_path: "/mnt/b/downloads" + ``` + + The above section will mount `/mnt/b/downloads` onto the pod as `/data/spare-disk/downloads` + + - ##### [OPTIONAL] Setup Fishet + - Consider setting up [fishnet](https://github.com/lichess-org/fishnet) to help [Lichess](https://lichess.org/) run game analysis! + - Kubernetes installations are also supported and documented [here](https://github.com/lichess-org/fishnet/blob/master/doc/install.md#kubernetes) + - ##### Setup Grafana - - Add the recommended dashboards + - Add the recommended dashboards (Make sure you select the correct job in the variables section, you can default to `kubernetes-service-scraper`) - [Node Exporter Full](https://grafana.com/grafana/dashboards/1860) + - [Loki Kubernetes Logs](https://grafana.com/grafana/dashboards/15141) + - [Container Log Dashboard](https://grafana.com/grafana/dashboards/16966) + - [Sonarr v3](https://grafana.com/grafana/dashboards/12530-sonarr-v3/) + - [Radarr v3](https://grafana.com/grafana/dashboards/12896-radarr-v3/) - [Pods (Aggregated view)](https://grafana.com/grafana/dashboards/8860) - [Monitor Pod CPU and Memory usage](https://grafana.com/grafana/dashboards/15055) - [Node Exporter for Prometheus Dashboard EN v20201010](https://grafana.com/grafana/dashboards/11074) - - [Loki Kubernetes Logs](https://grafana.com/grafana/dashboards/15141) - Would recommend adding a panel with the following query as it is useful to monitor pods as well - For average ``` - avg(irate(container_cpu_usage_seconds_total[2m])) by (pod,container) + avg(irate(container_cpu_usage_seconds_total[2m])) by (name) ``` - You can find information on how to use [Loki](https://grafana.com/oss/loki/) in Grafana [here](https://grafana.com/docs/loki/latest/operations/grafana/) + - ##### Setup Home Assistant + - Portal for adding and monitoring home automation devices (like zigbee devices) + - To add zigbee support to your home assistant backed server, you can buy the [Home Assistant Connect ZBT-1](https://www.home-assistant.io/connectzbt1/) + - Follow onscreen instruction to create an account + - Getting started information is present on the [home-assistant website](https://www.home-assistant.io/getting-started/) + - ##### Setup Jellyfin - Initial setup is just following on-screen instructions. - If asked to select server, delete it and refresh the page. 
- Point Jellyfin to use the directories mentioned in the playbooks for shows, movies, music and books.
- By default, on the Jellyfin pod, the directories will be:
```
- /media/data/shows
- /media/data/movies
- /media/data/music
- /media/data/books
+ /data/root-disk/shows
+ /data/root-disk/movies
+ /data/root-disk/music
+ /data/root-disk/books
```
- Add any other config required.
- Recommend setting up the Open Subtitles plugin which requires creating an account on [their website](https://www.opensubtitles.org/en/?).
- - For Hardware acceleration go to ``Admin > Dashboard > Playback``
- - Enable ``Hardware acceleration``
- - Select ``Video Acceleration API (VAAPI)`` which is setup already to use the **integrated Intel GPU**. Not tested with anything else (like a dedicated AMD/Nvidea GPU)
+ - For Hardware acceleration go to `Admin > Dashboard > Playback`
+ - Enable `Hardware acceleration`
+ - Select `Video Acceleration API (VAAPI)` which is set up already to use the **integrated Intel GPU**. Not tested with anything else (like a dedicated AMD/Nvidia GPU)
- You should see CPU usage drop and GPU usage go up; if you don't, disable it or troubleshoot.
- - You can use the ``intel-gpu-tools`` package to monitor (notice GPU usage when hardware encoding is enabled, and no GPU usage when it is disabled) at least the intel GPU by running the command below on the host:
- ``sudo intel_gpu_top``
+ - You can use the `intel-gpu-tools` package to monitor at least the Intel GPU by running the command below on the host (notice GPU usage when hardware encoding is enabled, and no GPU usage when it is disabled):
+ `sudo intel_gpu_top`
- Select the formats for which hardware acceleration should be enabled
- - Recommend not selecting ```HEVC 10bit``` because for some reason that breaks it
+ - Recommend not selecting `HEVC 10bit` because for some reason that breaks it
- Defaults to CPU/software encoding if hardware acceleration does not work for a file, I think.
- More information on [Jellyfin's page for Hardware Acceleration](https://jellyfin.org/docs/general/administration/hardware-acceleration.html)
+ - Add any plugins you may want
+ - [Trakt](https://trakt.tv/dashboard)
+ - To track the shows you watch
+ - Create a Trakt account
+ - Go to `Admin > Dashboard > Plugins > Catalogue`
+ - Enable Trakt
+ - Restart Jellyfin (shut down the server from the `Dashboard` and k8s will restart it, or delete the pod)
+ - Go to `Admin > Dashboard > Plugins > Trakt`
+ - Select the user
+ - `Authorize Device`
+ - Follow onscreen instructions
+ - Go to `Admin > Dashboard > Scheduled Tasks > Trakt`
+ - Create a daily scheduled task for importing data from and exporting data to trakt.tv
- ##### Setup qBittorrent
- - Default login credentials are admin/adminadmin
+ - Default login credentials are randomly generated; you need to look at the ansible logs to get them.
+ - Look for the substring `You can log into qBittorrent` in the logs to find the creds in the form `admin/`
+ - If `` is not seen, that means a password was already set and a randomly generated password was not used. Please try to remember the password, or reinstall to override the configuration to use default passwords again.
- Change the default login details - - Go to ``Tools > Options > Web UI > Authentication`` + - Go to `Tools > Options > Web UI > Authentication` - Set default download location to one the mentioned directories (or make sure to put it in the right directory when downloading for ease) - - Recommend using ``/media/data/downloads`` + - Go to `Tools > Options > Downloads > Default Save Path` + - Recommend using `/data/root-disk/downloads` - Set seeding limits - - Recommend seeding limits for when seeding ratio hits "0". It is under ``Tools > Options > BitTorrent > Seeding Limits`` + - Recommend seeding limits for when seeding ratio hits "1" to give back to the community. It is under `Tools > Options > BitTorrent > Seeding Limits` - Set torrent download/upload limits - - Recommended to keep 6 active torrents/downloads and 0 uploads. It is under ``Tools > Options > BitTorrent > Torrent Queueing`` + - Recommended to keep 12 active torrents, 6 downloads and 6 uploads. It is under `Tools > Options > BitTorrent > Torrent Queueing` - ##### Setup Calibre - Do base setup - - Set folder to be ``/media/data/books`` and select ``Yes`` for it to rebuild the library if asked. - - Go to ``Preferences > Sharing over the net`` - - Check the box for ``Require username and password to access the Content server`` - - Check the box for ``Run the server automatically when calibre starts`` - - Click on ``Start server`` - - Go to the ``User accounts tab`` and create a user - - Make a note of the credentials for use in ``Readarr`` setup + - Set folder to be `/data/root-disk/books` and select `Yes` for it to rebuild the library if asked. + - Go to `Preferences > Sharing over the net` + - Check the box for `Require username and password to access the Content server` + - Check the box for `Run the server automatically when calibre starts` + - Click on `Start server` + - Go to the `User accounts tab` and create a user + - Make a note of the credentials for use in `Readarr` setup - Restart the app/pod - You can do so by also pressing `CTRL + R` on the main screen - ##### Setup Calibre Web - - Default login is ``admin/admin123`` - - Set folder to be ``/media/data/books`` - - To enable web reading, click on ``Admin`` (case sensitive) on the top right - - Click on the user, default is ``admin`` - - Enable ``Allow ebook viewer`` + - Default login is `admin/admin123` + - Set folder to be `/data/root-disk/books` + - To enable web reading, click on `Admin` (case sensitive) on the top right + - Click on the user, default is `admin` + - Enable `Allow ebook viewer` - Change password to something more secure - Save settings @@ -175,48 +210,70 @@ Use your own server | Radarr | Movies | | Lidarr | Music | - - Go to ``Settings`` and click on ``Show Advanced`` + - Go to `Settings` and click on `Show Advanced` - Enable authentication - - Go to ``Settings > General`` - - Set Authentication to `Forms (Login Page)` + - Set `Authentication` to `Forms (Login Page)` + - Set `Authentication Required` to `Enabled` - Set username and password for access - Add torrent client - - Go to ``Settings > Download Clients > Add > qBittorent > Custom`` - - Add the host: ``qbittorrent`` - - Add the port: ``10095`` - - Add the username: ```` - - Add the password: ```` - - Uncheck the ``Remove Completed`` option. - - When enabled, this seems to delete the downloaded files sometimes. Not sure why. 
+ - Go to `Settings > Download Clients > Add > qBittorrent`
+ - Add the host: `qbittorrent`
+ - Add the port: `10095`
+ - Add the username: ``
+ - Add the password: ``
+ - Enable the `Remove Completed` option.
+ - This will copy the download from the downloads directory to the destination directory for the service. Once the seeding limits are reached, it will delete the torrent and its files from the downloads directory.
+ - More information on [Sonarr's wiki page](https://wiki.servarr.com/sonarr/settings#Torrent_Process) and [Radarr's wiki page](https://wiki.servarr.com/radarr/settings#Torrent_Process) under `Remove Completed Downloads`. They should all work the same way though.
- Set the root directories to be the following
- - Go to ``Settings > Media Management``
-
- | Service | Root Directory |
- |---------|-------------------------|
- | Readarr | ``/media/data/books/`` |
- | Sonarr | ``/media/data/shows/`` |
- | Radarr | ``/media/data/movies/`` |
- | Lidarr | ``/media/data/music/`` |
+ - Go to `Settings > Media Management`
+
+ | Service | Root Directory |
+ |---------|-------------------|
+ | Readarr | `/data/root-disk/books/` |
+ | Sonarr | `/data/root-disk/shows/` |
+ | Radarr | `/data/root-disk/movies/` |
+ | Lidarr | `/data/root-disk/music/` |
- Enable renaming
-
+ - Adjust quality definitions
+ - Go to `Settings > Quality`
+ - Set the `Size Limit` or `Megabytes Per Minute` (or equivalent) to appropriate numbers
+ - This will ensure your downloads are not "too big"
+ - For movies and shows, `2-3GiB/h` would usually be sufficient as the `Preferred` value, and you can leave the `Max` value a bit higher to ensure a better chance of download grabs
+ - Min: 0
+ - Preferred: 30
+ - Max: 70 (you can also use 2000 but you might get bigger files more often)
+ - Go to `Settings > Media Management`
+ - If present, make sure `Use Hardlinks instead of Copy` is enabled
+ - Radarr/Sonarr specific config
+ - Go to `Settings > Profiles`
+ - If present, for all relevant profiles (or just all of them), set the `Language` for the profile to be `Original` (or whatever language you prefer instead) to download the media in that specific language.
+ - **[EXPERIMENTAL]** Enforce downloads of original language media only + - Go to `Settings > Custom Formats` + - Add a new Custom Format with `Language` Condition + - Set `Language: Original` + - Set `Required: True` + - Go to `Settings > Profiles` + - Select all [relevant] profiles and set the following + - `Minimum Custom Format Score` to `0` (sum of the custom formats scores) + - Your new Custom Format's score to be `0` (if the value is lower than the minimum score then downloads will be blocked) - Readarr specific config - - Go to ``Settings > Media Management`` + - Go to `Settings > Media Management` - Add root folder (you cannot edit an existing one) - - Set the path to be ``/media/data/books/`` - - Enable ``Use Calibre`` options the the following defaults - - Calibre host: ``calibre-webserver`` - - Calibre port: ``8081`` - - Calibre Username: ```` - - Calibre Password: ```` - - Enabled ``Rename Books`` and use the defaults + - Set the path to be `/data/root-disk/books/` + - Enable `Use Calibre` options the the following defaults + - Calibre host: `calibre-webserver` + - Calibre port: `8081` + - Calibre Username: `` + - Calibre Password: `` + - Enabled `Rename Books` and use the defaults - ##### Setup Prowlarr - Enable authentication - - Go to ``Settings > General`` - - Set Authentication to `Forms (Login Page)` + - Set `Authentication` to `Forms (Login Page)` + - Set `Authentication Required` to `Enabled` - Set username and password for access - Add `FlareSolverr` service as a proxy, refer to [this](https://trash-guides.info/Prowlarr/prowlarr-setup-flaresolverr/) guide for help - - Go to ``Settings > Indexers`` + - Go to `Settings > Indexers` - Add a new proxy for `FlareSolverr` - Add a tag to it, for example `flaresolverr` - **NOTE:** This tag needs to be used for any indexer that needs to bypass CloudFlare and DDoS-Gaurd protection @@ -226,6 +283,7 @@ Use your own server - Standard ``` 1337x + Add "flaresolverr" tag LimeTorrents The Pirate Bay EZTV @@ -235,11 +293,12 @@ Use your own server Anidex Add with higher priority, example "1", since it has good english subtitled content Add "flaresolverr" tag + Bangumi Moe Nyaa.si Tokyo Toshokan ``` - It is recommended to use private indexers for books and music as they are harder to find otherwise - - Add Sonarr, Radarr, Lidarr and Readarr to the ``Settings > Apps > Application`` section using the correct API token and kubernetes service names + - Add Sonarr, Radarr, Lidarr and Readarr to the `Settings > Apps > Application` section using the correct API token and kubernetes service names - By default prowlarr server will be: ``` http://prowlarr:9696 @@ -258,14 +317,14 @@ Use your own server - ##### Setup Bazarr - Enable authentication - - Go to ``Settings > General`` - - Under ``Security`` select ``Form`` as the form of ``Authentication`` + - Go to `Settings > General` + - Under `Security` select `Form` as the form of `Authentication` - Set username and password for access - Follow the official [Setup Guide](https://wiki.bazarr.media/Getting-Started/Setup-Guide/) - - Go to ``Settings > Radarr`` and ``Settings > Sonarr`` + - Go to `Settings > Radarr` and `Settings > Sonarr` - Click on `Enable` - Fill out the details and save - - Use the API tokens from the respective services, found under ``Settings > General > Security > API Key`` + - Use the API tokens from the respective services, found under `Settings > General > Security > API Key` - Use the kubernetes service name and port | Service Name | Port | @@ -275,29 +334,73 @@ Use your own 
server
- Set a suitable minimum score, probably `70` is fine
- Fill out the path mappings if the directories in which data is stored are different for both services (by default both services will use the same directory to access data, so you don't need to change anything for a default install)
- - Go to ``Settings > Languages``
+ - Go to `Settings > Languages`
- Add a language profile and set defaults for movies and series
+ - You may need to set language filters first before being able to create a profile with the languages in them
+ - Add both hearing-impaired and regular ones, to increase your chances
- - Go to ``Settings > Provider`` and add providers for subtitles
+ - Go to `Settings > Provider` and add providers for subtitles
- Decent options are:
- Opensubtitles.com
- TVSubtitles
- YIFY Subtitles
- Supersubtitles
- - Go to ``Settings > Subtitles`` and make changes if needed
+ - Go to `Settings > Subtitles` and make changes if needed
- Manually add the language profile to all the scanned media after first installation
- NOTE:
- If it doesn't work, manually restart the pod a few times. It just works, not sure why. If that doesn't work, try reinstalling.
+ - ##### Setup Jellyseerr
+ - One stop shop for Sonarr/Radarr requests
+ - Run the first time setup for Jellyfin
+ - `Choose Server Type`
+ - Select `Jellyfin`
+ - `Account sign in`
+ - Jellyfin URL: `http://jellyfin:8096`
+ - Email Address: ``
+ - Username: ``
+ - Password: ``
+ - You can then log in using your Jellyfin credentials
+ - If you do not wish to do so, set a local user password by editing your account under `Users` to log in with your email ID instead
+ - `Configure Media Server`
+ - Click on `Sync Libraries`
+ - Enable all Libraries that get listed
+ - Also run a manual scan
+ - `Configure Services`
+ - Set up all the services
+ - Use the correct API keys, hostnames and ports for the services
+ | Service Name | Port |
+ |--------------|------|
+ | jellyfin | 8096 |
+ | sonarr | 8989 |
+ | radarr | 7878 |
+ - Quality profile can be `HD-1080p` or `HD - 720/1080p`
+ - Select the applicable root folders
+ - Check relevant options that suit your needs
+ - General
+ - Enable `Tag Requests`
+ - Enable `Scan`
+ - Enable `Default Server`
+ - Sonarr specific
+ - Enable `Season Folders`
+ - Go to `Users` and either add new users or import from Jellyfin directly
+ - This is not required by default
+ - Give them `Manage Requests` and other permissions for ease where applicable
+ - Go to `Settings -> Users` and give them all `Auto-Approve` and `Auto-Request` Permissions by default for ease.
+
+ - ##### Setup Immich
+ - Just follow onscreen instructions to create an account
+ - Set up the config as you please from there!
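+
+ - ##### [OPTIONAL] Verify in-cluster service names and ports
+   - The sections above reference in-cluster hostnames and ports such as `jellyfin:8096`, `sonarr:8989` and `radarr:7878`. The sketch below is one way to confirm them from inside the cluster before wiring services together; the `generic-services` namespace is an assumption, so swap in whichever namespace your charts were installed into.
+     ```bash
+     # List services to confirm the hostnames and ports referenced above
+     # (the namespace is an assumption -- change it to match your install)
+     kubectl get svc -n generic-services
+
+     # Quick reachability check from a throwaway curl pod inside the cluster
+     kubectl run curl-test -n generic-services --rm -it --restart=Never \
+       --image=curlimages/curl --command -- curl -s -o /dev/null -w '%{http_code}\n' http://sonarr:8989
+     ```
+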
+ - ##### Setup Ombi - One stop shop for Sonarr/Radarr/Lidarr requests - Get the API keys for Jellyfin, Sonarr and Radarr - Jellyfin - - Go to ``Admin > Dashboard > API Keys`` + - Go to `Admin > Dashboard > API Keys` - Generate a new API key with an appropriate name - Sonarr/Radarr/Lidarr - - Use the API tokens from the respective services, found under ``Settings > General > Security > API Key`` + - Use the API tokens from the respective services, found under `Settings > General > Security > API Key` - Set credentials for login - - Go to ``Settings`` + - Go to `Settings` - Use the correct API keys, hostnames and ports for the services | Service Name | Port | |--------------|------| @@ -305,53 +408,32 @@ Use your own server | sonarr | 8989 | | radarr | 7878 | | lidarr | 8686 | - - Click on the ``Load Profiles`` and ``Load Root Folders`` buttons and use the appropriate defaults as used in the services seen [here](#setup-radarrsonarrreadarrlidarr). - - Setup ``Movies`` using ``Radarr`` - - Setup ``TV`` using ``Sonarr`` - - Enable the ``Enable season folders`` option - - Enable the ``V3`` option - - Setup ``Music`` using ``Lidarr`` - - Setup ``Media Server`` using ``Jellyfin`` - - **Dont forget to click on ``Enable`` for each of those setups as well** - - Go to ``Users`` + - Click on the `Load Profiles` and `Load Root Folders` buttons and use the appropriate defaults as used in the services seen [here](#setup-radarrsonarrreadarrlidarr). + - Setup `Movies` using `Radarr` + - Setup `TV` using `Sonarr` + - Enable the `Enable season folders` option + - Enable the `V3` option + - Setup `Music` using `Lidarr` + - Setup `Media Server` using `Jellyfin` + - **Dont forget to click on `Enable` for each of those setups as well** + - Go to `Users` - Setup additional users - Give the following roles to *trusted* users for convinience ``` - AutoApproveMusic - RequestMovie - AutoApproveTv - RequestMusic - AutoApproveMovie - RequestTv + Request Tv + Request Movie + Request Music + Auto Approve Tv + Auto Approve Movie + Auto Approve Music ``` - - ##### Setup Minikube for remote access - - Use the kubeconfig file copied over to the current working directory by exporting it - - `export KUBECONFIG=` - - Optionally, edit your local `~/.kube/config` and incorporate the information from the copied over kubeconfig into it - - **NOTE:** - - The port on which kube-apiserver is forwarded to, 3001 by default, should not be exposed to the internet (i.e., should be LAN access only) because anyone will be able to access it. - - The way it is set up at the moment, the certs dont really do anything. The apiserver itself is directly accessible without any authentication. - - See [issue #12](https://github.com/Kimi450/ubuntu_server/issues/12)). - - By default, `ansible_host` from the `hosts.yaml` file is used as the IP in the kubeconfig file. 
It is **strongly recommended** that you change that to the LAN IP of the server (to not have to port forward this on your router to access it) - - ##### Use Squid - Use the username and password from the `group_vars/all` file to use this as a proxy server - The address would be `:` or `:` or `:` - ##### Use Sambashare - For external access: - - The following info was retrieved by running `sudo ufw status verbose | grep -i samba` on the server which lists what ports were exposed as part of `sudo ufw allow samba` - - Expose the following ports for TCP - ``` - 139 - 445 - ``` - - Expose the following ports for UDP - ``` - 137 - 138 - ``` - To authenticate - Thee username will be the `` you used in the `hosts.yaml` file - The password will be in the `group_vars/all` file (`smb.password` section). @@ -362,7 +444,7 @@ Use your own server - You need to create DNS entries to access the Ingress services. The following entries are recommended: - `*.` - `` - - You can port forward the following ports on your router to gain external access. On your router: + - Setup NAT-ing for the the following ports on your router to gain external access. On your router: - Set a static IP for your server (if applicable) so the router doesnt assign a different IP to the machine breaking your port-forwarding setup - Following are some sample rules based on the `all` file defaults for port forwarding, feel free to tweak to your needs. @@ -371,19 +453,131 @@ Use your own server | ssh | ssh | `` or `` | 22 | `` | | samba | proxy | `\\\` or `\\\` | TCP: `139,445`, UDP: `137,138` | `` | | squid | proxy | `:` or `:` | `` | `` | - | grafana | Ingress | `grafana.` | 8080 | 80 | - | jellyfin | Ingress | `jellyin.` | 8080 | 80 | - | ombi | Ingress | `ombi.` | 8080 | 80 | - | prowlarr | Ingress | `prowlarr.` | 8080 | 80 | - | bazarr | Ingress | `bazarr.` | 8080 | 80 | - | radarr | Ingress | `radarr.` | 8080 | 80 | - | sonarr | Ingress | `sonarr.` | 8080 | 80 | - | readarr | Ingress | `readarr.` | 8080 | 80 | - | lidarr | Ingress | `lidarr.` | 8080 | 80 | - | librespeed | Ingress | `librespeed.` | 8080 | 80 | - | calibre-web | Ingress | `calibre-web.` | 8080 | 80 | - | calibre | LAN | `:3002` (No ingress rules defined) | 3002 | `` | - | minikube | LAN api-access | `:3001` | 3001 | `` | + | grafana | Ingress | `grafana.` | 30080 (HTTP) / 30443 (HTTPS) | 80 (HTTP) / 443 (HTTPS) | + | jellyfin | Ingress | `jellyfin.` | 30080 (HTTP) / 30443 (HTTPS) | 80 (HTTP) / 443 (HTTPS) | + | jellyseerr | Ingress | `jellyseerr.` | 30080 (HTTP) / 30443 (HTTPS) | 80 (HTTP) / 443 (HTTPS) | + | ombi | Ingress | `ombi.` | 30080 (HTTP) / 30443 (HTTPS) | 80 (HTTP) / 443 (HTTPS) | + | prowlarr | Ingress | `prowlarr.` | 30080 (HTTP) / 30443 (HTTPS) | 80 (HTTP) / 443 (HTTPS) | + | bazarr | Ingress | `bazarr.` | 30080 (HTTP) / 30443 (HTTPS) | 80 (HTTP) / 443 (HTTPS) | + | radarr | Ingress | `radarr.` | 30080 (HTTP) / 30443 (HTTPS) | 80 (HTTP) / 443 (HTTPS) | + | sonarr | Ingress | `sonarr.` | 30080 (HTTP) / 30443 (HTTPS) | 80 (HTTP) / 443 (HTTPS) | + | readarr | Ingress | `readarr.` | 30080 (HTTP) / 30443 (HTTPS) | 80 (HTTP) / 443 (HTTPS) | + | lidarr | Ingress | `lidarr.` | 30080 (HTTP) / 30443 (HTTPS) | 80 (HTTP) / 443 (HTTPS) | + | immich | Ingress | `immich.` | 30080 (HTTP) / 30443 (HTTPS) | 80 (HTTP) / 443 (HTTPS) | + | librespeed | Ingress | `librespeed.` | 30080 (HTTP) / 30443 (HTTPS) | 80 (HTTP) / 443 (HTTPS) | + | calibre-web | Ingress | `calibre-web.` | 30080 (HTTP) / 30443 (HTTPS) | 80 (HTTP) / 443 (HTTPS) | + | calibre | LAN | `:30000` (No 
ingress rules defined) | 30100 | `` |
NOTE: Security is an unknown when exposing a service to the internet.
+ - If you cannot do NAT setup on your router and need the server to run ingress on 80 and 443, you can use this [post's answer](https://stackoverflow.com/questions/55907537/how-to-expose-kubernetes-service-on-prem-using-443-80) to run the ingress controller on the host network
+ ```yaml
+ kind: ...
+ apiVersion: apps/v1
+ metadata:
+   name: nginx-ingress-controller
+ spec:
+   ...
+   template:
+     spec:
+       hostNetwork: true <---------- Add this
+       containers:
+         - name: nginx-ingress-lb
+           image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.21.0
+           ports:
+             - name: http
+               hostPort: 80 <---------- Add this
+               containerPort: 80
+               protocol: TCP
+             - name: https
+               hostPort: 443 <---------- Add this
+               containerPort: 443
+               protocol: TCP
+   ...
+ ```
+# Appendix
+
+## Kubernetes metrics server
+
+For troubleshooting, it might be useful to run the metrics server on your cluster.
+
+Refer to this [guide](https://medium.com/@cloudspinx/fix-error-metrics-api-not-available-in-kubernetes-aa10766e1c2f) for it.
+
+## Prometheus TSDB Backup Restore
+
+In case of a migration, you may want to migrate the Prometheus data along with the app backups stored in the server's app-config dir.
+
+Resources:
+- https://devopstales.github.io/home/backup-and-retore-prometheus/
+- https://prometheus.io/docs/prometheus/latest/querying/api/
+- https://gist.github.com/ksingh7/d5e4414d92241e0802e59fa4c585b98b
+
+### Enable admin API
+
+```bash
+kubectl -n monitoring patch prometheus kube-prometheus-stack-prometheus --type merge --patch '{"spec":{"enableAdminAPI":true}}'
+```
+
+### Verify admin API is enabled
+
+```bash
+kubectl describe pod -n monitoring prometheus-kube-prometheus-stack-prometheus-0 | grep -i admin
+```
+
+You should see the following flag:
+
+```bash
+ --web.enable-admin-api
+```
+
+### Create TSDB snapshot
+
+Start port forwarding in a different terminal and leave it running:
+
+```bash
+kubectl -n monitoring port-forward svc/kube-prometheus-stack-prometheus 9090
+```
+
+Take a snapshot:
+
+```bash
+curl -v -X 'POST' -ks 'localhost:9090/api/v1/admin/tsdb/snapshot'
+```
+
+### Download TSDB snapshot
+
+#### Option 1: Download from pod to host
+
+```bash
+TMP_DIR=$(mktemp -d)
+kubectl cp -c prometheus prometheus-kube-prometheus-stack-prometheus-0:/prometheus/snapshots ${TMP_DIR}
+```
+
+#### Option 2: Find the PV on your host and make a backup of the contents [RECOMMENDED]
+
+This is easier in the context of this server's setup.
+
+```bash
+export TMP_DIR=$(mktemp -d)
+
+export PV_DIR=$(kubectl get pv -o yaml $(kubectl get pv | grep monitoring/prometheus-kube-prometheus-stack-prometheus-db-prometheus-kube-prometheus-stack-prometheus-0 | cut -d' ' -f1) | grep "path:" | cut -d " " -f 6)
+
+cp -r ${PV_DIR}/prometheus-db/snapshots/* ${TMP_DIR}
+```
+
+### Restore Backup
+
+Copy over your backup to any other host if applicable.
+
+```bash
+export PV_DIR=$(kubectl get pv -o yaml $(kubectl get pv | grep monitoring/prometheus-kube-prometheus-stack-prometheus-db-prometheus-kube-prometheus-stack-prometheus-0 | cut -d' ' -f1) | grep "path:" | cut -d " " -f 6)
+
+# clear dir.
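+# NOTE: if the PV is a hostPath volume, its path can also be read with a jsonpath
+# query instead of grepping the rendered YAML -- a sketch, adjust the PV name to yours:
+#   kubectl get pv <pv-name> -o jsonpath='{.spec.hostPath.path}'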
Might not be needed +rm -rf ${PV_DIR}/prometheus-db/* + +# copy over old data +mv ${TMP_DIR}/* ${PV_DIR}/prometheus-db/ +``` + +## Network troubleshooting tools +This repo will be of use: https://github.com/nicolaka/netshoot diff --git a/charts_config/skeleton.service b/charts_config/skeleton.service deleted file mode 100644 index c85e657..0000000 --- a/charts_config/skeleton.service +++ /dev/null @@ -1,18 +0,0 @@ -[Unit] -Description=Setup a secure tunnel -StartLimitInterval=0 -After=network.target - -[Service] -Type=simple -RemainAfterExit=no -# After= didnt work -# https://unix.stackexchange.com/questions/213185/restarting-systemd-service-on-dependency-failure -ExecStartPre=minikube status -ExecStart=PLACEHOLDER -Restart=always -RestartSec=5 -User=PLACEHOLDER - -[Install] -WantedBy=multi-user.target diff --git a/deprecated_playbooks/spotify/install-and-configure-spotifyd.yaml b/deprecated_playbooks/spotify/install-and-configure-spotifyd.yaml index a096161..308c598 100644 --- a/deprecated_playbooks/spotify/install-and-configure-spotifyd.yaml +++ b/deprecated_playbooks/spotify/install-and-configure-spotifyd.yaml @@ -1,9 +1,9 @@ --- - name: Install and configure spotifyd - hosts: home-main + hosts: all gather_facts: false vars: - repo_dir: "{{ dir_repos }}/spotifyd" + repo_dir: "{{ basics.repos_dir }}/spotifyd" systemd_file_location: /etc/systemd/system/ tasks: # https://spotifyd.github.io/spotifyd/installation/Ubuntu.html#building-spotifyd diff --git a/docker-compose.yaml b/docker-compose.yaml new file mode 100644 index 0000000..8518dc9 --- /dev/null +++ b/docker-compose.yaml @@ -0,0 +1,50 @@ +version: "3.3" + +networks: + loki: + +services: + loki: + image: grafana/loki:latest + ports: + - "3100:3100" + command: -config.file=/etc/loki/local-config.yaml + networks: + - loki + + promtail: + image: grafana/promtail:latest + volumes: + - /var/log:/var/log + command: -config.file=/etc/promtail/config.yml + networks: + - loki + + grafana: + environment: + + - GF_FEATURE_TOGGLES_ENABLE=alertingSimplifiedRouting,alertingQueryAndExpressionsStepMode + entrypoint: + - sh + - -euc + - | + mkdir -p /etc/grafana/provisioning/datasources + cat < /etc/grafana/provisioning/datasources/ds.yaml + apiVersion: 1 + datasources: + - name: Loki + type: loki + access: proxy + orgId: 1 + url: http://loki:3100 + basicAuth: false + isDefault: true + version: 1 + editable: false + EOF + /run.sh + image: grafana/grafana:latest + ports: + - "3000:3000" + networks: + - loki diff --git a/group_vars/all b/group_vars/all index 0af1e04..72d12b0 100644 --- a/group_vars/all +++ b/group_vars/all @@ -6,135 +6,223 @@ domain_name: "" # FILL OUT # for basic server setup (in my opinion) basics: - enabled: True + enabled: false + # reboot machine + reboot: false + + # user's home dir + home_dir: "/home/{{ ansible_user }}" + + # dir to pull repos into + repos_dir: "/home/{{ ansible_user }}/repos" # vnc for remote desktop with GUI vnc: - enabled: True + enabled: false # will be truncated to 8 characters password: "" # FILL OUT # display for what port will be accessed # use 0-9 only for access port to be 590{{display}} display: "4" + # working directory for the VNC session + working_dir: "/home/{{ ansible_user }}" + # squid credentials, for proxy squid: - enabled: True + enabled: false username: "" # FILL OUT password: "" # FILL OUT port: "3128" # sambashare for network attached storage smb: - enabled: True + enabled: false password: "" # FILL OUT share_name: "sambashare" + # the directory to share + share_dir: "/home/{{ ansible_user 
}}" + # If you are using cloudflare and want to auto update entries # when the server IP changes # NOTE: It doesnt add entries, refer to the README.md file for instructions cloudflare: - enabled: True + enabled: false + # Set your Cloudflare Zone ID zone_id: "" # FILL OUT + # Set your Cloudflare Bearer token to view the zone and edit DNS records in it auth_key: "" # FILL OUT - # WORK_IN_PROGRESS: Set your Cloudfare API token for editing zone DNS - api_token: "WORK_IN_PROGRESS" # ==== Select what charts you wish to install ==== -charts: - enabled: True - resources: - # kubernetes resource requests for the pods - requests: - cpu: 10m - memory: 50M - # kubernetes resource limits for the pods - # probably should match these with the minikube resources from below - limits: - cpu: 4 - memory: "6000M" - # timeout as helm expects it in --timeout for the charts - timeout: 15m - services: - # log monitoring with Loki (Needs Grafana for a UI) - loki_stack: - enabled: True - # metrics monitoring with Grafana and Prometheus - kube_prometheus_stack: - enabled: True - grafana: - admin_username: "admin" # FILL OUT - admin_password: "admin" # FILL OUT - # media client - jellyfin: - enabled: True - # torrents - qbittorrent: - enabled: True - # to bypass cloudflare protection - flaresolverr: - enabled: True - # to manage indexers - prowlarr: - enabled: True - # movie monitoring - radarr: - enabled: True - # show monitoring - sonarr: - enabled: True - # subtitle hunting - bazarr: - enabled: True - # book monitoring - readarr: - enabled: False - # music monitoring - lidarr: - enabled: False - # movie and show interface - ombi: - enabled: True - # speed test to server - librespeed: - enabled: True - # book client - calibre_web: - enabled: False - # book management - calibre: - enabled: False - -# ==== To install Docker, helm and kubectl ==== - -cloud_native: - enabled: True - -# ==== Minikube settings ==== - -minikube: # cli param values for minikube - enabled: True - # This cant be less than 2, it is the minimum required for kubernetes - cpus: "max" - # This cant be less than 1800, it is the minimum required for kubernetes - memory: "6000" - disk_size: "50gb" - nodes: "1" - cert_expiration: "87660h0m0s" - remote_access: - enabled: True +services: + # directory in which the apps will store their configs + configs_dir: "/home/{{ ansible_user }}/data/app-configs" + + # nginx ingress controller for reverse proxy + traefik: + enabled: false + + # manager for tls certs + cert_manager: + enabled: false + # email for the ACME account registration + # FYI: you dont need to create any account for this, just use an email ID + email: "" # FILL OUT + + # metrics monitoring with Grafana and Prometheus + monitoring: + enabled: true + admin_username: "admin" # FILL OUT + admin_password: "admin" # FILL OUT + + # cloud native instance of postgres DB + cnpg: + enabled: false + + # google photos alternative + # NOTE: REQUIRES YOU TO HAVE CNPG INSTALLED BEFORE + # - Either set `cnpg.enabled` to false or + # - make sure you have installed it first (incase of an upgrade) + immich: + enabled: false + # base dir for all immich info, including backups, cache, etc + # look at `install-charts.yaml` for more information + host_path: "/home/{{ ansible_user }}/data/immich" + + # home automation portal + home_assistant: + enabled: false + + # media client + jellyfin: + enabled: false + + # torrents + qbittorrent: + enabled: false + + # dirs to download files + # you can specify different mounts present on your server + # NOTE: The 'name' 
must follow the following regex validation: '[a-z0-9]([-a-z0-9]*[a-z0-9])?' + # If you dont know what that means, just use lower case characters + # and dashes to separate them or a mix of upper and lowercase characters + persistence: + - name: root-disk + host_path: "/home/{{ ansible_user }}/data/downloads" + # - name: spare-disk + # host_path: "/mnt/b/downloads" + + # to bypass cloudflare protection + flaresolverr: + enabled: false + + # to manage indexers + prowlarr: + enabled: false + + # movie monitoring + radarr: + enabled: false + + # dirs to store and organise movies + # you can specify different mounts present on your server + # NOTE: The 'name' must follow the following regex validation: '[a-z0-9]([-a-z0-9]*[a-z0-9])?' + # If you dont know what that means, just use lower case characters + # and dashes to separate them or a mix of upper and lowercase characters + persistence: + - name: root-disk + host_path: "/home/{{ ansible_user }}/data/movies" + # - name: spare-disk + # host_path: "/mnt/b/movies" + + # show monitoring + sonarr: + enabled: false + + # dirs to store and organise shows + # you can specify different mounts present on your server + # NOTE: The 'name' must follow the following regex validation: '[a-z0-9]([-a-z0-9]*[a-z0-9])?' + # If you dont know what that means, just use lower case characters + # and dashes to separate them or a mix of upper and lowercase characters + persistence: + - name: root-disk + host_path: "/home/{{ ansible_user }}/data/shows" + # - name: spare-disk + # host_path: "/mnt/b/shows" + + # subtitle hunting + bazarr: + enabled: false + + # movie and show interface + jellyseerr: + enabled: false + + # speed test to server + librespeed: + enabled: false + + # book monitoring + readarr: + enabled: false + + # dirs to store and organise books + # you can specify different mounts present on your server + # NOTE: The 'name' must follow the following regex validation: '[a-z0-9]([-a-z0-9]*[a-z0-9])?' + # If you dont know what that means, just use lower case characters + # and dashes to separate them or a mix of upper and lowercase characters + persistence: + - name: root-disk + host_path: "/home/{{ ansible_user }}/data/books" + # - name: spare-disk + # host_path: "/mnt/b/books" + + # music monitoring + lidarr: + enabled: false + + # dirs to store and organise music + # you can specify different mounts present on your server + # NOTE: The 'name' must follow the following regex validation: '[a-z0-9]([-a-z0-9]*[a-z0-9])?' 
+ # If you dont know what that means, just use lower case characters + # and dashes to separate them or a mix of upper and lowercase characters + persistence: + - name: root-disk + host_path: "/home/{{ ansible_user }}/data/music" + # - name: spare-disk + # host_path: "/mnt/b/music" + + # book client + calibre_web: + enabled: false + + # book management + calibre: + enabled: false + + # movie, show and music interface + ombi: + enabled: false + + +# ==== k8s settings ==== + +docker: + enabled: false + # to allow pinless bluetooth connections to the server bluetooth: pinless: - enabled: False + enabled: false # ==== For the Archived playbooks ==== spotify: # spotify settings - enabled: False + enabled: false username: "NO_NEED_TO_FILL" password: "NO_NEED_TO_FILL" init_volume: "69" diff --git a/hosts.yaml b/hosts.yaml index be93dd8..6a6aba1 100644 --- a/hosts.yaml +++ b/hosts.yaml @@ -1,15 +1,14 @@ all: hosts: home-main: - ansible_host: - ansible_port: - ansible_user: - # will ignore password if not required - ansible_sudo_pass: + ansible_host: "" # FILL OUT + ansible_port: 22 # FILL OUT + ansible_user: "" # FILL OUT + ansible_sudo_pass: "" # FILL OUT # adding multiple hosts wont play well with the group_vars file probably with the current setup # So use the below as just an example # home-throwaway: - # ansible_host: 192.168.178.3 - # ansible_port: 2002 + # ansible_host: 192.168.178.95 + # ansible_port: 22 # ansible_user: kimi450 # ansible_sudo_pass: i_HaVe_A_gOoD_pAsSwOrD+123! diff --git a/install-and-configure-minikube.yaml b/install-and-configure-minikube.yaml deleted file mode 100644 index c6aa7c6..0000000 --- a/install-and-configure-minikube.yaml +++ /dev/null @@ -1,123 +0,0 @@ ---- -- name: Install and configure Minikube - hosts: all - gather_facts: false - vars: - service_file_location: /etc/systemd/system/minikube.service - service_name_minikube: minikube.service - port_expose_file_location: /etc/minikube-apiserver-port-forward.conf - tasks: - - name: Generate minikube file name - command: "echo -n minikube_latest_{{ architecture }}.deb" - register: minikube_file_name - - - name: Install Minikube - # https://minikube.sigs.k8s.io/docs/start/ - block: - - name: Download minikube package - shell: "curl -LO https://storage.googleapis.com/minikube/releases/latest/{{ minikube_file_name.stdout }}" - - - name: Install minikube - become: true - shell: "dpkg -i {{ minikube_file_name.stdout }}" - - - name: Delete minikube package - file: - path: "{{ minikube_file_name.stdout }}" - state: absent - - - name: Setup and enable systemd service - block: - - name: Copy over the service file for systemd - become: true - copy: - src: minikube_config/{{ service_name_minikube }} - dest: "{{ service_file_location }}" - - - name: "Service file edit: Add command to start minikube" - become: true - lineinfile: - path: "{{ service_file_location }}" - regexp: '^ExecStartPre=(.*)$' - line: 'ExecStartPre=/usr/bin/minikube start --embed-certs=true --mount --mount-string "{{ dir_home }}:{{ dir_minikube_mount }}" --cpus="{{ minikube.cpus }}" --memory="{{ minikube.memory }}" --disk-size="{{ minikube.disk_size }}" --nodes="{{ minikube.nodes }}" --cert-expiration="{{ minikube.cert_expiration }}"' - backrefs: yes - - - name: "Service file edit: Add reverse proxy script" - become: true - lineinfile: - path: "{{ service_file_location }}" - regexp: '^ExecStart=(.*)$' - line: "ExecStart=kubectl proxy --address='0.0.0.0' --port={{ api_server_forwarded_port }} --accept-hosts='.*'" - backrefs: yes - - - name: "Service file 
edit: Change User to be {{ ansible_user }}" - become: True - # https://gist.github.com/drmalex07/c0f9304deea566842490 - lineinfile: - path: "{{ service_file_location }}" - regexp: '^User=(.*)$' - line: "User={{ ansible_user }}" - backrefs: yes - - - name: Reload systemd service - include_tasks: tasks-reload-systemd-service.yaml - vars: - service_name: "{{ service_name_minikube }}" - become: True - - - name: Minikube notes - debug: - msg: > - Directory '{{ dir_home }}' mounted on the minikube container at - '{{ dir_minikube_mount }}' with UID '{{ uid }}''. - More info https://github.com/kubernetes/minikube/pull/8159" - - - name: Setup nginx ingress controller - block: - - name: Enable nginx ingress controller in minikube - shell: minikube addons enable ingress - retries: 100000 - delay: 10 - register: result - until: result.rc == 0 - - - name: Expose nginx ingress controller - include_tasks: tasks-kubernetes-port-forward-service.yaml - vars: - service_name: SKIP - service_file_name: nginx-ingress-controller.service - service_file_location_base: /etc/systemd/system/ - host_port: 8080 - service_port: 80 - - - name: Expose nginx ingress controller for SSL - include_tasks: tasks-kubernetes-port-forward-service.yaml - vars: - service_name: SKIP - service_file_name: nginx-ingress-controller-ssl.service - service_file_location_base: /etc/systemd/system/ - host_port: 8081 - service_port: 443 - - - name: Allow ports '8080' and '8081' for nginx - include_tasks: tasks-allow-ports.yaml - vars: - ports: - - "8080" - - "8081" - - - debug: - msg: You can access the ingress controller at port '8080' and '8081' for HTTP and HTTPS. - - - name: "Allow port '{{ api_server_forwarded_port }}' for api server" - include_tasks: tasks-allow-ports.yaml - vars: - ports: - - "{{ api_server_forwarded_port }}" - - - name: Wait till minikube is running - shell: minikube status - retries: 100000 - delay: 10 - register: result - until: result.rc == 0 \ No newline at end of file diff --git a/install-and-configure-samba.yaml b/install-and-configure-samba.yaml index 1ad5e55..1c33192 100644 --- a/install-and-configure-samba.yaml +++ b/install-and-configure-samba.yaml @@ -3,7 +3,6 @@ hosts: all gather_facts: True vars: - share_directory: "{{ dir_home }}" config_location: /etc/samba/smb.conf tasks: # https://ubuntu.com/tutorials/install-and-configure-samba#1-overview @@ -14,9 +13,9 @@ pkg: samba state: latest - - name: "Create {{ share_directory }} if it does not exist" + - name: "Create {{ smb.share_dir }} if it does not exist" ansible.builtin.file: - path: "{{ share_directory }}" + path: "{{ smb.share_dir }}" state: directory mode: '0755' @@ -27,7 +26,7 @@ block: | [{{ smb.share_name }}] comment = Samba on Ubuntu - path = {{ share_directory }} + path = {{ smb.share_dir }} read only = no browsable = yes @@ -35,15 +34,6 @@ become: True shell: service smbd restart - - include_tasks: tasks-allow-ports.yaml - # Following ports on respective protocols are exposed because of this - # Run `sudo ufw status verbose | grep Samba` to verify - # UDP: 137,138 - # TCP: 139,445 - vars: - ports: - - "samba" - - name: Set password become: True shell: "(echo \"{{ smb.password }}\"; echo \"{{ smb.password }}\") | smbpasswd -s -a {{ ansible_user }}" diff --git a/install-and-configure-squid.yaml b/install-and-configure-squid.yaml index be8667a..7c9bf6c 100644 --- a/install-and-configure-squid.yaml +++ b/install-and-configure-squid.yaml @@ -80,11 +80,6 @@ service_name: "squid" become: True - - include_tasks: tasks-allow-ports.yaml - vars: - ports: - 
- "{{ squid.port }}" - - debug: msg: >- You can now use this server as a proxy. The hostname is diff --git a/install-charts.yaml b/install-charts.yaml deleted file mode 100644 index 8e33378..0000000 --- a/install-charts.yaml +++ /dev/null @@ -1,610 +0,0 @@ ---- -- name: Install basic charts - hosts: all - gather_facts: true - vars: - namespace_user: "{{ ansible_user }}" - namespace_monitoring: monitoring - namespace_generic_services: generic-services - - helm_kube_prometheus_stack_loki_config: "" # default - - # The following has been set to disable Truecharts own injection - # of manifests for SCALE products I believe either way I dont need it: - # - manifestManager.enabled=false - helm_common_general: "\ - global.addMetalLBAnnotations=false,\ - global.addTraefikAnnotations=false,\ - operator.verify.enabled=false,\ - manifestManager.enabled=false,\ - workload.main.type=StatefulSet,\ - podOptions.dnsConfig.nameservers={8.8.8.8,8.8.4.4}" - - helm_common_ingress: "\ - ingress.main.enabled=true,\ - ingress.main.primary=false,\ - ingress.main.ingressClassName=nginx,\ - ingress.main.fixedMiddlewares={},\ - ingress.main.enableFixedMiddlewares=false,\ - ingress.main.hosts[0].paths[0].path='/',\ - ingress.main.hosts[0].paths[0].pathType='Prefix'" - - helm_common_resources: "\ - resources.requests.cpu={{ charts.resources.requests.cpu }},\ - resources.requests.memory={{ charts.resources.requests.memory }},\ - resources.limits.cpu={{ charts.resources.limits.cpu }},\ - resources.limits.memory={{ charts.resources.limits.memory }}" - - # persistence.config.hostPathType=DirectoryOrCreate doesnt work - # because it creates the dir with root ownership and no write permissions - helm_common_persistence: "\ - persistence.shared.enabled=false,\ - persistence.shm.enabled=false,\ - persistence.temp.enabled=false,\ - persistence.varlogs.enabled=False,\ - persistence.config.enabled=true,\ - persistence.config.size=1Gi" - - helm_common_persistence_config: "\ - persistence.config.enabled=true,\ - persistence.config.type=hostPath,\ - persistence.config.mountPath=/config" - - helm_common_persistence_media: "\ - persistence.media.enabled=true,\ - persistence.media.type=hostPath,\ - persistence.media.mountPath={{ dir_mount_path }},\ - persistence.media.hostPath={{ dir_minikube_mount }}" - - # readOnlyRootFilesystem=false is needed for the app to be able to take - # backups in /config/Backups - # runAsUser={{ uid }} gives write access on the pod - helm_common_security_contexts: "\ - securityContext.container.readOnlyRootFilesystem=false,\ - securityContext.container.runAsUser={{ uid }},\ - securityContext.container.runAsGroup=568" - - tasks: - - name: Create namespaces namespace - shell: "kubectl create namespace {{ item }} --dry-run=client -o yaml | kubectl apply -f -" - with_items: - - "{{ namespace_user }}" - - - name: Install loki-stack - when: charts.services.loki_stack.enabled - block: - - name: Install/Upgrade the loki-stack chart - include_tasks: tasks-install-chart.yaml - # https://artifacthub.io/packages/helm/grafana/loki-stack - vars: - repo_name: grafana - repo_link: https://grafana.github.io/helm-charts - install_namespace: "{{ namespace_monitoring }}" - timeout: "{{ charts.timeout }}" - release_name: loki-stack - chart_name: loki-stack - set_options: "--set \ - loki.isDefault=False,\ - test_pod.enable=False,\ - fluent-bit.enabled=True,\ - promtail.enabled=False" - - - name: Set config for Grafana to add Loki as a data source - set_fact: - helm_kube_prometheus_stack_loki_config: "\ - 
grafana.additionalDataSources[0].name=Loki,\ - grafana.additionalDataSources[0].type=loki,\ - grafana.additionalDataSources[0].access=proxy,\ - grafana.additionalDataSources[0].isDefault=False,\ - grafana.additionalDataSources[0].url='http://loki-stack.{{ namespace_monitoring }}:3100'," - - - name: Install kube-prometheus-stack - when: charts.services.kube_prometheus_stack.enabled - block: - - name: Install/Upgrade the kube-prometheus-stack chart - include_tasks: tasks-install-chart.yaml - # https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack - vars: - repo_name: prometheus-community - repo_link: https://prometheus-community.github.io/helm-charts - install_namespace: "{{ namespace_monitoring }}" - timeout: "{{ charts.timeout }}" - release_name: kube-prometheus-stack - chart_name: kube-prometheus-stack - set_options: "--set \ - {{ helm_kube_prometheus_stack_loki_config }}\ - prometheus.prometheusSpec.retention=730d,\ - prometheus.prometheusSpec.storageSpec.volumeClaimTemplate.spec.accessModes={'ReadWriteOnce'},\ - prometheus.prometheusSpec.storageSpec.volumeClaimTemplate.spec.resources.requests.storage='50Gi',\ - prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues=false,\ - prometheus.prometheusSpec.serviceMonitorNamespaceSelector=null,\ - prometheus.prometheusSpec.serviceMonitorSelector=null,\ - prometheus.prometheusSpec.ruleSelectorNilUsesHelmValues=false,\ - prometheus.prometheusSpec.ruleNamespaceSelector=null,\ - prometheus.prometheusSpec.ruleSelector=null,\ - prometheus.prometheusSpec.podMonitorSelectorNilUsesHelmValues=false,\ - prometheus.prometheusSpec.podMonitorNamespaceSelector=null,\ - prometheus.prometheusSpec.podMonitorSelector=null,\ - prometheus.prometheusSpec.probeSelectorNilUsesHelmValues=false,\ - prometheus.prometheusSpec.probeSelectorNamespaceSelector=null,\ - prometheus.prometheusSpec.probeSelector=null,\ - grafana.persistence.enabled=true,\ - grafana.adminUser={{ charts.services.kube_prometheus_stack.grafana.admin_username }},\ - grafana.adminPassword={{ charts.services.kube_prometheus_stack.grafana.admin_password }},\ - grafana.ingress.enabled=true,\ - grafana.ingress.ingressClassName=nginx,\ - grafana.ingress.hosts={grafana.{{ domain_name }}},\ - grafana.ingress.enabled=true" - - - name: Restart pods to pick up any config updates - include_tasks: tasks-kubernetes-delete-kind-instances.yaml - vars: - kind: pod - namespace: "{{ namespace_monitoring }}" - contains: kube-prometheus-stack - - - debug: - msg: > - You can log into Grafana at 'grafana.{{ domain_name }}' using - {{ charts.services.kube_prometheus_stack.grafana.admin_username }}/ - {{ charts.services.kube_prometheus_stack.grafana.admin_password }} - - - name: Install jellyfin - when: charts.services.jellyfin.enabled - block: - - name: Install/Upgrade the jellyfin chart - include_tasks: tasks-install-chart.yaml - vars: - repo_name: TrueCharts - repo_link: https://charts.truecharts.org - install_namespace: "{{ namespace_generic_services }}" - timeout: "{{ charts.timeout }}" - release_name: jellyfin - chart_name: jellyfin - # to allow the pod to be able to use the /dev mount - # to access /dev/dri/renderD128 for hwa, these options are set to true - # - securityContext.container.runAsUser=0 - # - securityContext.container.privileged=true - # - securityContext.container.allowPrivilegeEscalation=true - set_options: "--set \ - {{ helm_common_general }},\ - {{ helm_common_persistence }},\ - {{ helm_common_security_contexts }},\ - {{ helm_common_persistence_media 
}},\ - {{ helm_common_resources }},\ - {{ helm_common_ingress }},\ - securityContext.container.runAsUser=0,\ - securityContext.container.privileged=true,\ - securityContext.container.runAsNonRoot=false,\ - securityContext.container.allowPrivilegeEscalation=true,\ - persistence.cache.enabled=true,\ - persistence.cache.accessMode=ReadWriteOnce,\ - persistence.cache.size=50G,\ - persistence.dev.enabled=true,\ - persistence.dev.type=hostPath,\ - persistence.dev.mountPath=/dev,\ - persistence.dev.hostPath=/dev,\ - ingress.main.hosts[0].host='jellyfin.{{ domain_name }}',\ - ingress.main.hosts[0].paths[0].service.name=jellyfin,\ - ingress.main.hosts[0].paths[0].service.port=8096" - - - debug: - msg: > - You can log into Jellyfin at 'jellyfin.{{ domain_name }}'. - '{{ dir_home }}' is available under '/media' and can be used by - Jellyfin. If need be, delete any existing server and go to the - URL mention above once again to setup a new server. - - - name: Install qbittorrent - when: charts.services.qbittorrent.enabled - block: - - name: Install/Upgrade the qbittorrent chart - include_tasks: tasks-install-chart.yaml - vars: - repo_name: TrueCharts - repo_link: https://charts.truecharts.org - install_namespace: "{{ namespace_generic_services }}" - timeout: "{{ charts.timeout }}" - release_name: qbittorrent - chart_name: qbittorrent - set_options: "--set \ - {{ helm_common_general }},\ - {{ helm_common_persistence }},\ - {{ helm_common_security_contexts }},\ - {{ helm_common_persistence_media }},\ - {{ helm_common_resources }},\ - {{ helm_common_ingress }},\ - ingress.main.hosts[0].host='qbittorrent.{{ domain_name }}',\ - ingress.main.hosts[0].paths[0].service.name=qbittorrent,\ - ingress.main.hosts[0].paths[0].service.port=10095" - - - debug: - msg: > - You can log into qBittorrent at 'qbittorrent.{{ domain_name }}' using - admin/adminadmin as the default creds. Change this after deployment. - '{{ dir_home }}' from the host is available under '/media' and - can be used by the application to download things. Downloading in - the directories under {{ dir_data }} will be picked up by Jellyfin. 
- - - name: Install flaresolverr - when: charts.services.flaresolverr.enabled - block: - - name: Install/Upgrade the flaresolverr chart - include_tasks: tasks-install-chart.yaml - vars: - repo_name: TrueCharts - repo_link: https://charts.truecharts.org - install_namespace: "{{ namespace_generic_services }}" - timeout: "{{ charts.timeout }}" - release_name: flaresolverr - chart_name: flaresolverr - set_options: "--set \ - {{ helm_common_general }},\ - {{ helm_common_persistence }},\ - {{ helm_common_resources }}" - - - debug: - msg: > - You can flaresolverr to bypass Cloudflare's protection - - - name: Install prowlarr - when: charts.services.prowlarr.enabled - block: - - name: Create config directory on hostpath for prowlarr - file: - path: "{{ dir_home }}{{ dir_data_config_suffix }}/prowlarr" - state: directory - mode: '0755' - - - name: Install/Upgrade the prowlarr chart - include_tasks: tasks-install-chart.yaml - vars: - repo_name: TrueCharts - repo_link: https://charts.truecharts.org - install_namespace: "{{ namespace_generic_services }}" - timeout: "{{ charts.timeout }}" - release_name: prowlarr - chart_name: prowlarr - set_options: "--set \ - {{ helm_common_general }},\ - {{ helm_common_persistence }},\ - {{ helm_common_security_contexts }},\ - {{ helm_common_persistence_media }},\ - {{ helm_common_resources }},\ - {{ helm_common_ingress }},\ - {{ helm_common_persistence_config }},\ - persistence.config.hostPath={{ dir_minikube_mount }}{{ dir_data_config_suffix }}/prowlarr,\ - ingress.main.hosts[0].host='prowlarr.{{ domain_name }}',\ - ingress.main.hosts[0].paths[0].service.name=prowlarr,\ - ingress.main.hosts[0].paths[0].service.port=9696" - - - debug: - msg: > - You can log into prowlarr at 'prowlarr.{{ domain_name }}'. Go to this - URL and add the indexers you wish to use. - '{{ dir_home }}' from the host is available under '/media' and - can be used by the application to download things. Downloading in - the directories under {{ dir_data }} will be picked up by Jellyfin. - - - name: Install radarr - when: charts.services.radarr.enabled - block: - - name: Create config directory on hostpath for radarr - file: - path: "{{ dir_home }}{{ dir_data_config_suffix }}/radarr" - state: directory - mode: '0755' - - - name: Install/Upgrade the radarr chart - include_tasks: tasks-install-chart.yaml - vars: - repo_name: TrueCharts - repo_link: https://charts.truecharts.org - install_namespace: "{{ namespace_generic_services }}" - timeout: "{{ charts.timeout }}" - release_name: radarr - chart_name: radarr - set_options: "--set \ - {{ helm_common_general }},\ - {{ helm_common_persistence }},\ - {{ helm_common_security_contexts }},\ - {{ helm_common_persistence_media }},\ - {{ helm_common_resources }},\ - {{ helm_common_ingress }},\ - {{ helm_common_persistence_config }},\ - persistence.config.hostPath={{ dir_minikube_mount }}{{ dir_data_config_suffix }}/radarr,\ - ingress.main.hosts[0].host='radarr.{{ domain_name }}',\ - ingress.main.hosts[0].paths[0].service.name=radarr,\ - ingress.main.hosts[0].paths[0].service.port=7878" - - - debug: - msg: > - You can log into radarr at 'radarr.{{ domain_name }}'. - '{{ dir_home }}' from the host is available under '/media' and - can be used by the application to download things. Downloading in - the directories under {{ dir_data }} will be picked up by Jellyfin. 
- - - name: Install sonarr - when: charts.services.sonarr.enabled - block: - - name: Create config directory on hostpath for sonarr - file: - path: "{{ dir_home }}{{ dir_data_config_suffix }}/sonarr" - state: directory - mode: '0755' - - - name: Install/Upgrade the sonarr chart - include_tasks: tasks-install-chart.yaml - vars: - repo_name: TrueCharts - repo_link: https://charts.truecharts.org - install_namespace: "{{ namespace_generic_services }}" - timeout: "{{ charts.timeout }}" - release_name: sonarr - chart_name: sonarr - set_options: "--set \ - {{ helm_common_general }},\ - {{ helm_common_persistence }},\ - {{ helm_common_security_contexts }},\ - {{ helm_common_persistence_media }},\ - {{ helm_common_resources }},\ - {{ helm_common_ingress }},\ - {{ helm_common_persistence_config }},\ - persistence.config.hostPath={{ dir_minikube_mount }}{{ dir_data_config_suffix }}/sonarr,\ - ingress.main.hosts[0].host='sonarr.{{ domain_name }}',\ - ingress.main.hosts[0].paths[0].service.name=sonarr,\ - ingress.main.hosts[0].paths[0].service.port=8989" - - - debug: - msg: > - You can log into sonarr at 'sonarr.{{ domain_name }}'. - '{{ dir_home }}' from the host is available under '/media' and - can be used by the application to download things. Downloading in - the directories under {{ dir_data }} will be picked up by Jellyfin. - - - name: Install bazarr - when: charts.services.bazarr.enabled - block: - - name: Create config directory on hostpath for bazarr - file: - path: "{{ dir_home }}{{ dir_data_config_suffix }}/bazarr" - state: directory - mode: '0755' - - - name: Install/Upgrade the bazarr chart - include_tasks: tasks-install-chart.yaml - vars: - repo_name: TrueCharts - repo_link: https://charts.truecharts.org - install_namespace: "{{ namespace_generic_services }}" - timeout: "{{ charts.timeout }}" - release_name: bazarr - chart_name: bazarr - set_options: "--set \ - {{ helm_common_general }},\ - {{ helm_common_persistence }},\ - {{ helm_common_security_contexts }},\ - {{ helm_common_persistence_media }},\ - {{ helm_common_resources }},\ - {{ helm_common_ingress }},\ - {{ helm_common_persistence_config }},\ - persistence.config.hostPath={{ dir_minikube_mount }}{{ dir_data_config_suffix }}/bazarr,\ - ingress.main.hosts[0].host='bazarr.{{ domain_name }}',\ - ingress.main.hosts[0].paths[0].service.name=bazarr,\ - ingress.main.hosts[0].paths[0].service.port=6767" - - - debug: - msg: > - You can log into bazarr at 'bazarr.{{ domain_name }}'. - '{{ dir_home }}' from the host is available under '/media' and - can be used by the application. 
- - - name: Install readarr - when: charts.services.readarr.enabled - block: - - name: Create config directory on hostpath for readarr - file: - path: "{{ dir_home }}{{ dir_data_config_suffix }}/readarr" - state: directory - mode: '0755' - - - name: Install/Upgrade the readarr chart - include_tasks: tasks-install-chart.yaml - vars: - repo_name: TrueCharts - repo_link: https://charts.truecharts.org - install_namespace: "{{ namespace_generic_services }}" - timeout: "{{ charts.timeout }}" - release_name: readarr - chart_name: readarr - set_options: "--set \ - {{ helm_common_general }},\ - {{ helm_common_persistence }},\ - {{ helm_common_security_contexts }},\ - {{ helm_common_persistence_media }},\ - {{ helm_common_resources }},\ - {{ helm_common_ingress }},\ - {{ helm_common_persistence_config }},\ - persistence.config.hostPath={{ dir_minikube_mount }}{{ dir_data_config_suffix }}/readarr,\ - ingress.main.hosts[0].host='readarr.{{ domain_name }}',\ - ingress.main.hosts[0].paths[0].service.name=readarr,\ - ingress.main.hosts[0].paths[0].service.port=8787" - - - debug: - msg: > - You can log into readarr at 'readarr.{{ domain_name }}'. - '{{ dir_home }}' from the host is available under '/media' and - can be used by the application. - - - name: Install lidarr - when: charts.services.lidarr.enabled - block: - - name: Create config directory on hostpath for lidarr - file: - path: "{{ dir_home }}{{ dir_data_config_suffix }}/lidarr" - state: directory - mode: '0755' - - - name: Install/Upgrade the lidarr chart - include_tasks: tasks-install-chart.yaml - vars: - repo_name: TrueCharts - repo_link: https://charts.truecharts.org - install_namespace: "{{ namespace_generic_services }}" - timeout: "{{ charts.timeout }}" - release_name: lidarr - chart_name: lidarr - set_options: "--set \ - {{ helm_common_general }},\ - {{ helm_common_persistence }},\ - {{ helm_common_security_contexts }},\ - {{ helm_common_persistence_media }},\ - {{ helm_common_resources }},\ - {{ helm_common_ingress }},\ - {{ helm_common_persistence_config }},\ - persistence.config.hostPath={{ dir_minikube_mount }}{{ dir_data_config_suffix }}/lidarr,\ - ingress.main.hosts[0].host='lidarr.{{ domain_name }}',\ - ingress.main.hosts[0].paths[0].service.name=lidarr,\ - ingress.main.hosts[0].paths[0].service.port=8686" - - - debug: - msg: > - You can log into lidarr at 'lidarr.{{ domain_name }}'. - '{{ dir_home }}' from the host is available under '/media' and - can be used by the application. - - - name: Install ombi - when: charts.services.ombi.enabled - block: - - name: Install/Upgrade the ombi chart - include_tasks: tasks-install-chart.yaml - vars: - repo_name: TrueCharts - repo_link: https://charts.truecharts.org - install_namespace: "{{ namespace_generic_services }}" - timeout: "{{ charts.timeout }}" - release_name: ombi - chart_name: ombi - set_options: "--set \ - {{ helm_common_general }},\ - {{ helm_common_persistence }},\ - {{ helm_common_security_contexts }},\ - {{ helm_common_resources }},\ - {{ helm_common_ingress }},\ - ingress.main.hosts[0].host='ombi.{{ domain_name }}',\ - ingress.main.hosts[0].paths[0].service.name=ombi,\ - ingress.main.hosts[0].paths[0].service.port=3579" - - - debug: - msg: > - You can log into ombi at 'ombi.{{ domain_name }}'. - '{{ dir_home }}' is available under '/media' and can be used by - ombi. If need be, delete any existing server and go to the - URL mention above once again to setup a new server. 
- - - name: Install librespeed - when: charts.services.librespeed.enabled - block: - - name: Install/Upgrade the librespeed chart - include_tasks: tasks-install-chart.yaml - vars: - repo_name: TrueCharts - repo_link: https://charts.truecharts.org - install_namespace: "{{ namespace_generic_services }}" - timeout: "{{ charts.timeout }}" - release_name: librespeed - chart_name: librespeed - # PUID={{ uid }} gives write access on the pod - # 568 is the default user ID, added to the groups cause why not - set_options: "--set \ - {{ helm_common_general }},\ - {{ helm_common_persistence }},\ - {{ helm_common_resources }},\ - {{ helm_common_ingress }},\ - securityContext.container.PUID=\"{{ uid }}\",\ - securityContext.container.PGID=\"568\",\ - ingress.main.hosts[0].host='librespeed.{{ domain_name }}',\ - ingress.main.hosts[0].paths[0].service.name=librespeed,\ - ingress.main.hosts[0].paths[0].service.port=10016" - - - debug: - msg: > - You can log into librespeed at 'librespeed.{{ domain_name }}'. - - - name: Install calibre-web - when: charts.services.calibre_web.enabled - block: - - name: Install/Upgrade the calibre-web chart - include_tasks: tasks-install-chart.yaml - vars: - repo_name: TrueCharts - repo_link: https://charts.truecharts.org - install_namespace: "{{ namespace_generic_services }}" - timeout: "{{ charts.timeout }}" - release_name: calibre-web - chart_name: calibre-web - # PUID={{ uid }} gives write access on the pod - # 568 is the default user ID, added to the groups cause why not - set_options: "--set \ - {{ helm_common_general }},\ - {{ helm_common_persistence }},\ - {{ helm_common_persistence_media }},\ - {{ helm_common_resources }},\ - {{ helm_common_ingress }},\ - securityContext.container.PUID=\"{{ uid }}\",\ - securityContext.container.PGID=\"568\",\ - ingress.main.hosts[0].host='calibre-web.{{ domain_name }}',\ - ingress.main.hosts[0].paths[0].service.name=calibre-web,\ - ingress.main.hosts[0].paths[0].service.port=8083" - - - debug: - msg: > - You can log into calibre-web at 'calibre-web.{{ domain_name }}'. - '{{ dir_home }}' from the host is available under '/media' and - can be used by the application. 
- - - name: Install calibre - when: charts.services.calibre.enabled - block: - - name: Install/Upgrade the calibre chart - include_tasks: tasks-install-chart.yaml - vars: - repo_name: TrueCharts - repo_link: https://charts.truecharts.org - install_namespace: "{{ namespace_generic_services }}" - timeout: "{{ charts.timeout }}" - release_name: calibre - chart_name: calibre - # PUID={{ uid }} gives write access on the pod - # 568 is the default user ID, added to the groups cause why not - # securityContext.container.seccompProfile.type is required for the - # guacamole VNC client to be able to make sys calls (required ) - set_options: "--set \ - {{ helm_common_general }},\ - {{ helm_common_persistence }},\ - {{ helm_common_persistence_media }},\ - {{ helm_common_resources }},\ - securityContext.container.PUID=\"{{ uid }}\",\ - securityContext.container.PGID=\"568\",\ - securityContext.container.seccompProfile.type=Unconfined,\ - service.webserver.enabled=true" - - - name: Expose calibre service - include_tasks: tasks-kubernetes-port-forward-service.yaml - vars: - service_name: calibre - service_file_name: calibre.service - service_file_location_base: /etc/systemd/system/ - service_namespace: "{{ namespace_generic_services }}" - host_port: 3002 - - - include_tasks: tasks-allow-ports.yaml - vars: - ports: - - "3002" - - - debug: - msg: > - You can log into calibre at '{{ ip }}:3002'. - '{{ dir_home }}' from the host is available under '/media' and - can be used by the application. diff --git a/install-cn-basics.yaml b/install-cn-basics.yaml deleted file mode 100644 index ce7e7e2..0000000 --- a/install-cn-basics.yaml +++ /dev/null @@ -1,100 +0,0 @@ ---- -- name: Install docker, kubectl and helm - hosts: all - gather_facts: false - tasks: - - name: Install docker - # https://docs.docker.com/engine/install/ubuntu/ - block: - - name: Download convinience script - shell: echo $PWD && curl -fsSL https://get.docker.com -o get-docker.sh - - - name: Run convinience script - become: true - shell: sh get-docker.sh - - - name: Delete convinience script - file: - path: get-docker.sh - state: absent - - - name: Adding existing user "{{ ansible_user }}" to group docker - become: true - user: - name: "{{ ansible_user }}" - groups: docker - append: yes - - # maybe not needed - - name: "Edit grub file to allow cgroup to set memory" - become: true - lineinfile: - path: "/etc/default/grub" - regexp: '^GRUB_CMDLINE_LINUX=(.*)$' - line: 'GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"' - backrefs: yes - - # maybe not needed - - name: Update grub - become: true - shell: update-grub - - - name: Reset ssh connection to allow user changes to affect ansible user - meta: - reset_connection - - - name: Install kubectl - # https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/ - block: - - name: Install Prerequisits - become: true - apt: - update_cache: yes - pkg: - - apt-transport-https - - curl - - ca-certificates - - - name: Download google cloud public signing key - become: true - shell: curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg https://dl.k8s.io/apt/doc/apt-key.gpg - - - name: Add the Kubernetes apt repository - become: true - shell: echo "deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | tee /etc/apt/sources.list.d/kubernetes.list - - - name: Install kubectl - become: true - apt: - update_cache: yes - pkg: - - kubectl - - - name: Install helm - # https://helm.sh/docs/intro/install/ - block: - - name: Add repo key 
- become: true - shell: curl https://baltocdn.com/helm/signing.asc | apt-key add - - - - name: Install Prerequisits - become: true - apt: - update_cache: yes - pkg: - - apt-transport-https - - - name: Add repository - become: true - shell: echo "deb https://baltocdn.com/helm/stable/debian/ all main" | tee /etc/apt/sources.list.d/helm-stable-debian.list - - - name: Install helm - become: true - apt: - update_cache: yes - pkg: - - helm - - - name: Unconditionally reboot the machine with all defaults - become: true - reboot: \ No newline at end of file diff --git a/install-docker.yaml b/install-docker.yaml new file mode 100644 index 0000000..499fd3f --- /dev/null +++ b/install-docker.yaml @@ -0,0 +1,16 @@ +--- +# https://github.com/kubernetes-sigs/kubespray/blob/master/docs/ansible/ansible.md +# https://github.com/kubernetes-sigs/kubespray/blob/master/docs/getting_started/getting-started.md +- name: Install docker + hosts: all + gather_facts: true + tasks: + - name: Install Docker + include_role: + name: geerlingguy.docker + apply: + become: True + vars: + docker_edition: "ce" + docker_users: + - "{{ ansible_user }}" diff --git a/install-services.yaml b/install-services.yaml new file mode 100644 index 0000000..fe422ef --- /dev/null +++ b/install-services.yaml @@ -0,0 +1,866 @@ +--- +- name: Install services + hosts: all + gather_facts: true + vars: + helm_kube_prometheus_stack_loki_config: "" # default + + # The following has been set to disable Truecharts own injection + # of manifests for SCALE products I believe either way I dont need it: + # - manifestManager.enabled=false + helm_common_general: "\ + global.metallb.addServiceAnnotations=false,\ + global.traefik.addServiceAnnotations=false,\ + portal.open.enabled=false,\ + operator.verify.enabled=false,\ + workload.main.type=StatefulSet,\ + podOptions.dnsConfig.nameservers={8.8.8.8,8.8.4.4}" + + helm_common_ingress: "\ + ingress.main.enabled=true,\ + ingress.main.primary=true,\ + ingress.main.expandObjectName=true,\ + ingress.main.ingressClassName=nginx,\ + ingress.main.integrations.certManager.enabled=true,\ + ingress.main.integrations.certManager.certificateIssuer=letsencrypt-prod,\ + ingress.main.integrations.traefik.enabled=false,\ + ingress.main.hosts[0].paths[0].path='/',\ + ingress.main.hosts[0].paths[0].pathType='Prefix'" + + helm_common_resources: "\ + resources.requests.cpu={{ charts.resources.requests.cpu }},\ + resources.requests.memory={{ charts.resources.requests.memory }},\ + resources.limits.cpu={{ charts.resources.limits.cpu }},\ + resources.limits.memory={{ charts.resources.limits.memory }}" + + # persistence.config.hostPathType=DirectoryOrCreate doesnt work + # because it creates the dir with root ownership and no write permissions + helm_common_persistence: "\ + persistence.shared.enabled=false,\ + persistence.shm.enabled=false,\ + persistence.temp.enabled=false,\ + persistence.varlogs.enabled=false" + + helm_common_persistence_config: "\ + persistence.config.enabled=true,\ + persistence.config.type=hostPath" + + # readOnlyRootFilesystem=false is needed for the app to be able to take + # backups in /config/Backups + # runAsUser={{ uid }} gives write access on the pod + helm_common_security_contexts: "\ + securityContext.container.readOnlyRootFilesystem=false,\ + securityContext.container.runAsUser={{ uid }},\ + securityContext.container.runAsGroup=568" + + venv_dir: "{{ basics.home_dir }}/.venv-install-charts" + venv_python: "{{ venv_dir }}/bin/python3" + + tasks: + # - name: "Create host path dir for config" + # file: 
+ # path: "{{ services.configs_dir }}" + # state: directory + # mode: '0777' + # become: true # incase the dirs are created at root level + + # - name: Process variable to add disks + # block: + # - name: "Create host path dir" + # file: + # path: "{{ item.host_path }}" + # state: directory + # mode: '0777' + # become: true # incase the dirs are created at root level + # loop: "{{ services.radarr.persistence }}" + # - name: Set facts + # set_fact: + # helm_common_persistence_movies: >- + # {{- helm_common_persistence_movies | default('') + # + 'persistence.movies-' + item.name + '.enabled=true,' + # + 'persistence.movies-' + item.name + '.type=hostPath,' + # + 'persistence.movies-' + item.name + '.hostPath=' + item.host_path + ',' + # + 'persistence.movies-' + item.name + '.mountPath=/data/' + item.name + '/movies,' + # -}} + # loop: "{{ services.radarr.persistence }}" + + # - name: Process variable to add disks + # block: + # - name: "Create host path dir" + # file: + # path: "{{ item.host_path }}" + # state: directory + # mode: '0777' + # become: true # incase the dirs are created at root level + # loop: "{{ services.sonarr.persistence }}" + # - name: Set facts + # set_fact: + # helm_common_persistence_shows: >- + # {{- helm_common_persistence_shows | default('') + # + 'persistence.shows-' + item.name + '.enabled=true,' + # + 'persistence.shows-' + item.name + '.type=hostPath,' + # + 'persistence.shows-' + item.name + '.hostPath=' + item.host_path + ',' + # + 'persistence.shows-' + item.name + '.mountPath=/data/' + item.name + '/shows,' + # -}} + # loop: "{{ services.sonarr.persistence }}" + + # - name: Process variable to add disks + # block: + # - name: "Create host path dir" + # file: + # path: "{{ item.host_path }}" + # state: directory + # mode: '0777' + # become: true # incase the dirs are created at root level + # loop: "{{ services.readarr.persistence }}" + # - name: Set facts + # set_fact: + # helm_common_persistence_books: >- + # {{- helm_common_persistence_books | default('') + # + 'persistence.books-' + item.name + '.enabled=true,' + # + 'persistence.books-' + item.name + '.type=hostPath,' + # + 'persistence.books-' + item.name + '.hostPath=' + item.host_path + ',' + # + 'persistence.books-' + item.name + '.mountPath=/data/' + item.name + '/books,' + # -}} + # loop: "{{ services.readarr.persistence }}" + + # - name: Process variable to add disks + # block: + # - name: "Create host path dir" + # file: + # path: "{{ item.host_path }}" + # state: directory + # mode: '0777' + # become: true # incase the dirs are created at root level + # loop: "{{ services.lidarr.persistence }}" + # - name: Set facts + # set_fact: + # helm_common_persistence_music: >- + # {{- helm_common_persistence_music | default('') + # + 'persistence.music-' + item.name + '.enabled=true,' + # + 'persistence.music-' + item.name + '.type=hostPath,' + # + 'persistence.music-' + item.name + '.hostPath=' + item.host_path + ',' + # + 'persistence.music-' + item.name + '.mountPath=/data/' + item.name + '/music,' + # -}} + # loop: "{{ services.lidarr.persistence }}" + + # - name: Process variable to add disks + # block: + # - name: "Create host path dir" + # file: + # path: "{{ item.host_path }}" + # state: directory + # mode: '0777' + # become: true # incase the dirs are created at root level + # loop: "{{ services.qbittorrent.persistence }}" + # - name: Set facts + # set_fact: + # helm_common_persistence_downloads: >- + # {{- helm_common_persistence_downloads | default('') + # + 'persistence.downloads-' + item.name 
+ '.enabled=true,'
+      #           + 'persistence.downloads-' + item.name + '.type=hostPath,'
+      #           + 'persistence.downloads-' + item.name + '.hostPath=' + item.host_path + ','
+      #           + 'persistence.downloads-' + item.name + '.mountPath=/data/' + item.name + '/downloads,'
+      #           -}}
+      #       loop: "{{ services.qbittorrent.persistence }}"
+
+    - name: Copy over compose file
+      ansible.builtin.copy:
+        src: ./services/
+        dest: "{{ basics.home_dir }}"
+        mode: '0777'
+        backup: yes
+
+    - name: Install/Upgrade traefik ingress
+      when: services.traefik.enabled
+      block:
+        - name: docker compose traefik down
+          shell: "docker compose -f {{ basics.home_dir }}/compose.yaml down traefik"
+        - name: docker compose traefik up
+          shell: "docker compose -f {{ basics.home_dir }}/compose.yaml up -d traefik"
+
+
+    - name: Install/Upgrade jellyfin
+      when: services.jellyfin.enabled
+      block:
+        - name: Create config directory on hostpath for jellyfin
+          file:
+            path: "{{ services.configs_dir }}/jellyfin"
+            state: directory
+            mode: '0777'
+          become: true
+        - name: docker compose jellyfin down
+          shell: "docker compose -f {{ basics.home_dir }}/compose.yaml down jellyfin"
+        - name: docker compose jellyfin up
+          shell: "docker compose -f {{ basics.home_dir }}/compose.yaml up -d jellyfin"
+        - debug:
+            msg: >
+              You can log into Jellyfin at 'jellyfin.{{ domain_name }}'.
+              Data directories are available under '/data' and can be used by
+              Jellyfin. If need be, delete any existing server and go to the
+              URL mentioned above once again to set up a new server.
+
+    - name: Install/Upgrade librespeed
+      when: services.librespeed.enabled
+      block:
+        - name: Create config directory on hostpath for librespeed
+          file:
+            path: "{{ services.configs_dir }}/librespeed"
+            state: directory
+            mode: '0777'
+          become: true
+        - name: docker compose librespeed down
+          shell: "docker compose -f {{ basics.home_dir }}/compose.yaml down librespeed"
+        - name: docker compose librespeed up
+          shell: "docker compose -f {{ basics.home_dir }}/compose.yaml up -d librespeed"
+        - debug:
+            msg: You can log into librespeed at 'librespeed.{{ domain_name }}'.
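
Each service above is restarted with a `down` task followed by an `up -d` task against the same compose file. A broadly equivalent one-step alternative is `up -d --force-recreate`; the sketch below is only an illustration of that option (it reuses the playbook's `basics.home_dir` variable and the jellyfin service name) and is not part of this change.

```yaml
# Sketch only: one-step alternative to the "down" + "up -d" task pair above.
- name: Recreate jellyfin container
  shell: "docker compose -f {{ basics.home_dir }}/compose.yaml up -d --force-recreate jellyfin"
```
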
+ + - name: Install/Upgrade monitoring + when: services.monitoring.enabled + block: + + - name: Install/Upgrade node exporter + block: + - name: Create config directory on hostpath for node-exporter + file: + path: "{{ services.configs_dir }}/node-exporter" + state: directory + mode: '0777' + become: true + + - name: docker compose node-exporter down + shell: "docker compose -f {{ basics.home_dir }}/compose.yaml down node-exporter" + + - name: docker compose node-exporter up + shell: "docker compose -f {{ basics.home_dir }}/compose.yaml up -d node-exporter" + + - name: Install/Upgrade prometheus + block: + - name: Create config directory on hostpath for prometheus + file: + path: "{{ services.configs_dir }}/prometheus" + state: directory + mode: '0777' + become: true + + - name: docker compose prometheus down + shell: "docker compose -f {{ basics.home_dir }}/compose.yaml down prometheus" + + - name: docker compose prometheus up + shell: "docker compose -f {{ basics.home_dir }}/compose.yaml up -d prometheus" + + - name: Install/Upgrade grafana + block: + + - name: Create config directory on hostpath for grafana + file: + path: "{{ services.configs_dir }}/grafana" + state: directory + mode: '0777' + become: true + - name: docker compose grafana down + shell: "docker compose -f {{ basics.home_dir }}/compose.yaml down grafana" + - name: docker compose grafana up + shell: "docker compose -f {{ basics.home_dir }}/compose.yaml up -d grafana" + + # - name: Install/Upgrade loki + # block: + # - name: docker compose loki down + # shell: "docker compose -f {{ basics.home_dir }}/compose.yaml down loki" + # - name: docker compose loki up + # shell: "docker compose -f {{ basics.home_dir }}/compose.yaml up -d loki" + + # - name: Install/Upgrade promtail + # block: + # - name: docker compose promtail down + # shell: "docker compose -f {{ basics.home_dir }}/compose.yaml down promtail" + # - name: docker compose promtail up + # shell: "docker compose -f {{ basics.home_dir }}/compose.yaml up -d promtail" + + - name: Install/Upgrade cadvisor + block: + - name: docker compose cadvisor down + shell: "docker compose -f {{ basics.home_dir }}/compose.yaml down cadvisor" + - name: docker compose cadvisor up + shell: "docker compose -f {{ basics.home_dir }}/compose.yaml up -d cadvisor" + + - debug: + msg: You can login into grafana at 'grafana.{{ domain_name }}'. 
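
After the monitoring stack is brought up, a quick sanity check that the containers are actually running can save a trip to the Grafana UI. A minimal sketch, assuming the same compose file path used by the tasks above:

```yaml
# Sketch only: list the state of the monitoring containers after bringing them up.
- name: Check monitoring containers
  shell: "docker compose -f {{ basics.home_dir }}/compose.yaml ps node-exporter prometheus grafana cadvisor"
  register: monitoring_status
  changed_when: false

- debug:
    var: monitoring_status.stdout_lines
```
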
+ + # - name: Install cnpg + # when: services.cnpg.enabled + # block: + # - name: Install/Upgrade the cnpg chart + # include_tasks: tasks-install-chart.yaml + # vars: + # repo_name: cnpg + # repo_link: https://cloudnative-pg.github.io/charts + # install_namespace: "{{ namespace_cloudnative_postgres }}" + # timeout: "{{ charts.timeout }}" + # release_name: cnpg + # chart_name: cloudnative-pg + # set_options: "--set \ + # crds.create=true" + + # - name: Install immich + # when: services.immich.enabled + # block: + # - name: Install/Upgrade the immich chart + # include_tasks: tasks-install-chart.yaml + # vars: + # repo_name: TrueCharts + # repo_link: oci://tccr.io/truecharts + # install_namespace: "{{ namespace_generic_services }}" + # timeout: "{{ charts.timeout }}" + # release_name: immich + # chart_name: immich + # set_options: "--set \ + # {{ helm_common_general }},\ + # {{ helm_common_security_contexts }},\ + # {{ helm_common_resources }},\ + # {{ helm_common_ingress }},\ + # persistence.mlcache.enabled=true,\ + # persistence.mlcache.type=hostPath,\ + # persistence.mlcache.hostPath={{ services.immich.host_path }}/mlcache,\ + # persistence.microcache.enabled=true,\ + # persistence.microcache.type=hostPath,\ + # persistence.microcache.hostPath={{ services.immich.host_path }}/microcache,\ + # persistence.library.enabled=true,\ + # persistence.library.type=hostPath,\ + # persistence.library.hostPath={{ services.immich.host_path }}/library,\ + # persistence.uploads.enabled=true,\ + # persistence.uploads.type=hostPath,\ + # persistence.uploads.hostPath={{ services.immich.host_path }}/uploads,\ + # persistence.backups.enabled=true,\ + # persistence.backups.type=hostPath,\ + # persistence.backups.hostPath={{ services.immich.host_path }}/backups,\ + # persistence.thumbs.enabled=true,\ + # persistence.thumbs.type=hostPath,\ + # persistence.thumbs.hostPath={{ services.immich.host_path }}/thumbs,\ + # persistence.profile.enabled=true,\ + # persistence.profile.type=hostPath,\ + # persistence.profile.hostPath={{ services.immich.host_path }}/profile,\ + # persistence.video.enabled=true,\ + # persistence.video.type=hostPath,\ + # persistence.video.hostPath={{ services.immich.host_path }}/encoded-video,\ + # securityContext.container.runAsUser=0,\ + # securityContext.container.privileged=true,\ + # securityContext.container.runAsNonRoot=false,\ + # securityContext.container.allowPrivilegeEscalation=true,\ + # ingress.main.hosts[0].host='immich.{{ domain_name }}',\ + # ingress.main.hosts[0].paths[0].service.name=immich,\ + # ingress.main.hosts[0].paths[0].service.port=10323" + + # - debug: + # msg: > + # You can log into Immich at 'immich.{{ domain_name }}'. + + # - name: Install home-assistant + # when: services.home_assistant.enabled + # block: + # - name: Install/Upgrade the home-assistant chart + # include_tasks: tasks-install-chart.yaml + # vars: + # repo_name: TrueCharts + # repo_link: oci://tccr.io/truecharts + # install_namespace: "{{ namespace_generic_services }}" + # timeout: "{{ charts.timeout }}" + # release_name: home-assistant + # chart_name: home-assistant + # # not incliuding config persistence as it impacts the init setup + # # specifically the trusted proxies dont get configured blocking + # # ingress access. 
This is because the configmaps cannot be mounted + # # at the expected location (subdir in the hostpath mount) + # set_options: "--set \ + # {{ helm_common_general }},\ + # {{ helm_common_persistence }},\ + # {{ helm_common_security_contexts }},\ + # {{ helm_common_persistence_downloads }}\ + # {{ helm_common_resources }},\ + # {{ helm_common_ingress }},\ + # homeassistant.trusted_proxies[0]='10.0.0.0/8',\ + # ingress.main.hosts[0].host='home-assistant.{{ domain_name }}',\ + # ingress.main.hosts[0].paths[0].service.name=home-assistant,\ + # ingress.main.hosts[0].paths[0].service.port=8123" + + # - name: Install qbittorrent + # when: services.qbittorrent.enabled + # block: + # - name: Create config directory on hostpath for qbittorrent + # file: + # path: "{{ services.configs_dir }}/qbittorrent" + # state: directory + # mode: '0777' + # become: true + + # - name: Install/Upgrade the qbittorrent chart + # include_tasks: tasks-install-chart.yaml + # vars: + # repo_name: TrueCharts + # repo_link: oci://tccr.io/truecharts + # install_namespace: "{{ namespace_generic_services }}" + # timeout: "{{ charts.timeout }}" + # release_name: qbittorrent + # chart_name: qbittorrent + # set_options: "--set \ + # {{ helm_common_general }},\ + # {{ helm_common_persistence }},\ + # {{ helm_common_security_contexts }},\ + # {{ helm_common_persistence_downloads }}\ + # {{ helm_common_resources }},\ + # {{ helm_common_ingress }},\ + # {{ helm_common_persistence_config }},\ + # persistence.config.hostPath={{ services.configs_dir }}/qbittorrent,\ + # ingress.main.hosts[0].host='qbittorrent.{{ domain_name }}',\ + # ingress.main.hosts[0].paths[0].service.name=qbittorrent,\ + # ingress.main.hosts[0].paths[0].service.port=10095" + + # - name: Search for qbittorent pod + # vars: + # ansible_python_interpreter: "{{ venv_python }}" + # kubernetes.core.k8s_info: + # kind: Pod + # label_selectors: + # - app.kubernetes.io/instance = qbittorrent + # - app.kubernetes.io/name = qbittorrent + # register: output + + # - name: Get randomly generated password from qbittorrent pod logs + # shell: "kubectl logs {{ output.resources[0].metadata.name }} | grep -i 'temporary password' | cut -d ':' -f 2 | tr -d ' '" + # register: qbittorrent_random_password + + # - debug: + # msg: > + # You can log into qBittorrent at 'qbittorrent.{{ domain_name }}' using + # "admin/{{ qbittorrent_random_password.stdout }}" as the default creds. + # Change this after deployment. Downloads directory from the host is + # available under '/data' and can be used by the application to + # download things. Downloads in that directory will be relevant + # processing apps (like Radarr, Sonarr, etc if applicable). 
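
The commented chart block above pulls the randomly generated qBittorrent password out of the pod logs. If qBittorrent is later enabled in the compose-based setup instead, the same lookup can be done against the container logs; a sketch, assuming the container is named `qbittorrent`:

```yaml
# Sketch only: compose-era equivalent of the pod-log lookup above.
# Assumes a container named "qbittorrent"; adjust to the actual compose service name.
- name: Get qBittorrent temporary password from container logs
  shell: "docker logs qbittorrent 2>&1 | grep -i 'temporary password'"
  register: qbittorrent_temp_password
  changed_when: false

- debug:
    msg: "{{ qbittorrent_temp_password.stdout }}"
```
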
+ + # - name: Install flaresolverr + # when: services.flaresolverr.enabled + # block: + # - name: Install/Upgrade the flaresolverr chart + # include_tasks: tasks-install-chart.yaml + # vars: + # repo_name: TrueCharts + # repo_link: oci://tccr.io/truecharts + # install_namespace: "{{ namespace_generic_services }}" + # timeout: "{{ charts.timeout }}" + # release_name: flaresolverr + # chart_name: flaresolverr + # set_options: "--set \ + # {{ helm_common_general }},\ + # {{ helm_common_persistence }},\ + # {{ helm_common_resources }},\ + # persistence.config.enabled=false" + + # - debug: + # msg: > + # You can flaresolverr to bypass Cloudflare's protection + + # - name: Install prowlarr + # when: services.prowlarr.enabled + # block: + # - name: Create config directory on hostpath for prowlarr + # file: + # path: "{{ services.configs_dir }}/prowlarr" + # state: directory + # mode: '0777' + # become: true + + # - name: Install/Upgrade the prowlarr chart + # include_tasks: tasks-install-chart.yaml + # vars: + # repo_name: TrueCharts + # repo_link: oci://tccr.io/truecharts + # install_namespace: "{{ namespace_generic_services }}" + # timeout: "{{ charts.timeout }}" + # release_name: prowlarr + # chart_name: prowlarr + # set_options: "--set \ + # {{ helm_common_general }},\ + # {{ helm_common_persistence }},\ + # {{ helm_common_security_contexts }},\ + # {{ helm_common_resources }},\ + # {{ helm_common_ingress }},\ + # {{ helm_common_persistence_config }},\ + # persistence.config.hostPath={{ services.configs_dir }}/prowlarr,\ + # ingress.main.hosts[0].host='prowlarr.{{ domain_name }}',\ + # ingress.main.hosts[0].paths[0].service.name=prowlarr,\ + # ingress.main.hosts[0].paths[0].service.port=9696" + + # - debug: + # msg: > + # You can log into prowlarr at 'prowlarr.{{ domain_name }}'. Go to this + # URL and add the indexers you wish to use. + + # - name: Install radarr + # when: services.radarr.enabled + # block: + # - name: Create config directory on hostpath for radarr + # file: + # path: "{{ services.configs_dir }}/radarr" + # state: directory + # mode: '0777' + # become: true + + # - name: Install/Upgrade the radarr chart + # include_tasks: tasks-install-chart.yaml + # vars: + # repo_name: TrueCharts + # repo_link: oci://tccr.io/truecharts + # install_namespace: "{{ namespace_generic_services }}" + # timeout: "{{ charts.timeout }}" + # release_name: radarr + # chart_name: radarr + # set_options: "--set \ + # {{ helm_common_general }},\ + # {{ helm_common_persistence }},\ + # {{ helm_common_security_contexts }},\ + # {{ helm_common_persistence_movies }}\ + # {{ helm_common_persistence_downloads }}\ + # {{ helm_common_resources }},\ + # {{ helm_common_ingress }},\ + # {{ helm_common_persistence_config }},\ + # persistence.config.hostPath={{ services.configs_dir }}/radarr,\ + # ingress.main.hosts[0].host='radarr.{{ domain_name }}',\ + # ingress.main.hosts[0].paths[0].service.name=radarr,\ + # ingress.main.hosts[0].paths[0].service.port=7878" + + # - debug: + # msg: > + # You can log into radarr at 'radarr.{{ domain_name }}'. + # Data directories from the host are available under '/data' and + # can be used by the application to download things. Downloads in + # that directory will be picked up by Jellyfin. 
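
If radarr is moved to the compose-based setup like the services above, a stanza along the lines of the commented sonarr example further down would go into `services/compose.yaml`. The sketch below is an assumption-heavy illustration: the `lscr.io/linuxserver/radarr` image and its internal port (7878) should be confirmed against the linuxserver.io docs, and it reuses the `.env` variables already defined in this change.

```yaml
# Sketch only: a possible radarr service for services/compose.yaml.
radarr:
  image: lscr.io/linuxserver/radarr:latest
  container_name: radarr
  environment:
    - PUID=${UID}
    - PGID=${GID}
    - TZ=Etc/UTC
  volumes:
    - ${CONFIG_DIR}/radarr:/config
    - ${MEDIA_DIR}:/data
  restart: unless-stopped
  labels:
    - "traefik.enable=true"
    - "traefik.http.routers.radarr.rule=Host(`radarr.${DOMAIN_NAME}`)"
```

Keeping `${MEDIA_DIR}` mounted at `/data` in both radarr and the download client means imports see identical paths, which is what the debug messages above rely on.
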
+ + # - name: Install sonarr + # when: services.sonarr.enabled + # block: + # - name: Create config directory on hostpath for sonarr + # file: + # path: "{{ services.configs_dir }}/sonarr" + # state: directory + # mode: '0777' + # become: true + + # - name: Install/Upgrade the sonarr chart + # include_tasks: tasks-install-chart.yaml + # vars: + # repo_name: TrueCharts + # repo_link: oci://tccr.io/truecharts + # install_namespace: "{{ namespace_generic_services }}" + # timeout: "{{ charts.timeout }}" + # release_name: sonarr + # chart_name: sonarr + # set_options: "--set \ + # {{ helm_common_general }},\ + # {{ helm_common_persistence }},\ + # {{ helm_common_security_contexts }},\ + # {{ helm_common_persistence_shows }}\ + # {{ helm_common_persistence_downloads }}\ + # {{ helm_common_resources }},\ + # {{ helm_common_ingress }},\ + # {{ helm_common_persistence_config }},\ + # persistence.config.hostPath={{ services.configs_dir }}/sonarr,\ + # ingress.main.hosts[0].host='sonarr.{{ domain_name }}',\ + # ingress.main.hosts[0].paths[0].service.name=sonarr,\ + # ingress.main.hosts[0].paths[0].service.port=8989" + + # - debug: + # msg: > + # You can log into sonarr at 'sonarr.{{ domain_name }}'. + # Data directories from the host are available under '/data' and + # can be used by the application to download things. Downloads in + # that directory will be picked up by Jellyfin. + + # - name: Install bazarr + # when: services.bazarr.enabled + # block: + # - name: Create config directory on hostpath for bazarr + # file: + # path: "{{ services.configs_dir }}/bazarr" + # state: directory + # mode: '0777' + # become: true + + # - name: Install/Upgrade the bazarr chart + # include_tasks: tasks-install-chart.yaml + # vars: + # repo_name: TrueCharts + # repo_link: oci://tccr.io/truecharts + # install_namespace: "{{ namespace_generic_services }}" + # timeout: "{{ charts.timeout }}" + # release_name: bazarr + # chart_name: bazarr + # set_options: "--set \ + # {{ helm_common_general }},\ + # {{ helm_common_persistence }},\ + # {{ helm_common_security_contexts }},\ + # {{ helm_common_persistence_shows }}\ + # {{ helm_common_persistence_movies }}\ + # {{ helm_common_resources }},\ + # {{ helm_common_ingress }},\ + # {{ helm_common_persistence_config }},\ + # persistence.config.hostPath={{ services.configs_dir }}/bazarr,\ + # metrics.main.enabled=false,\ + # ingress.main.hosts[0].host='bazarr.{{ domain_name }}',\ + # ingress.main.hosts[0].paths[0].service.name=bazarr,\ + # ingress.main.hosts[0].paths[0].service.port=6767" + + # - debug: + # msg: > + # You can log into bazarr at 'bazarr.{{ domain_name }}'. + # Data directories from the host are available under '/data' and + # can be used by the application. 
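
In a compose setup, sonarr, bazarr and the other *arr services would share the same PUID/PGID/TZ values and restart policy. Compose supports `x-` extension fields with YAML anchors, which keeps that repetition out of `services/compose.yaml`. A sketch (image name is an assumption; note that a service-level `volumes:` key replaces one from the anchor rather than merging with it):

```yaml
# Sketch only: shared settings via an extension field + YAML anchor.
x-arr-common: &arr-common
  environment:
    - PUID=${UID}
    - PGID=${GID}
    - TZ=Etc/UTC
  restart: unless-stopped

services:
  bazarr:
    <<: *arr-common
    image: lscr.io/linuxserver/bazarr:latest
    container_name: bazarr
    volumes:
      - ${CONFIG_DIR}/bazarr:/config
      - ${MEDIA_DIR}:/data
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.bazarr.rule=Host(`bazarr.${DOMAIN_NAME}`)"
```
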
+ + # - name: Install readarr + # when: services.readarr.enabled + # block: + # - name: Create config directory on hostpath for readarr + # file: + # path: "{{ services.configs_dir }}/readarr" + # state: directory + # mode: '0777' + # become: true + + # - name: Install/Upgrade the readarr chart + # include_tasks: tasks-install-chart.yaml + # vars: + # repo_name: TrueCharts + # repo_link: oci://tccr.io/truecharts + # install_namespace: "{{ namespace_generic_services }}" + # timeout: "{{ charts.timeout }}" + # release_name: readarr + # chart_name: readarr + # set_options: "--set \ + # {{ helm_common_general }},\ + # {{ helm_common_persistence }},\ + # {{ helm_common_security_contexts }},\ + # {{ helm_common_persistence_books }}\ + # {{ helm_common_persistence_downloads }}}\ + # {{ helm_common_resources }},\ + # {{ helm_common_ingress }},\ + # {{ helm_common_persistence_config }},\ + # persistence.config.hostPath={{ services.configs_dir }}/readarr,\ + # ingress.main.hosts[0].host='readarr.{{ domain_name }}',\ + # ingress.main.hosts[0].paths[0].service.name=readarr,\ + # ingress.main.hosts[0].paths[0].service.port=8787" + + # - debug: + # msg: > + # You can log into readarr at 'readarr.{{ domain_name }}'. + # Data directories from the host are available under '/data' and + # can be used by the application. + + # - name: Install lidarr + # when: services.lidarr.enabled + # block: + # - name: Create config directory on hostpath for lidarr + # file: + # path: "{{ services.configs_dir }}/lidarr" + # state: directory + # mode: '0777' + # become: true + + # - name: Install/Upgrade the lidarr chart + # include_tasks: tasks-install-chart.yaml + # vars: + # repo_name: TrueCharts + # repo_link: oci://tccr.io/truecharts + # install_namespace: "{{ namespace_generic_services }}" + # timeout: "{{ charts.timeout }}" + # release_name: lidarr + # chart_name: lidarr + # set_options: "--set \ + # {{ helm_common_general }},\ + # {{ helm_common_persistence }},\ + # {{ helm_common_security_contexts }},\ + # {{ helm_common_persistence_music }}\ + # {{ helm_common_persistence_downloads }}}\ + # {{ helm_common_resources }},\ + # {{ helm_common_ingress }},\ + # {{ helm_common_persistence_config }},\ + # persistence.config.hostPath={{ services.configs_dir }}/lidarr,\ + # ingress.main.hosts[0].host='lidarr.{{ domain_name }}',\ + # ingress.main.hosts[0].paths[0].service.name=lidarr,\ + # ingress.main.hosts[0].paths[0].service.port=8686" + + # - debug: + # msg: > + # You can log into lidarr at 'lidarr.{{ domain_name }}'. + # Data directories from the host are available under '/data' and + # can be used by the application. 
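
The `--set` strings for these charts are assembled from several Jinja variables, so a stray brace or missing comma is easy to miss. Before re-enabling one of these blocks, the composed options can be dry-run rendered with `helm template`; a sketch, assuming the chart path mirrors the `repo_link` used above and a Helm version with OCI support:

```yaml
# Sketch only: render the chart locally to catch malformed --set strings
# (stray braces, missing commas) before running the real install task.
- name: Dry-run render the readarr chart
  shell: >-
    helm template readarr oci://tccr.io/truecharts/readarr
    --namespace {{ namespace_generic_services }}
    --set {{ helm_common_general }},{{ helm_common_persistence }},{{ helm_common_resources }}
  changed_when: false
```
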
+ + # - name: Install ombi + # when: services.ombi.enabled + # block: + # - name: Create config directory on hostpath for ombi + # file: + # path: "{{ services.configs_dir }}/ombi" + # state: directory + # mode: '0777' + # become: true + + # - name: Install/Upgrade the ombi chart + # include_tasks: tasks-install-chart.yaml + # vars: + # repo_name: TrueCharts + # repo_link: oci://tccr.io/truecharts + # install_namespace: "{{ namespace_generic_services }}" + # timeout: "{{ charts.timeout }}" + # release_name: ombi + # chart_name: ombi + # set_options: "--set \ + # {{ helm_common_general }},\ + # {{ helm_common_persistence }},\ + # {{ helm_common_security_contexts }},\ + # {{ helm_common_resources }},\ + # {{ helm_common_ingress }},\ + # {{ helm_common_persistence_config }},\ + # persistence.config.hostPath={{ services.configs_dir }}/ombi,\ + # ingress.main.hosts[0].host='ombi.{{ domain_name }}',\ + # ingress.main.hosts[0].paths[0].service.name=ombi,\ + # ingress.main.hosts[0].paths[0].service.port=3579" + # - debug: + # msg: > + # You can log into ombi at 'ombi.{{ domain_name }}'. + # Data directories are available under '/data' and can be used by + # ombi. If need be, delete any existing server and go to the + # URL mention above once again to setup a new server. + + # - name: Install jellyseerr + # when: services.jellyseerr.enabled + # block: + # - name: Create config directory on hostpath for jellyseerr + # file: + # path: "{{ services.configs_dir }}/jellyseerr" + # state: directory + # mode: '0777' + # become: true + + # - name: Install/Upgrade the jellyseerr chart + # include_tasks: tasks-install-chart.yaml + # vars: + # repo_name: TrueCharts + # repo_link: oci://tccr.io/truecharts + # install_namespace: "{{ namespace_generic_services }}" + # timeout: "{{ charts.timeout }}" + # release_name: jellyseerr + # chart_name: jellyseerr + # set_options: "--set \ + # {{ helm_common_general }},\ + # {{ helm_common_persistence }},\ + # {{ helm_common_security_contexts }},\ + # {{ helm_common_resources }},\ + # {{ helm_common_ingress }},\ + # {{ helm_common_persistence_config }},\ + # persistence.config.hostPath={{ services.configs_dir }}/jellyseerr,\ + # ingress.main.hosts[0].host='jellyseerr.{{ domain_name }}',\ + # ingress.main.hosts[0].paths[0].service.name=jellyseerr,\ + # ingress.main.hosts[0].paths[0].service.port=5055" + # - debug: + # msg: > + # You can log into jellyseerr at 'jellyseerr.{{ domain_name }}'. + # Data directories are available under '/data' and can be used by + # jellyseerr. If need be, delete any existing server and go to the + # URL mention above once again to setup a new server. 
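
jellyseerr, like ombi, is a request manager and fits the compose-based direction of this change. The sketch below is only illustrative: the `fallenbagel/jellyseerr` image, its `/app/config` path and port 5055 are assumptions to verify against the jellyseerr documentation, while the traefik labels follow the pattern already used in `services/compose.yaml`.

```yaml
# Sketch only: a possible jellyseerr service for services/compose.yaml.
jellyseerr:
  image: fallenbagel/jellyseerr:latest
  container_name: jellyseerr
  environment:
    - TZ=Etc/UTC
  volumes:
    - ${CONFIG_DIR}/jellyseerr:/app/config
  restart: unless-stopped
  labels:
    - "traefik.enable=true"
    - "traefik.http.routers.jellyseerr.rule=Host(`jellyseerr.${DOMAIN_NAME}`)"
    - "traefik.http.services.jellyseerr.loadbalancer.server.port=5055"
```
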
+ + # - name: Install librespeed + # when: services.librespeed.enabled + # block: + # - name: Install/Upgrade the librespeed chart + # include_tasks: tasks-install-chart.yaml + # vars: + # repo_name: TrueCharts + # repo_link: oci://tccr.io/truecharts + # install_namespace: "{{ namespace_generic_services }}" + # timeout: "{{ charts.timeout }}" + # release_name: librespeed + # chart_name: librespeed + # # PUID={{ uid }} gives write access on the pod + # # 568 is the default user ID, added to the groups cause why not + # set_options: "--set \ + # {{ helm_common_general }},\ + # {{ helm_common_persistence }},\ + # {{ helm_common_resources }},\ + # {{ helm_common_ingress }},\ + # {{ helm_common_persistence_config }},\ + # persistence.config.hostPath={{ services.configs_dir }}/librespeed,\ + # securityContext.container.PUID=\"{{ uid }}\",\ + # securityContext.container.PGID=\"568\",\ + # ingress.main.hosts[0].host='librespeed.{{ domain_name }}',\ + # ingress.main.hosts[0].paths[0].service.name=librespeed,\ + # ingress.main.hosts[0].paths[0].service.port=10016" + + + # - name: Install calibre-web + # when: services.calibre_web.enabled + # block: + # - name: Install/Upgrade the calibre-web chart + # include_tasks: tasks-install-chart.yaml + # vars: + # repo_name: TrueCharts + # repo_link: oci://tccr.io/truecharts + # install_namespace: "{{ namespace_generic_services }}" + # timeout: "{{ charts.timeout }}" + # release_name: calibre-web + # chart_name: calibre-web + # # PUID={{ uid }} gives write access on the pod + # # 568 is the default user ID, added to the groups cause why not + # set_options: "--set \ + # {{ helm_common_general }},\ + # {{ helm_common_persistence }},\ + # {{ helm_common_persistence_books }}\ + # {{ helm_common_persistence_downloads }}}\ + # {{ helm_common_resources }},\ + # {{ helm_common_ingress }},\ + # securityContext.container.PUID=\"{{ uid }}\",\ + # securityContext.container.PGID=\"568\",\ + # ingress.main.hosts[0].host='calibre-web.{{ domain_name }}',\ + # ingress.main.hosts[0].paths[0].service.name=calibre-web,\ + # ingress.main.hosts[0].paths[0].service.port=8083" + + # - debug: + # msg: > + # You can log into calibre-web at 'calibre-web.{{ domain_name }}'. + # Data directories from the host are available under '/data' and + # can be used by the application. + + # - name: Install calibre + # when: services.calibre.enabled + # block: + # - name: Install/Upgrade the calibre chart + # include_tasks: tasks-install-chart.yaml + # vars: + # repo_name: TrueCharts + # repo_link: oci://tccr.io/truecharts + # install_namespace: "{{ namespace_generic_services }}" + # timeout: "{{ charts.timeout }}" + # release_name: calibre + # chart_name: calibre + # # PUID={{ uid }} gives write access on the pod + # # 568 is the default user ID, added to the groups cause why not + # # securityContext.container.seccompProfile.type is required for the + # # guacamole VNC client to be able to make sys calls (required ) + # set_options: "--set \ + # {{ helm_common_general }},\ + # {{ helm_common_persistence }},\ + # {{ helm_common_persistence_books }}\ + # {{ helm_common_persistence_downloads }}}\ + # {{ helm_common_resources }},\ + # securityContext.container.PUID=\"{{ uid }}\",\ + # securityContext.container.PGID=\"568\",\ + # securityContext.container.seccompProfile.type=Unconfined,\ + # service.main.type=NodePort,\ + # service.main.ports.main.nodePort=30000,\ + # service.webserver.enabled=true" + + # - debug: + # msg: > + # You can log into calibre at '{{ ip }}:30000'. 
+ # Data directories from the host are available under '/data' and + # can be used by the application. diff --git a/install-tightvnc-and-ssh.yaml b/install-tightvnc-and-ssh.yaml index e1dd934..59d935f 100644 --- a/install-tightvnc-and-ssh.yaml +++ b/install-tightvnc-and-ssh.yaml @@ -58,22 +58,22 @@ line: "Group={{ ansible_user }}" backrefs: yes - - name: "Service file edit: Change WorkingDirectory to be {{ dir_home }}" + - name: "Service file edit: Change WorkingDirectory to be {{ vnc.working_dir }}" become: True # https://gist.github.com/drmalex07/c0f9304deea566842490 lineinfile: path: "{{ service_file_location }}" regexp: '^WorkingDirectory=(.*)$' - line: "WorkingDirectory={{ dir_home }}" + line: "WorkingDirectory={{ vnc.working_dir }}" backrefs: yes - - name: "Service file edit: Change WorkingDirectory to be {{ dir_home }}/.vnc/%H:%i.pid" + - name: "Service file edit: Change WorkingDirectory to be {{ vnc.working_dir }}/.vnc/%H:%i.pid" become: True # https://gist.github.com/drmalex07/c0f9304deea566842490 lineinfile: path: "{{ service_file_location }}" regexp: '^PIDFile=(.*)$' - line: "PIDFile={{ dir_home }}/.vnc/%H:%i.pid" + line: "PIDFile={{ vnc.working_dir }}/.vnc/%H:%i.pid" backrefs: yes - name: Reload systemd service @@ -81,9 +81,3 @@ vars: service_name: "vncserver@{{ vnc.display }}.service" become: True - - - include_tasks: tasks-allow-ports.yaml # should already work - vars: - ports: - - "ssh" - - "590{{ vnc.display }}" diff --git a/minikube_config/minikube.service b/minikube_config/minikube.service deleted file mode 100644 index 11ff361..0000000 --- a/minikube_config/minikube.service +++ /dev/null @@ -1,17 +0,0 @@ -[Unit] -Description=minikube -StartLimitInterval=0 -After=docker.service - -[Service] -Type=simple -RemainAfterExit=no -ExecStartPre=/usr/bin/minikube start -ExecStart=PLACEHOLDER -Restart=always -RestartSec=5 -User=PLACEHOLDER -TimeoutSec=36000 - -[Install] -WantedBy=multi-user.target \ No newline at end of file diff --git a/remote-access-minikube.yaml b/remote-access-minikube.yaml deleted file mode 100644 index 03b10f8..0000000 --- a/remote-access-minikube.yaml +++ /dev/null @@ -1,50 +0,0 @@ ---- -- name: Provide information for remote access of Minikube - hosts: all - gather_facts: false - vars: - local_kube_config_name: "minikube_server.kubeconfig" - local_kube_config_location: "minikube_server.kubeconfig" - remote_kube_config_location: "{{ dir_home }}/.kube/config" - remote_temp_file_location: "/tmp/minikube_config" - local_location: "./" - protocol: "http" - - tasks: - - name: "Copy kubeconfig to {{ remote_temp_file_location }}" - copy: - src: "{{ remote_kube_config_location }}" - dest: "{{ remote_temp_file_location }}" - mode: 0644 - remote_src: true - - - name: "Edit kubeconfig file to change IP to '{{ protocol }}://{{ ip }}:{{ api_server_forwarded_port }}'" - become: true - replace: - path: "{{ remote_temp_file_location }}" - regexp: 'https.*' - replace: "{{ protocol }}://{{ ip }}:{{ api_server_forwarded_port }}" - - - name: "Copy {{ remote_temp_file_location }} to current directory" - fetch: - src: "{{ remote_temp_file_location }}" - dest: "{{ local_kube_config_location }}" - flat: true - mode: 0644 - - - name: "Delete {{ remote_temp_file_location }}" - file: - path: "{{ remote_temp_file_location }}" - state: absent - - - include_tasks: tasks-allow-ports.yaml - vars: - ports: - - "{{ api_server_forwarded_port }}" - - - debug: - msg: > - Minikube is accessible at {{ ip }}:{{ api_server_forwarded_port }}, - please use the kubeconfig from '{{ 
local_kube_config_location }}' to
-          access it.
-    
\ No newline at end of file
diff --git a/run.sh b/run.sh
old mode 100644
new mode 100755
index 9c958ef..fbfbd4b
--- a/run.sh
+++ b/run.sh
@@ -1 +1,20 @@
-ansible-playbook setup.yaml -i hosts.yaml $@
\ No newline at end of file
+set -e
+
+VENV_DIR=".server-venv"
+
+echo "creating python virtual env"
+python3 -m venv ${VENV_DIR}
+
+echo "activating python virtual env"
+source ${VENV_DIR}/bin/activate
+
+if ! command -v ansible-playbook >/dev/null 2>&1; then
+    echo "installing ansible..."
+    pip install ansible
+fi
+
+echo "installing docker role..."
+ansible-galaxy role install geerlingguy.docker
+
+echo "running playbook"
+ansible-playbook setup.yaml -i hosts.yaml "$@"
diff --git a/server-basics.yaml b/server-basics.yaml
index d2e1445..797ad32 100644
--- a/server-basics.yaml
+++ b/server-basics.yaml
@@ -40,21 +40,20 @@
         state: latest
       ignore_errors: yes
       loop:
-        - git
-        - curl
-        - vim
-        - python3
-        - python3-pip
+        - git # git
+        - curl # curl webpage
+        - vim # text editor
+        - python3 # python
+        - python3-pip # pip
         - htop # for top resource usage
         - iotop # for top io usage
         - lm-sensors
-        - stress
-        - vlc
-        - jq
-        - net-tools
-        - speedtest-cli
+        - stress # stress tests
+        - vlc # media player
+        - jq # json parse
+        - net-tools # for ifconfig
+        - speedtest-cli # for speedtests
         - w3m
-        - ufw #firewall
         - fio # for disk tests
         - mesa-utils # for GPU tests
        - cpustat # for CPU logs
@@ -64,6 +63,10 @@
         - intel-gpu-tools # for intel GPU usage, MIGHT NOT BE FOUND
         - firefox # browser
         - testdisk # life saver if your disk is a bit sus, so its a nice util to have handy
+        - tmux # terminal multiplexer, mainly to allow you to create a re-attachable session on the server. Google for more info on how to use it
+        - smartmontools # to be able to use smartctl to get SMART data about storage devices
+        - hwinfo # hardware info
+        - python3-venv # to create venvs for pip installs
 
     - name: Install kubectx/kubens
       # https://github.com/ahmetb/kubectx
@@ -76,17 +79,10 @@
       file:
         path: "{{ item }}"
         state: directory
-        mode: '0755'
+        mode: '0777'
       with_items:
-        - "{{ dir_data }}"
-        - "{{ dir_repos }}"
-        - "{{ dir_data_movies }}"
-        - "{{ dir_data_shows }}"
-        - "{{ dir_data_games }}"
-        - "{{ dir_data_books }}"
-        - "{{ dir_data_music }}"
-        - "{{ dir_data_downloads }}"
-        - "{{ dir_home }}{{ dir_data_config_suffix }}"
+        - "{{ basics.repos_dir }}"
+      become: true # in case the dirs created are at root level
 
     - name: Setup the bashrc file and the vimrc file
       block:
@@ -94,19 +90,24 @@
         include_tasks: tasks-clone-git-repo.yaml
         vars:
           repo_dir: "{{ useful_files_repo_dir }}"
-          repo_link: https://github.com/Kimi450/useful_files.git
+          repo_link: https://github.com/Kimi450/useful-files.git
+
+      - name: Combine bashrc
+        ansible.builtin.shell: |-
+          cat {{ useful_files_repo_dir }}/.genericrc >> {{ useful_files_repo_dir }}/bash/.bashrc
+        register: hosts_contents
 
       - name: Transfer bashrc file
         copy:
           remote_src: True
-          src: "{{ useful_files_repo_dir }}/.bashrc"
-          dest: "{{ dir_home }}"
+          src: "{{ useful_files_repo_dir }}/bash/.bashrc"
+          dest: "{{ basics.home_dir }}"
 
       - name: Transfer vimrc edit script
         copy:
           remote_src: True
           src: "{{ useful_files_repo_dir }}/vimmer.sh"
-          dest: "{{ dir_home }}"
+          dest: "{{ basics.home_dir }}"
           mode: "0755"
 
       - name: Run vimrc edit script
@@ -116,4 +117,9 @@
         become: true
         command: "{{ useful_files_repo_dir }}/vimmer.sh"
       vars:
-        useful_files_repo_dir: "{{ dir_repos }}/useful_files"
+        useful_files_repo_dir: "{{ basics.repos_dir }}/useful_files"
+
+    - name: Reboot
+      reboot:
+      become:
true + when: basics.reboot diff --git a/services/.env b/services/.env new file mode 100644 index 0000000..c25ed8b --- /dev/null +++ b/services/.env @@ -0,0 +1,9 @@ +GID=3000 +UID=3000 + +DOMAIN_NAME= + +CONFIG_DIR=/home/kimi450/data/app-configs +CACHE_DIR=/home/kimi450/data/app-cache + +MEDIA_DIR=/home/kimi450/data \ No newline at end of file diff --git a/services/compose.yaml b/services/compose.yaml new file mode 100644 index 0000000..bbccebd --- /dev/null +++ b/services/compose.yaml @@ -0,0 +1,218 @@ +services: +# loki: +# image: grafana/loki:latest +# command: -config.file=/etc/loki/local-config.yaml + +# # https://github.com/grafana/loki/issues/333 +# promtail: +# image: grafana/promtail:latest +# volumes: +# - /var/log:/var/log +# - /var/lib/docker:/var/lib/docker:ro +# - ./promtail-config.yaml:/etc/promtail/promtail-config.yml +# command: -config.file=/etc/promtail/promtail-config.yml + + + + # https://github.com/google/cadvisor/issues/2523 + # https://medium.com/@sohammohite/docker-container-monitoring-with-cadvisor-prometheus-and-grafana-using-docker-compose-b47ec78efbc + cadvisor: + container_name: cadvisor + image: gcr.io/cadvisor/cadvisor:latest + command: + - '--housekeeping_interval=15s' # TODO configure + - "--docker_only=true" + # - "--store_container_labels=false" + # - "--disable_metrics=percpu,sched,tcp,udp,disk,diskIO,hugetlb,referenced_memory,cpu_topology,resctrl" + volumes: + - "/:/rootfs" + - "/var/run:/var/run" + - "/sys:/sys" + - "/var/lib/docker/:/var/lib/docker" + - "/dev/disk/:/dev/disk" + privileged: true + devices: + - "/dev/kmsg" + + # https://github.com/bluepuma77/traefik-best-practice/blob/main/docker-traefik-dashboard-letsencrypt/docker-compose.yml + # https://doc.traefik.io/traefik/user-guides/docker-compose/acme-tls/ + traefik: + image: "traefik:v3.3" + container_name: "traefik" + command: + #- --log.filepath=/var/log/traefik.log + - --log.level=DEBUG + - --accesslog=true + - "--providers.docker=true" + - "--providers.docker.exposedbydefault=false" + - "--api.insecure=false" + - --api.dashboard=true + #- --accesslog.filepath=/var/log/traefik-access.log + - --entrypoints.web.address=:80 + - --entrypoints.web.http.redirections.entrypoint.to=websecure + - --entryPoints.web.http.redirections.entrypoint.scheme=https + - --entrypoints.websecure.address=:443 + # remove next line when using Traefik v2 + - --entrypoints.websecure.asDefault=true + - --entrypoints.websecure.http.tls.certresolver=myresolver + - "--certificatesresolvers.myresolver.acme.tlschallenge=true" + - "--certificatesresolvers.myresolver.acme.caserver=https://acme-staging-v02.api.letsencrypt.org/directory" + - "--certificatesresolvers.myresolver.acme.email=kimi450@yahoo.com" + - "--certificatesresolvers.myresolver.acme.storage=/letsencrypt/acme.json" + ports: + - "443:443" + - "80:80" + volumes: + - "${CONFIG_DIR}/letsencrypt:/letsencrypt" + - "/var/run/docker.sock:/var/run/docker.sock:ro" + # networks: + # - monitoring + + node-exporter: + image: prom/node-exporter:latest + container_name: node-exporter + privileged: true + restart: unless-stopped + volumes: + - /proc:/host/proc:ro + - /sys:/host/sys:ro + - /:/rootfs:ro + # TODO mounted dirs + command: + - '--path.procfs=/host/proc' + - '--path.rootfs=/rootfs' + # TODO mounteddirs + - '--path.sysfs=/host/sys' + - '--collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc)($$|/)' + # networks: + # - monitoring + + + prometheus: + image: prom/prometheus:latest + container_name: prometheus + restart: unless-stopped + volumes: + - 
./prometheus.yml:/etc/prometheus/prometheus.yml + - prometheus_data:/prometheus + command: + - '--config.file=/etc/prometheus/prometheus.yml' + - '--storage.tsdb.path=/prometheus' + - '--web.console.libraries=/etc/prometheus/console_libraries' + - '--web.console.templates=/etc/prometheus/consoles' + - '--web.enable-lifecycle' + # networks: + # - monitoring + + + + # https://grafana.com/docs/grafana-cloud/send-data/metrics/metrics-prometheus/prometheus-config-examples/docker-compose-linux/ + grafana: + image: grafana/grafana-enterprise:latest + container_name: grafana + restart: unless-stopped + # if you are running as root then set it to 0 + # else find the right id with the id -u command + user: '0' # TODO change + # adding the mount volume point which we create earlier + volumes: # TODO change + - ./grafana.yml:/etc/grafana/provisioning/datasources/grafana.yml + - '${CONFIG_DIR}/grafana/data:/var/lib/grafana' + labels: + - "traefik.enable=true" + - "traefik.http.routers.grafana.rule=Host(`grafana.${DOMAIN_NAME}`)" + environment: + - GF_AUTH_ANONYMOUS_ENABLED=true + - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin + - GF_PATHS_PROVISIONING=/etc/grafana/provisioning + - GF_SECURITY_ADMIN_USER=admin # TODO change + - GF_SECURITY_ADMIN_PASSWORD=admin # TODO change + - GF_FEATURE_TOGGLES_ENABLE=alertingSimplifiedRouting,alertingQueryAndExpressionsStepMode + + # networks: + # - monitoring + + + + + # https://hub.docker.com/r/linuxserver/librespeed + librespeed: + image: lscr.io/linuxserver/librespeed:latest + container_name: librespeed + environment: + - PUID=1000 + - PGID=1000 + - TZ=UTC + # - PASSWORD=PASSWORD + # - CUSTOM_RESULTS=false #optional + # - DB_TYPE=sqlite #optional + # - DB_NAME=DB_NAME #optional + # - DB_HOSTNAME=DB_HOSTNAME #optional + # - DB_USERNAME=DB_USERNAME #optional + # - DB_PASSWORD=DB_PASSWORD #optional + # - DB_PORT=DB_PORT #optional + # - IPINFO_APIKEY=ACCESS_TOKEN #optional + volumes: + - ${CONFIG_DIR}/librespeed/:/config + restart: unless-stopped + labels: + - "traefik.enable=true" + - "traefik.http.routers.librespeed.rule=Host(`librespeed.${DOMAIN_NAME}`)" + + # https://jellyfin.org/docs/general/installation/container/#using-docker-compose + jellyfin: + image: jellyfin/jellyfin + container_name: jellyfin + user: ${UID}:${GID} + volumes: + - ${CONFIG_DIR}/jellyfin:/config + - ${CACHE_DIR}/cache:/cache + - type: bind + source: ${MEDIA_DIR} + target: /data + - type: bind + source: /dev/dri + target: /dev/dri + restart: 'unless-stopped' + labels: + - "traefik.enable=true" + - "traefik.http.routers.jellyfin.rule=Host(`jellyfin.${DOMAIN_NAME}`)" + + # sonarr: + # image: lscr.io/linuxserver/sonarr:latest + # container_name: sonarr + # environment: + # - PUID=1000 + # - PGID=1000 + # - TZ=Etc/UTC + # volumes: + # - ${CONFIG_DIR}/jellyfin:/config + # - /home/kimi450/data:/data + # ports: + # - 8989:8989 + # restart: unless-stopped + # env_file: + # - path: .env + # required: true # default + # labels: + # - homepage.group=Media Automation + # - homepage.name=Sonarr + # - homepage.icon=sonarr.svg + # - homepage.href=https://sonarr.local.haak.pw + # - homepage.description=TV show tracker + # - homepage.widget.type=sonarr + # - homepage.widget.key=${SONARR_API_KEY} + # - homepage.widget.url=http://192.168.177.25:8989 + # # - "traefik.enable=true" + # # - "traefik.http.routers.sonarr.rule=Host(`sonarr.local.haak.pw`)" + # # - "traefik.http.routers.sonarr.entrypoints=https" + # # - "traefik.http.routers.sonarr.tls.certresolver=cloudflare" + # # - 
"traefik.http.services.sonarr.loadbalancer.server.port=8989" + +networks: + # monitoring: + proxy: + external: true + +volumes: + prometheus_data: {} \ No newline at end of file diff --git a/services/grafana.yml b/services/grafana.yml new file mode 100644 index 0000000..391a247 --- /dev/null +++ b/services/grafana.yml @@ -0,0 +1,19 @@ +apiVersion: 1 + +datasources: +- name: Prometheus + type: prometheus + url: http://prometheus:9090 + isDefault: true + access: proxy + editable: true + scrape_interval: 1s # TODO configure +# - name: Loki +# type: loki +# access: proxy +# orgId: 1 +# url: http://loki:3100 +# basicAuth: false +# isDefault: false +# version: 1 +# editable: false diff --git a/services/prometheus.yml b/services/prometheus.yml new file mode 100644 index 0000000..f367f14 --- /dev/null +++ b/services/prometheus.yml @@ -0,0 +1,13 @@ +global: + scrape_interval: 1s # TODO configure + +scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + - job_name: "cadvisor" + static_configs: + - targets: ["cadvisor:8080"] + - job_name: 'node' + static_configs: + - targets: ['node-exporter:9100'] diff --git a/services/promtail-config.yaml b/services/promtail-config.yaml new file mode 100644 index 0000000..a3df81c --- /dev/null +++ b/services/promtail-config.yaml @@ -0,0 +1,51 @@ +server: + http_listen_port: 9080 + grpc_listen_port: 0 + +positions: + filename: /tmp/positions.yaml + +clients: + - url: http://loki:3100/loki/api/v1/push + +scrape_configs: + +- job_name: system + static_configs: + - targets: + - localhost + labels: + job: varlogs + __path__: /var/log/*log + +- job_name: containers + static_configs: + - targets: + - localhost + labels: + job: containerlogs + __path__: /var/lib/docker/containers/*/*log + + pipeline_stages: + - json: + expressions: + log: log + stream: stream + time: time + compose_project: attrs."com.docker.compose.project" + compose_service: attrs."com.docker.compose.service" + stack_name: attrs."com.docker.stack.namespace" + tag: attrs.tag + - regex: + expression: "^/var/lib/docker/containers/(?P.{12}).+/.+-json.log$" + source: filename + - timestamp: + format: RFC3339Nano + source: time + - labels: + stream: + container_id: + tag: + compose_project: + compose_service: + stack_name: diff --git a/setup.yaml b/setup.yaml index a0bd03a..d88c1e2 100644 --- a/setup.yaml +++ b/setup.yaml @@ -7,50 +7,21 @@ shell: "id -u `whoami`" register: uid_output - - name: Get architecture - command: dpkg --print-architecture - register: architecture_output - - name: Set shared facts set_fact: uid: "{{ uid_output.stdout }}" - user: "{{ ansible_user }}" - architecture: "{{ architecture_output.stdout }}" - dir_minikube_mount: "/minikube-host" - dir_mount_path: "/media" - dir_home: "/home/{{ ansible_user }}" - dir_data: "/home/{{ ansible_user }}/data" - dir_repos: "/home/{{ ansible_user }}/repos" - dir_data_movies: "/home/{{ ansible_user }}/data/movies" - dir_data_shows: "/home/{{ ansible_user }}/data/shows" - dir_data_games: "/home/{{ ansible_user }}/data/games" - dir_data_books: "/home/{{ ansible_user }}/data/books" - dir_data_music: "/home/{{ ansible_user }}/data/music" - dir_data_downloads: "/home/{{ ansible_user }}/data/downloads" - dir_data_config_suffix: "/data/app-configs" - import_playbook: server-basics.yaml when: - basics.enabled -- import_playbook: install-cn-basics.yaml +- import_playbook: install-docker.yaml when: - - minikube.enabled or cloud_native.enabled + - docker.enabled -- import_playbook: install-and-configure-minikube.yaml - vars: 
- api_server_forwarded_port: "3001" - when: - - cloud_native.enabled - - minikube.enabled - -- import_playbook: install-charts.yaml +- import_playbook: install-services.yaml vars: ip: "{{ ansible_host }}" - when: - - cloud_native.enabled or charts.enabled - - minikube.enabled or charts.enabled - - charts.enabled - import_playbook: install-tightvnc-and-ssh.yaml when: @@ -68,15 +39,6 @@ when: - cloudflare.enabled -- import_playbook: remote-access-minikube.yaml - vars: - api_server_forwarded_port: "3001" - ip: "{{ ansible_host }}" - when: - - cloud_native.enabled - - minikube.enabled - - minikube.remote_access.enabled - - import_playbook: install-and-configure-pinless-bluetooth-pairing.yaml when: - bluetooth.pinless.enabled diff --git a/tasks-allow-ports.yaml b/tasks-allow-ports.yaml deleted file mode 100644 index 5efec00..0000000 --- a/tasks-allow-ports.yaml +++ /dev/null @@ -1,22 +0,0 @@ ---- -- name: Enable firewall - become: True - ufw: - state: enabled - -- name: Allow port - become: true - ufw: - rule: allow - port: "{{ item }}" - with_items: - - "{{ ports }}" - register: allow_port_ufw_output - ignore_errors: True - -- name: Allow port on ufw module's failure to do so - become: true - shell: "ufw allow {{ item.item }}" - when: item.failed - with_items: - - "{{ allow_port_ufw_output.results }}" diff --git a/tasks-install-chart.yaml b/tasks-install-chart.yaml deleted file mode 100644 index a800545..0000000 --- a/tasks-install-chart.yaml +++ /dev/null @@ -1,11 +0,0 @@ -- name: Install {{ release_name }} - block: - - name: "Add helm repo {{ repo_name }} {{ repo_link }}" - shell: "helm repo add {{ repo_name }} {{ repo_link }}" - ignore_errors: True - - - name: Update helm repo - shell: helm repo update - - - name: Install chart - shell: "helm upgrade -i -n {{ install_namespace }} {{ release_name }} {{ repo_name }}/{{ chart_name }} {{ set_options }} --timeout {{ timeout }} --create-namespace --wait" diff --git a/tasks-kubernetes-delete-kind-instances.yaml b/tasks-kubernetes-delete-kind-instances.yaml deleted file mode 100644 index 8a066f1..0000000 --- a/tasks-kubernetes-delete-kind-instances.yaml +++ /dev/null @@ -1,6 +0,0 @@ -- name: "Get all '{{ kind }}' from namespace '{{ namespace }}'s that contain '{{ contains }}'" - shell: "kubectl get -n {{ namespace }} {{ kind }} --no-headers -o custom-columns=':metadata.name' | grep {{ contains }} | sed -z 's/\\n/ /g;s/ $/\\n/'" - register: output - -- name: "Delete all '{{ kind }}' from namespace '{{ namespace }}' that contain '{{ contains }}'" - shell: "kubectl delete -n {{ namespace }} {{ kind }} {{ output.stdout }}" diff --git a/tasks-kubernetes-port-forward-service.yaml b/tasks-kubernetes-port-forward-service.yaml deleted file mode 100644 index 5af9d9c..0000000 --- a/tasks-kubernetes-port-forward-service.yaml +++ /dev/null @@ -1,64 +0,0 @@ -- name: Get service port for local access - block: - - name: "Convert {{ service_name }} to a NodePort service" - shell: >- - kubectl patch svc -n {{ service_namespace }} {{ service_name }} --type='json' -p '[{"op":"replace","path":"/spec/type","value":"NodePort"}]' - - name: Get all ports of type NodePort - shell: >- - kubectl get svc -n {{ service_namespace }} {{ service_name }} {% raw %} -o go-template='{{if .items}}{{range .items}}{{range.spec.ports}}{{.nodePort}}{{"\n"}}{{end}}{{end}}{{else}}{{range.spec.ports}}{{.nodePort}}{{"\n"}}{{end}}{{end}}' {% endraw %} - register: service_port - when: service_name != "SKIP" - - # TODO what to do if there are multiple nodeports that need to be port forwarded -- 
name: Setup and enable systemd service for port forwarding - block: - - name: "Copy over the skeleton service file for systemd" - become: True - copy: - src: charts_config/skeleton.service - dest: "{{ service_file_location_base }}{{ service_file_name }}" - - # TODO what if minikube IP changes, the IP is set at deployment time here - # Maybe should be part of the systemd service in a script? - - name: Register minikube ssh-key location - shell: minikube ssh-key - register: dir_minikube_ssh_key - - - name: Register minikube ip - shell: minikube ip - register: minikube_ip - - - name: "Service file edit: Add port forward command for local access" - become: True - # https://gist.github.com/drmalex07/c0f9304deea566842490 - lineinfile: - path: "{{ service_file_location_base }}{{ service_file_name }}" - regexp: '^ExecStart=(.*)$' - line: "ExecStart=ssh -NT -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ExitOnForwardFailure=yes -o GatewayPorts=true -i {{ dir_minikube_ssh_key.stdout }} docker@{{ minikube_ip.stdout }} -L {{ host_port }}:0.0.0.0:{{ service_port.stdout }}" - backrefs: yes - when: service_name != "SKIP" - - - name: "Service file edit: Add port forward command for domain access" - become: True - # https://gist.github.com/drmalex07/c0f9304deea566842490 - lineinfile: - path: "{{ service_file_location_base }}{{ service_file_name }}" - regexp: '^ExecStart=(.*)$' - line: "ExecStart=ssh -NT -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ExitOnForwardFailure=yes -o GatewayPorts=true -i {{ dir_minikube_ssh_key.stdout }} docker@{{ minikube_ip.stdout }} -L {{ host_port }}:0.0.0.0:{{ service_port }}" - backrefs: yes - when: service_name == "SKIP" - - - name: "Service file edit: Change User to be {{ ansible_user }}" - become: True - # https://gist.github.com/drmalex07/c0f9304deea566842490 - lineinfile: - path: "{{ service_file_location_base }}{{ service_file_name }}" - regexp: '^User=(.*)$' - line: "User={{ ansible_user }}" - backrefs: yes - - - name: Reload systemd service - include_tasks: tasks-reload-systemd-service.yaml - vars: - service_name: "{{ service_file_name }}" - become: True diff --git a/work_in_progress_playbooks/install-and-configure-ssl-cloudfare.yaml b/work_in_progress_playbooks/install-and-configure-ssl-cloudfare.yaml deleted file mode 100644 index 7d301dc..0000000 --- a/work_in_progress_playbooks/install-and-configure-ssl-cloudfare.yaml +++ /dev/null @@ -1,63 +0,0 @@ ---- -- name: Setup SSL for HTTPS using letsencrypt and certbot - hosts: all - gather_facts: false - vars: - dir_cloudfare_config_ini: "{{ dir_home }}/.secrets/certbot/" - name_cloudfare_config_ini: cloudfare.ini - wildcard_domain_name: "*.{{ domain_name }}" - tasks: - - name: Install snapd - become: true - apt: - update_cache: yes - pkg: - - snapd - state: latest - - - name: Update snapd - snap: - name: core - become: true - - - name: Refresh snapd - shell: snap refresh core - become: true - - - name: Install certbot using snapd - snap: - name: certbot - classic: true - become: true - - - name: Link to certbot binaries - shell: ln -sf /snap/bin/certbot /usr/bin/certbot - become: true - - - name: Confirm plugin containment level - shell: "snap set certbot trust-plugin-with-root=ok" - become: true - - - name: Install correct DNS plugin - snap: - name: "certbot-dns-cloudflare" - classic: true - become: true - - - name: Create a directory for secrets - ansible.builtin.file: - path: "{{ dir_cloudfare_config_ini }}" - state: directory - mode: '0755' - - - name: Create config file for 
cloudfare - copy: - dest: "{{ dir_cloudfare_config_ini }}{{ name_cloudfare_config_ini }}" - content: | - # Cloudflare API token used by Certbot - dns_cloudflare_api_token = {{ cloudflare.api_token }} - mode: 700 - - - name: Install certs - shell: "certbot certonly -d {{ wildcard_domain_name }} --dns-cloudflare --dns-cloudflare-credentials {{ dir_cloudfare_config_ini }}{{ name_cloudfare_config_ini }}" - become: true
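A quick bring-up note for the new `services/` stack: `compose.yaml` interpolates its IDs and paths from the adjacent `.env` file (fill in `DOMAIN_NAME`, `UID`/`GID` and the `*_DIR` paths first) and declares an external `proxy` network. The following is a minimal sketch, assuming Docker Compose v2 is already installed (for example via the `install-docker.yaml` playbook) and the `proxy` network has not been created yet:

```
# One-time: create the external network declared at the bottom of compose.yaml
docker network create proxy

# Run from the services/ directory so Compose picks up .env automatically
cd services
docker compose config --quiet   # validate the rendered configuration
docker compose up -d            # start the stack in the background
docker compose logs -f traefik  # watch Traefik come up and request certificates
```

Note that the Traefik resolver is currently pointed at the Let's Encrypt staging `caserver`, so certificates issued while testing will not be browser-trusted until that flag is switched to the production endpoint.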