",
-)
-# https://docs.djangoproject.com/en/dev/ref/settings/#server-email
-SERVER_EMAIL = env("DJANGO_SERVER_EMAIL", default=DEFAULT_FROM_EMAIL)
-# https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
-EMAIL_SUBJECT_PREFIX = env(
- "DJANGO_EMAIL_SUBJECT_PREFIX",
- default="[{{ copier__project_name }}]",
-)
-
-# ADMIN
-# ------------------------------------------------------------------------------
-# Django Admin URL regex.
-ADMIN_URL = env("DJANGO_ADMIN_URL")
-
-# Anymail
-# ------------------------------------------------------------------------------
-# https://anymail.readthedocs.io/en/stable/installation/#installing-anymail
-INSTALLED_APPS += ["anymail"] # noqa F405
-# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
-# https://anymail.readthedocs.io/en/stable/installation/#anymail-settings-reference
-{%- if copier__mail_service == 'Mailgun' %}
-# https://anymail.readthedocs.io/en/stable/esps/mailgun/
-EMAIL_BACKEND = "anymail.backends.mailgun.EmailBackend"
-ANYMAIL = {
- "MAILGUN_API_KEY": env("MAILGUN_API_KEY"),
- "MAILGUN_SENDER_DOMAIN": env("MAILGUN_DOMAIN"),
- "MAILGUN_API_URL": env("MAILGUN_API_URL", default="https://api.mailgun.net/v3"),
-}
-{%- elif copier__mail_service == 'Amazon SES' %}
-# https://anymail.readthedocs.io/en/stable/esps/amazon_ses/
-EMAIL_BACKEND = "anymail.backends.amazon_ses.EmailBackend"
-ANYMAIL = {}
-{%- elif copier__mail_service == 'Other SMTP' %}
-# https://anymail.readthedocs.io/en/stable/esps
-EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
-ANYMAIL = {}
-{%- endif %}
-
-# LOGGING
-# ------------------------------------------------------------------------------
-# https://docs.djangoproject.com/en/dev/ref/settings/#logging
-# See https://docs.djangoproject.com/en/dev/topics/logging for
-# more details on how to customize your logging configuration.
-LOGGING = {
- "version": 1,
- "disable_existing_loggers": True,
- "formatters": {
- "verbose": {
- "format": "%(levelname)s %(asctime)s %(module)s "
- "%(process)d %(thread)d %(message)s"
- }
- },
- "handlers": {
- "console": {
- "level": "DEBUG",
- "class": "logging.StreamHandler",
- "formatter": "verbose",
- }
- },
- "root": {"level": "INFO", "handlers": ["console"]},
- "loggers": {
- "django.db.backends": {
- "level": "ERROR",
- "handlers": ["console"],
- "propagate": False,
- },
- "django.security.DisallowedHost": {
- "level": "ERROR",
- "handlers": ["console"],
- "propagate": False,
- },
- },
-}
-
-{%- if copier__create_nextjs_frontend %}
-# ------------------------------------------------------------------------------
-# CORS settings
-CORS_ALLOWED_ORIGINS = env.list("CORS_ALLOWED_ORIGINS", default=[])
-CORS_ALLOW_CREDENTIALS = env.bool("CORS_ALLOW_CREDENTIALS", default=False)
-{%- endif %}
-
-# Your stuff...
-# ------------------------------------------------------------------------------
diff --git a/template/backend/config/settings/test.py b/template/backend/config/settings/test.py
deleted file mode 100644
index 1aa25e5..0000000
--- a/template/backend/config/settings/test.py
+++ /dev/null
@@ -1,48 +0,0 @@
-"""
-With these settings, tests run faster.
-"""
-
-from .base import * # noqa
-from .base import env
-
-# GENERAL
-# ------------------------------------------------------------------------------
-# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
-SECRET_KEY = env("DJANGO_SECRET_KEY")
-# https://docs.djangoproject.com/en/dev/ref/settings/#test-runner
-TEST_RUNNER = "django.test.runner.DiscoverRunner"
-
-# CACHES
-# ------------------------------------------------------------------------------
-# https://docs.djangoproject.com/en/dev/ref/settings/#caches
-CACHES = {
- "default": {
- "BACKEND": "django.core.cache.backends.locmem.LocMemCache",
- "LOCATION": "",
- }
-}
-
-# PASSWORDS
-# ------------------------------------------------------------------------------
-# https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers
-PASSWORD_HASHERS = ["django.contrib.auth.hashers.MD5PasswordHasher"]
-
-# TEMPLATES
-# ------------------------------------------------------------------------------
-TEMPLATES[-1]["OPTIONS"]["loaders"] = [ # type: ignore[index] # noqa F405
- (
- "django.template.loaders.cached.Loader",
- [
- "django.template.loaders.filesystem.Loader",
- "django.template.loaders.app_directories.Loader",
- ],
- )
-]
-
-# EMAIL
-# ------------------------------------------------------------------------------
-# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
-EMAIL_BACKEND = "django.core.mail.backends.locmem.EmailBackend"
-
-# Your stuff...
-# ------------------------------------------------------------------------------
diff --git a/template/backend/config/urls.py b/template/backend/config/urls.py
deleted file mode 100644
index 41d1850..0000000
--- a/template/backend/config/urls.py
+++ /dev/null
@@ -1,63 +0,0 @@
-from django.conf import settings
-from django.conf.urls.static import static
-from django.contrib import admin
-from django.contrib.staticfiles.urls import staticfiles_urlpatterns
-from django.urls import include, path
-from django.views import defaults as default_views
-from django.views.generic import TemplateView
-
-{%- if copier__create_nextjs_frontend %}
-from django.views.decorators.csrf import csrf_exempt
-from strawberry.django.views import GraphQLView
-
-from .schema import schema
-
-{%- endif %}
-
-urlpatterns = [
- path("", TemplateView.as_view(template_name="pages/home.html"), name="home"),
- path(
- "about/", TemplateView.as_view(template_name="pages/about.html"), name="about"
- ),
- # Django Admin, use {% raw %}{% url 'admin:index' %}{% endraw %}
- path(settings.ADMIN_URL, admin.site.urls),
- # User management
- path("users/", include("{{ copier__project_slug }}.users.urls", namespace="users")),
- path("accounts/", include("allauth.urls")),
- # Your stuff: custom urls includes go here
-] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
-if settings.DEBUG:
- # Static file serving when using Gunicorn + Uvicorn for local web socket development
- urlpatterns += staticfiles_urlpatterns()
-
-{%- if copier__create_nextjs_frontend %}
-urlpatterns += [
- path("graphql/", csrf_exempt(GraphQLView.as_view(schema=schema))),
-]
-{%- endif %}
-
-if settings.DEBUG:
- # This allows the error pages to be debugged during development, just visit
- # these url in browser to see how these error pages look like.
- urlpatterns += [
- path(
- "400/",
- default_views.bad_request,
- kwargs={"exception": Exception("Bad Request!")},
- ),
- path(
- "403/",
- default_views.permission_denied,
- kwargs={"exception": Exception("Permission Denied")},
- ),
- path(
- "404/",
- default_views.page_not_found,
- kwargs={"exception": Exception("Page not Found")},
- ),
- path("500/", default_views.server_error),
- ]
- if "debug_toolbar" in settings.INSTALLED_APPS:
- import debug_toolbar
-
- urlpatterns = [path("__debug__/", include(debug_toolbar.urls))] + urlpatterns
diff --git a/template/backend/config/wsgi.py b/template/backend/config/wsgi.py
deleted file mode 100644
index 292dfc6..0000000
--- a/template/backend/config/wsgi.py
+++ /dev/null
@@ -1,38 +0,0 @@
-"""
-WSGI config for {{ copier__project_name }} project.
-
-This module contains the WSGI application used by Django's development server
-and any production WSGI deployments. It should expose a module-level variable
-named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
-this application via the ``WSGI_APPLICATION`` setting.
-
-Usually you will have the standard Django WSGI application here, but it also
-might make sense to replace the whole Django WSGI application with a custom one
-that later delegates to the Django one. For example, you could introduce WSGI
-middleware here, or combine a Django application with an application of another
-framework.
-
-"""
-import os
-import sys
-from pathlib import Path
-
-from django.core.wsgi import get_wsgi_application
-
-# This allows easy placement of apps within the interior
-# {{ copier__project_slug }} directory.
-ROOT_DIR = Path(__file__).resolve(strict=True).parent.parent
-sys.path.append(str(ROOT_DIR / "{{ copier__project_slug }}"))
-# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
-# if running multiple sites in the same mod_wsgi process. To fix this, use
-# mod_wsgi daemon mode with each site in its own daemon process, or use
-# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
-os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
-
-# This application object is used by any WSGI server configured to use this
-# file. This includes Django's development server, if the WSGI_APPLICATION
-# setting points here.
-application = get_wsgi_application()
-# Apply WSGI middleware here.
-# from helloworld.wsgi import HelloWorldApplication
-# application = HelloWorldApplication(application)
diff --git a/template/backend/env.example b/template/backend/env.example
deleted file mode 100644
index 611d3a0..0000000
--- a/template/backend/env.example
+++ /dev/null
@@ -1,21 +0,0 @@
-# General settings
-DJANGO_ADMIN_URL=
-DJANGO_SETTINGS_MODULE=config.settings.local
-DJANGO_SECRET_KEY=)!@y(_a9@pbbc8d1yidg9v18m1_y*4o(ha5a076*86_#*zxb43
-DJANGO_ALLOWED_HOSTS=.sixfeetup.com
-
-# AWS Settings
-DJANGO_AWS_ACCESS_KEY_ID=
-DJANGO_AWS_SECRET_ACCESS_KEY=
-DJANGO_AWS_STORAGE_BUCKET_NAME=
-
-# Used with email
-DJANGO_MAILGUN_API_KEY=
-DJANGO_SERVER_EMAIL=
-MAILGUN_SENDER_DOMAIN=
-
-# Security! Better to use DNS for this task, but you can use redirect
-DJANGO_SECURE_SSL_REDIRECT=False
-
-# Are we using Vagrant
-USE_VAGRANT=
diff --git a/template/backend/manage.py b/template/backend/manage.py
deleted file mode 100755
index ca6912d..0000000
--- a/template/backend/manage.py
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/usr/bin/env python
-import os
-import sys
-
-if __name__ == "__main__":
- os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
-
- try:
- from django.core.management import execute_from_command_line
- except ImportError:
- # The above import may fail for some other reason. Ensure that the
- # issue is really that Django is missing to avoid masking other
- # exceptions on Python 2.
- try:
- import django # noqa
- except ImportError:
- raise ImportError(
- "Couldn't import Django. Are you sure it's installed and "
- "available on your PYTHONPATH environment variable? Did you "
- "forget to activate a virtual environment?"
- )
- raise
- execute_from_command_line(sys.argv)
diff --git a/template/backend/requirements/base.in b/template/backend/requirements/base.in
deleted file mode 100644
index a10e45d..0000000
--- a/template/backend/requirements/base.in
+++ /dev/null
@@ -1,29 +0,0 @@
-argon2-cffi
-crispy-bootstrap5
-daphne
-django
-django-allauth
-django-crispy-forms
-django-environ
-django-model-utils
-django-redis
-Pillow
-urllib3
-{% if copier__use_sentry %}
-sentry-sdk[django]
-{% endif %}
-
-{%- if copier__use_celery %}
-celery
-django-celery-beat
-flower
-redis
-{%- endif %}
-
-{%- if copier__create_nextjs_frontend %}
-strawberry-graphql-django
-{%- endif %}
-
-{%- if copier__create_nextjs_frontend %}
-django-cors-headers
-{%- endif %}
diff --git a/template/backend/requirements/local.in b/template/backend/requirements/local.in
deleted file mode 100644
index 45f1c66..0000000
--- a/template/backend/requirements/local.in
+++ /dev/null
@@ -1,9 +0,0 @@
-# Local development dependencies go here
--r base.txt
-
-debugpy
-django-debug-toolbar
-django-extensions
-ipdb
-pydevd-pycharm==243.26053.29
-Werkzeug
diff --git a/template/backend/requirements/production.in b/template/backend/requirements/production.in
deleted file mode 100644
index db8052c..0000000
--- a/template/backend/requirements/production.in
+++ /dev/null
@@ -1,20 +0,0 @@
-# PRECAUTION: avoid production dependencies that aren't in development
-
--r base.txt
-
-gunicorn
-psycopg2
-uvicorn[standard]
-
-# Django
-# ------------------------------------------------------------------------------
-django-storages[boto3] # https://github.com/jschneier/django-storages
-daphne
-
-{%- if copier__mail_service == 'Mailgun' %}
-django-anymail[mailgun] # https://github.com/anymail/django-anymail
-{%- elif copier__mail_service == 'Amazon SES' %}
-django-anymail[amazon_ses] # https://github.com/anymail/django-anymail
-{%- elif copier__mail_service == 'Other SMTP' %}
-django-anymail # https://github.com/anymail/django-anymail
-{%- endif %}
diff --git a/template/backend/requirements/tests.in b/template/backend/requirements/tests.in
deleted file mode 100644
index aa4340b..0000000
--- a/template/backend/requirements/tests.in
+++ /dev/null
@@ -1,15 +0,0 @@
-# Test dependencies go here.
--r base.txt
-
-coverage
-flake8
-django-test-plus
-factory-boy
-django-coverage-plugin
-
-# pytest
-pytest-django
-pytest-cov
-pytest-freezegun
-pytest-mock
-pytest-sugar
diff --git a/template/backend/{{copier__project_slug}}/__init__.py b/template/backend/{{copier__project_slug}}/__init__.py
deleted file mode 100644
index 713e35a..0000000
--- a/template/backend/{{copier__project_slug}}/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-__version__ = "{{ copier__version }}"
-__version_info__ = tuple(
- [
- int(num) if num.isdigit() else num
- for num in __version__.replace("-", ".", 1).split(".")
- ]
-)
diff --git a/template/backend/{{copier__project_slug}}/celery.py b/template/backend/{{copier__project_slug}}/celery.py
deleted file mode 100644
index ccd9b5f..0000000
--- a/template/backend/{{copier__project_slug}}/celery.py
+++ /dev/null
@@ -1,17 +0,0 @@
-import os
-
-from celery import Celery
-
-# set the default Django settings module for the 'celery' program.
-os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
-
-app = Celery("{{ copier__project_slug }}")
-
-# Using a string here means the worker doesn't have to serialize
-# the configuration object to child processes.
-# - namespace='CELERY' means all celery-related configuration keys
-# should have a `CELERY_` prefix.
-app.config_from_object("django.conf:settings", namespace="CELERY")
-
-# Load task modules from all registered Django app configs.
-app.autodiscover_tasks()
diff --git a/template/backend/{{copier__project_slug}}/conftest.py b/template/backend/{{copier__project_slug}}/conftest.py
deleted file mode 100644
index c07f40f..0000000
--- a/template/backend/{{copier__project_slug}}/conftest.py
+++ /dev/null
@@ -1,24 +0,0 @@
-import pytest
-{%- if copier__create_nextjs_frontend %}
-from strawberry_django.test.client import TestClient
-{%- endif %}
-
-from {{ copier__project_slug }}.users.models import User
-from {{ copier__project_slug }}.users.tests.factories import UserFactory
-
-
-@pytest.fixture(autouse=True)
-def media_storage(settings, tmpdir):
- settings.MEDIA_ROOT = tmpdir.strpath
-
-
-@pytest.fixture
-def user() -> User:
- return UserFactory()
-
-
-{% if copier__create_nextjs_frontend -%}
-@pytest.fixture
-def graphql_client() -> TestClient:
- return TestClient("/graphql/")
-{%- endif %}
diff --git a/template/backend/{{copier__project_slug}}/contrib/__init__.py b/template/backend/{{copier__project_slug}}/contrib/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/template/backend/{{copier__project_slug}}/contrib/sites/__init__.py b/template/backend/{{copier__project_slug}}/contrib/sites/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/template/backend/{{copier__project_slug}}/contrib/sites/migrations/0001_initial.py b/template/backend/{{copier__project_slug}}/contrib/sites/migrations/0001_initial.py
deleted file mode 100644
index 91d4361..0000000
--- a/template/backend/{{copier__project_slug}}/contrib/sites/migrations/0001_initial.py
+++ /dev/null
@@ -1,41 +0,0 @@
-import django.contrib.sites.models
-from django.contrib.sites.models import _simple_domain_name_validator
-from django.db import migrations, models
-
-
-class Migration(migrations.Migration):
- dependencies = []
-
- operations = [
- migrations.CreateModel(
- name="Site",
- fields=[
- (
- "id",
- models.BigAutoField(
- verbose_name="ID",
- serialize=False,
- auto_created=True,
- primary_key=True,
- ),
- ),
- (
- "domain",
- models.CharField(
- max_length=100,
- verbose_name="domain name",
- validators=[_simple_domain_name_validator],
- ),
- ),
- ("name", models.CharField(max_length=50, verbose_name="display name")),
- ],
- options={
- "ordering": ("domain",),
- "db_table": "django_site",
- "verbose_name": "site",
- "verbose_name_plural": "sites",
- },
- bases=(models.Model,),
- managers=[("objects", django.contrib.sites.models.SiteManager())],
- )
- ]
diff --git a/template/backend/{{copier__project_slug}}/contrib/sites/migrations/0002_alter_domain_unique.py b/template/backend/{{copier__project_slug}}/contrib/sites/migrations/0002_alter_domain_unique.py
deleted file mode 100644
index 4359049..0000000
--- a/template/backend/{{copier__project_slug}}/contrib/sites/migrations/0002_alter_domain_unique.py
+++ /dev/null
@@ -1,19 +0,0 @@
-import django.contrib.sites.models
-from django.db import migrations, models
-
-
-class Migration(migrations.Migration):
- dependencies = [("sites", "0001_initial")]
-
- operations = [
- migrations.AlterField(
- model_name="site",
- name="domain",
- field=models.CharField(
- max_length=100,
- unique=True,
- validators=[django.contrib.sites.models._simple_domain_name_validator],
- verbose_name="domain name",
- ),
- )
- ]
diff --git a/template/backend/{{copier__project_slug}}/contrib/sites/migrations/0003_set_site_domain_and_name.py b/template/backend/{{copier__project_slug}}/contrib/sites/migrations/0003_set_site_domain_and_name.py
deleted file mode 100644
index 1470aa8..0000000
--- a/template/backend/{{copier__project_slug}}/contrib/sites/migrations/0003_set_site_domain_and_name.py
+++ /dev/null
@@ -1,28 +0,0 @@
-from django.conf import settings
-from django.db import migrations
-
-
-def update_site_forward(apps, schema_editor):
- """Set site domain and name."""
- Site = apps.get_model("sites", "Site")
- Site.objects.update_or_create(
- id=settings.SITE_ID,
- defaults={
- "domain": "{{ copier__domain_name }}",
- "name": "{{ copier__project_name }}",
- },
- )
-
-
-def update_site_backward(apps, schema_editor):
- """Revert site domain and name to default."""
- Site = apps.get_model("sites", "Site")
- Site.objects.update_or_create(
- id=settings.SITE_ID, defaults={"domain": "example.com", "name": "example.com"}
- )
-
-
-class Migration(migrations.Migration):
- dependencies = [("sites", "0002_alter_domain_unique")]
-
- operations = [migrations.RunPython(update_site_forward, update_site_backward)]
diff --git a/template/backend/{{copier__project_slug}}/contrib/sites/migrations/__init__.py b/template/backend/{{copier__project_slug}}/contrib/sites/migrations/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/template/backend/{{copier__project_slug}}/static/css/project.css b/template/backend/{{copier__project_slug}}/static/css/project.css
deleted file mode 100644
index f1d543d..0000000
--- a/template/backend/{{copier__project_slug}}/static/css/project.css
+++ /dev/null
@@ -1,13 +0,0 @@
-/* These styles are generated from project.scss. */
-
-.alert-debug {
- color: black;
- background-color: white;
- border-color: #d6e9c6;
-}
-
-.alert-error {
- color: #b94a48;
- background-color: #f2dede;
- border-color: #eed3d7;
-}
diff --git a/template/backend/{{copier__project_slug}}/static/fonts/.gitkeep b/template/backend/{{copier__project_slug}}/static/fonts/.gitkeep
deleted file mode 100644
index e69de29..0000000
diff --git a/template/backend/{{copier__project_slug}}/static/images/favicons/favicon.ico b/template/backend/{{copier__project_slug}}/static/images/favicons/favicon.ico
deleted file mode 100644
index e1c1dd1..0000000
Binary files a/template/backend/{{copier__project_slug}}/static/images/favicons/favicon.ico and /dev/null differ
diff --git a/template/backend/{{copier__project_slug}}/static/js/project.js b/template/backend/{{copier__project_slug}}/static/js/project.js
deleted file mode 100644
index d26d23b..0000000
--- a/template/backend/{{copier__project_slug}}/static/js/project.js
+++ /dev/null
@@ -1 +0,0 @@
-/* Project specific Javascript goes here. */
diff --git a/template/backend/{{copier__project_slug}}/static/sass/project.scss b/template/backend/{{copier__project_slug}}/static/sass/project.scss
deleted file mode 100644
index ac8348c..0000000
--- a/template/backend/{{copier__project_slug}}/static/sass/project.scss
+++ /dev/null
@@ -1,35 +0,0 @@
-@import "bootstrap";
-
-// project specific CSS goes here
-
-////////////////////////////////
-//Variables//
-////////////////////////////////
-
-// Alert colors
-
-$white: #fff;
-$mint-green: #d6e9c6;
-$black: #000;
-$pink: #f2dede;
-$dark-pink: #eed3d7;
-$red: #b94a48;
-
-////////////////////////////////
-//Alerts//
-////////////////////////////////
-
-// bootstrap alert CSS, translated to the django-standard levels of
-// debug, info, success, warning, error
-
-.alert-debug {
- background-color: $white;
- border-color: $mint-green;
- color: $black;
-}
-
-.alert-error {
- background-color: $pink;
- border-color: $dark-pink;
- color: $red;
-}
diff --git a/template/backend/{{copier__project_slug}}/templates/403.html b/template/backend/{{copier__project_slug}}/templates/403.html
deleted file mode 100644
index c02bd4e..0000000
--- a/template/backend/{{copier__project_slug}}/templates/403.html
+++ /dev/null
@@ -1,9 +0,0 @@
-{% raw %}{% extends "base.html" %}
-
-{% block title %}Forbidden (403){% endblock %}
-
-{% block content %}
-Forbidden (403)
-
-CSRF verification failed. Request aborted.
-{% endblock content %}{% endraw %}
diff --git a/template/backend/{{copier__project_slug}}/templates/404.html b/template/backend/{{copier__project_slug}}/templates/404.html
deleted file mode 100644
index 1687ef3..0000000
--- a/template/backend/{{copier__project_slug}}/templates/404.html
+++ /dev/null
@@ -1,9 +0,0 @@
-{% raw %}{% extends "base.html" %}
-
-{% block title %}Page not found{% endblock %}
-
-{% block content %}
-Page not found
-
-This is not the page you were looking for.
-{% endblock content %}{% endraw %}
diff --git a/template/backend/{{copier__project_slug}}/templates/500.html b/template/backend/{{copier__project_slug}}/templates/500.html
deleted file mode 100644
index 122e081..0000000
--- a/template/backend/{{copier__project_slug}}/templates/500.html
+++ /dev/null
@@ -1,13 +0,0 @@
-{% raw %}{% extends "base.html" %}
-
-{% block title %}Server Error{% endblock %}
-
-{% block content %}
-Ooops!!! 500
-
-Looks like something went wrong!
-
-We track these errors automatically, but if the problem persists feel free to contact us. In the meantime, try refreshing.
-{% endblock content %}
-
-{% endraw %}
diff --git a/template/backend/{{copier__project_slug}}/templates/account/account_inactive.html b/template/backend/{{copier__project_slug}}/templates/account/account_inactive.html
deleted file mode 100644
index fe9b680..0000000
--- a/template/backend/{{copier__project_slug}}/templates/account/account_inactive.html
+++ /dev/null
@@ -1,12 +0,0 @@
-{% raw %}{% extends "account/base.html" %}
-
-{% load i18n %}
-
-{% block head_title %}{% trans "Account Inactive" %}{% endblock %}
-
-{% block inner %}
-{% trans "Account Inactive" %}
-
-{% trans "This account is inactive." %}
-{% endblock %}
-{% endraw %}
diff --git a/template/backend/{{copier__project_slug}}/templates/account/base.html b/template/backend/{{copier__project_slug}}/templates/account/base.html
deleted file mode 100644
index cd07bba..0000000
--- a/template/backend/{{copier__project_slug}}/templates/account/base.html
+++ /dev/null
@@ -1,11 +0,0 @@
-{% raw %}{% extends "base.html" %}
-{% block title %}{% block head_title %}{% endblock head_title %}{% endblock title %}
-
-{% block content %}
-
-
- {% block inner %}{% endblock %}
-
-
-{% endblock %}
-{% endraw %}
\ No newline at end of file
diff --git a/template/backend/{{copier__project_slug}}/templates/account/email.html b/template/backend/{{copier__project_slug}}/templates/account/email.html
deleted file mode 100644
index 78d9973..0000000
--- a/template/backend/{{copier__project_slug}}/templates/account/email.html
+++ /dev/null
@@ -1,80 +0,0 @@
-{% raw %}
-{% extends "account/base.html" %}
-
-{% load i18n %}
-{% load crispy_forms_tags %}
-
-{% block head_title %}{% trans "Account" %}{% endblock %}
-
-{% block inner %}
-{% trans "E-mail Addresses" %}
-
-{% if user.emailaddress_set.all %}
-{% trans 'The following e-mail addresses are associated with your account:' %}
-
-
-
-{% else %}
-{% trans 'Warning:'%} {% trans "You currently do not have any e-mail address set up. You should really add an e-mail address so you can receive notifications, reset your password, etc." %}
-
-{% endif %}
-
-
- {% trans "Add E-mail Address" %}
-
-
-
-{% endblock %}
-
-
-{% block javascript %}
-{{ block.super }}
-
-{% endblock %}
-{% endraw %}
diff --git a/template/backend/{{copier__project_slug}}/templates/account/email_confirm.html b/template/backend/{{copier__project_slug}}/templates/account/email_confirm.html
deleted file mode 100644
index eb45cf6..0000000
--- a/template/backend/{{copier__project_slug}}/templates/account/email_confirm.html
+++ /dev/null
@@ -1,32 +0,0 @@
-{% raw %}{% extends "account/base.html" %}
-
-{% load i18n %}
-{% load account %}
-
-{% block head_title %}{% trans "Confirm E-mail Address" %}{% endblock %}
-
-
-{% block inner %}
-{% trans "Confirm E-mail Address" %}
-
-{% if confirmation %}
-
-{% user_display confirmation.email_address.user as user_display %}
-
-{% blocktrans with confirmation.email_address.email as email %}Please confirm that {{ email }} is an e-mail address for user {{ user_display }}.{% endblocktrans %}
-
-
-
-{% else %}
-
-{% url 'account_email' as email_url %}
-
-{% blocktrans %}This e-mail confirmation link expired or is invalid. Please issue a new e-mail confirmation request .{% endblocktrans %}
-
-{% endif %}
-
-{% endblock %}
-{% endraw %}
diff --git a/template/backend/{{copier__project_slug}}/templates/account/login.html b/template/backend/{{copier__project_slug}}/templates/account/login.html
deleted file mode 100644
index a719322..0000000
--- a/template/backend/{{copier__project_slug}}/templates/account/login.html
+++ /dev/null
@@ -1,48 +0,0 @@
-{% raw %}{% extends "account/base.html" %}
-
-{% load i18n %}
-{% load account socialaccount %}
-{% load crispy_forms_tags %}
-
-{% block head_title %}{% trans "Sign In" %}{% endblock %}
-
-{% block inner %}
-
-{% trans "Sign In" %}
-
-{% get_providers as socialaccount_providers %}
-
-{% if socialaccount_providers %}
-{% blocktrans with site.name as site_name %}Please sign in with one
-of your existing third party accounts. Or, sign up
-for a {{ site_name }} account and sign in below:{% endblocktrans %}
-
-
-
-
- {% include "socialaccount/snippets/provider_list.html" with process="login" %}
-
-
-
{% trans 'or' %}
-
-
-
-{% include "socialaccount/snippets/login_extra.html" %}
-
-{% else %}
-{% blocktrans %}If you have not created an account yet, then please
-sign up first.{% endblocktrans %}
-{% endif %}
-
-
-
-{% endblock %}
-{% endraw %}
diff --git a/template/backend/{{copier__project_slug}}/templates/account/logout.html b/template/backend/{{copier__project_slug}}/templates/account/logout.html
deleted file mode 100644
index baa8183..0000000
--- a/template/backend/{{copier__project_slug}}/templates/account/logout.html
+++ /dev/null
@@ -1,22 +0,0 @@
-{% raw %}{% extends "account/base.html" %}
-
-{% load i18n %}
-
-{% block head_title %}{% trans "Sign Out" %}{% endblock %}
-
-{% block inner %}
-{% trans "Sign Out" %}
-
-{% trans 'Are you sure you want to sign out?' %}
-
-
-
-
-{% endblock %}
-{% endraw %}
diff --git a/template/backend/{{copier__project_slug}}/templates/account/password_change.html b/template/backend/{{copier__project_slug}}/templates/account/password_change.html
deleted file mode 100644
index 62bbbc1..0000000
--- a/template/backend/{{copier__project_slug}}/templates/account/password_change.html
+++ /dev/null
@@ -1,17 +0,0 @@
-{% raw %}{% extends "account/base.html" %}
-
-{% load i18n %}
-{% load crispy_forms_tags %}
-
-{% block head_title %}{% trans "Change Password" %}{% endblock %}
-
-{% block inner %}
- {% trans "Change Password" %}
-
-
-{% endblock %}
-{% endraw %}
diff --git a/template/backend/{{copier__project_slug}}/templates/account/password_reset.html b/template/backend/{{copier__project_slug}}/templates/account/password_reset.html
deleted file mode 100644
index b9869fb..0000000
--- a/template/backend/{{copier__project_slug}}/templates/account/password_reset.html
+++ /dev/null
@@ -1,26 +0,0 @@
-{% raw %}{% extends "account/base.html" %}
-
-{% load i18n %}
-{% load account %}
-{% load crispy_forms_tags %}
-
-{% block head_title %}{% trans "Password Reset" %}{% endblock %}
-
-{% block inner %}
-
- {% trans "Password Reset" %}
- {% if user.is_authenticated %}
- {% include "account/snippets/already_logged_in.html" %}
- {% endif %}
-
- {% trans "Forgotten your password? Enter your e-mail address below, and we'll send you an e-mail allowing you to reset it." %}
-
-
-
- {% blocktrans %}Please contact us if you have any trouble resetting your password.{% endblocktrans %}
-{% endblock %}
-{% endraw %}
diff --git a/template/backend/{{copier__project_slug}}/templates/account/password_reset_done.html b/template/backend/{{copier__project_slug}}/templates/account/password_reset_done.html
deleted file mode 100644
index cf2129b..0000000
--- a/template/backend/{{copier__project_slug}}/templates/account/password_reset_done.html
+++ /dev/null
@@ -1,17 +0,0 @@
-{% raw %}{% extends "account/base.html" %}
-
-{% load i18n %}
-{% load account %}
-
-{% block head_title %}{% trans "Password Reset" %}{% endblock %}
-
-{% block inner %}
- {% trans "Password Reset" %}
-
- {% if user.is_authenticated %}
- {% include "account/snippets/already_logged_in.html" %}
- {% endif %}
-
- {% blocktrans %}We have sent you an e-mail. Please contact us if you do not receive it within a few minutes.{% endblocktrans %}
-{% endblock %}
-{% endraw %}
diff --git a/template/backend/{{copier__project_slug}}/templates/account/password_reset_from_key.html b/template/backend/{{copier__project_slug}}/templates/account/password_reset_from_key.html
deleted file mode 100644
index 671eb12..0000000
--- a/template/backend/{{copier__project_slug}}/templates/account/password_reset_from_key.html
+++ /dev/null
@@ -1,25 +0,0 @@
-{% raw %}{% extends "account/base.html" %}
-
-{% load i18n %}
-{% load crispy_forms_tags %}
-{% block head_title %}{% trans "Change Password" %}{% endblock %}
-
-{% block inner %}
- {% if token_fail %}{% trans "Bad Token" %}{% else %}{% trans "Change Password" %}{% endif %}
-
- {% if token_fail %}
- {% url 'account_reset_password' as passwd_reset_url %}
- {% blocktrans %}The password reset link was invalid, possibly because it has already been used. Please request a new password reset .{% endblocktrans %}
- {% else %}
- {% if form %}
-
- {% else %}
- {% trans 'Your password is now changed.' %}
- {% endif %}
- {% endif %}
-{% endblock %}
-{% endraw %}
diff --git a/template/backend/{{copier__project_slug}}/templates/account/password_reset_from_key_done.html b/template/backend/{{copier__project_slug}}/templates/account/password_reset_from_key_done.html
deleted file mode 100644
index 925b7aa..0000000
--- a/template/backend/{{copier__project_slug}}/templates/account/password_reset_from_key_done.html
+++ /dev/null
@@ -1,10 +0,0 @@
-{% raw %}{% extends "account/base.html" %}
-
-{% load i18n %}
-{% block head_title %}{% trans "Change Password" %}{% endblock %}
-
-{% block inner %}
- {% trans "Change Password" %}
- {% trans 'Your password is now changed.' %}
-{% endblock %}
-{% endraw %}
diff --git a/template/backend/{{copier__project_slug}}/templates/account/password_set.html b/template/backend/{{copier__project_slug}}/templates/account/password_set.html
deleted file mode 100644
index 563a0b1..0000000
--- a/template/backend/{{copier__project_slug}}/templates/account/password_set.html
+++ /dev/null
@@ -1,17 +0,0 @@
-{% raw %}{% extends "account/base.html" %}
-
-{% load i18n %}
-{% load crispy_forms_tags %}
-
-{% block head_title %}{% trans "Set Password" %}{% endblock %}
-
-{% block inner %}
- {% trans "Set Password" %}
-
-
-{% endblock %}
-{% endraw %}
diff --git a/template/backend/{{copier__project_slug}}/templates/account/signup.html b/template/backend/{{copier__project_slug}}/templates/account/signup.html
deleted file mode 100644
index 80490a2..0000000
--- a/template/backend/{{copier__project_slug}}/templates/account/signup.html
+++ /dev/null
@@ -1,23 +0,0 @@
-{% raw %}{% extends "account/base.html" %}
-
-{% load i18n %}
-{% load crispy_forms_tags %}
-
-{% block head_title %}{% trans "Signup" %}{% endblock %}
-
-{% block inner %}
-{% trans "Sign Up" %}
-
-{% blocktrans %}Already have an account? Then please sign in .{% endblocktrans %}
-
-
-
-{% endblock %}
-{% endraw %}
diff --git a/template/backend/{{copier__project_slug}}/templates/account/signup_closed.html b/template/backend/{{copier__project_slug}}/templates/account/signup_closed.html
deleted file mode 100644
index eca9b15..0000000
--- a/template/backend/{{copier__project_slug}}/templates/account/signup_closed.html
+++ /dev/null
@@ -1,12 +0,0 @@
-{% raw %}{% extends "account/base.html" %}
-
-{% load i18n %}
-
-{% block head_title %}{% trans "Sign Up Closed" %}{% endblock %}
-
-{% block inner %}
-{% trans "Sign Up Closed" %}
-
-{% trans "We are sorry, but the sign up is currently closed." %}
-{% endblock %}
-{% endraw %}
diff --git a/template/backend/{{copier__project_slug}}/templates/account/verification_sent.html b/template/backend/{{copier__project_slug}}/templates/account/verification_sent.html
deleted file mode 100644
index ccc8d9a..0000000
--- a/template/backend/{{copier__project_slug}}/templates/account/verification_sent.html
+++ /dev/null
@@ -1,13 +0,0 @@
-{% raw %}{% extends "account/base.html" %}
-
-{% load i18n %}
-
-{% block head_title %}{% trans "Verify Your E-mail Address" %}{% endblock %}
-
-{% block inner %}
- {% trans "Verify Your E-mail Address" %}
-
- {% blocktrans %}We have sent an e-mail to you for verification. Follow the link provided to finalize the signup process. Please contact us if you do not receive it within a few minutes.{% endblocktrans %}
-
-{% endblock %}
-{% endraw %}
diff --git a/template/backend/{{copier__project_slug}}/templates/account/verified_email_required.html b/template/backend/{{copier__project_slug}}/templates/account/verified_email_required.html
deleted file mode 100644
index f3078b6..0000000
--- a/template/backend/{{copier__project_slug}}/templates/account/verified_email_required.html
+++ /dev/null
@@ -1,24 +0,0 @@
-{% raw %}{% extends "account/base.html" %}
-
-{% load i18n %}
-
-{% block head_title %}{% trans "Verify Your E-mail Address" %}{% endblock %}
-
-{% block inner %}
-{% trans "Verify Your E-mail Address" %}
-
-{% url 'account_email' as email_url %}
-
-{% blocktrans %}This part of the site requires us to verify that
-you are who you claim to be. For this purpose, we require that you
-verify ownership of your e-mail address. {% endblocktrans %}
-
-{% blocktrans %}We have sent an e-mail to you for
-verification. Please click on the link inside this e-mail. Please
-contact us if you do not receive it within a few minutes.{% endblocktrans %}
-
-{% blocktrans %}Note: you can still change your e-mail address .{% endblocktrans %}
-
-
-{% endblock %}
-{% endraw %}
diff --git a/template/backend/{{copier__project_slug}}/templates/base.html b/template/backend/{{copier__project_slug}}/templates/base.html
deleted file mode 100644
index db8bae7..0000000
--- a/template/backend/{{copier__project_slug}}/templates/base.html
+++ /dev/null
@@ -1,106 +0,0 @@
-{% raw %}{% load static i18n {% endraw %}{% raw %}%}
-
-
-
-
- {% block title %}{% endraw %}{{ copier__project_name }}{% raw %}{% endblock title %}
-
-
-
-
-
-
-
-
-
- {% block css %}
- {% endraw %}{% raw %}
-
-
- {% endraw %}{% raw %}
-
-
-
-
-
- {% endblock %}
-
-
-
-
-
-
-
-
-
- {% if messages %}
- {% for message in messages %}
-
{{ message }}×
- {% endfor %}
- {% endif %}
-
- {% block content %}
-
Welcome to your brand-new Scaf -powered Django project! Your development journey starts here. Explore, build, and innovate with speed and ease.
- {% endblock content %}
-
-
-
- {% block modal %}{% endblock modal %}
-
-
-
- {% block javascript %}
-
-
-
-
-
-
-
-
-
- {% endblock javascript %}
-
-
- {% endraw %}
diff --git a/template/backend/{{copier__project_slug}}/templates/pages/about.html b/template/backend/{{copier__project_slug}}/templates/pages/about.html
deleted file mode 100644
index 94beff9..0000000
--- a/template/backend/{{copier__project_slug}}/templates/pages/about.html
+++ /dev/null
@@ -1 +0,0 @@
-{% raw %}{% extends "base.html" %}{% endraw %}
\ No newline at end of file
diff --git a/template/backend/{{copier__project_slug}}/templates/pages/home.html b/template/backend/{{copier__project_slug}}/templates/pages/home.html
deleted file mode 100644
index 94beff9..0000000
--- a/template/backend/{{copier__project_slug}}/templates/pages/home.html
+++ /dev/null
@@ -1 +0,0 @@
-{% raw %}{% extends "base.html" %}{% endraw %}
\ No newline at end of file
diff --git a/template/backend/{{copier__project_slug}}/templates/users/user_detail.html b/template/backend/{{copier__project_slug}}/templates/users/user_detail.html
deleted file mode 100644
index 47b6112..0000000
--- a/template/backend/{{copier__project_slug}}/templates/users/user_detail.html
+++ /dev/null
@@ -1,36 +0,0 @@
-{% raw %}{% extends "base.html" %}
-{% load static %}
-
-{% block title %}User: {{ object.username }}{% endblock %}
-
-{% block content %}
-
-
-
-
-
-
{{ object.username }}
- {% if object.name %}
-
{{ object.name }}
- {% endif %}
-
-
-
-{% if object == request.user %}
-
-
-
-{% endif %}
-
-
-
-{% endblock content %}
-{% endraw %}
diff --git a/template/backend/{{copier__project_slug}}/templates/users/user_form.html b/template/backend/{{copier__project_slug}}/templates/users/user_form.html
deleted file mode 100644
index e9da0d4..0000000
--- a/template/backend/{{copier__project_slug}}/templates/users/user_form.html
+++ /dev/null
@@ -1,17 +0,0 @@
-{% raw %}{% extends "base.html" %}
-{% load crispy_forms_tags %}
-
-{% block title %}{{ user.username }}{% endblock %}
-
-{% block content %}
- {{ user.username }}
-
-{% endblock %}{% endraw %}
diff --git a/template/backend/{{copier__project_slug}}/users/__init__.py b/template/backend/{{copier__project_slug}}/users/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/template/backend/{{copier__project_slug}}/users/adapters.py b/template/backend/{{copier__project_slug}}/users/adapters.py
deleted file mode 100644
index 0d206fa..0000000
--- a/template/backend/{{copier__project_slug}}/users/adapters.py
+++ /dev/null
@@ -1,16 +0,0 @@
-from typing import Any
-
-from allauth.account.adapter import DefaultAccountAdapter
-from allauth.socialaccount.adapter import DefaultSocialAccountAdapter
-from django.conf import settings
-from django.http import HttpRequest
-
-
-class AccountAdapter(DefaultAccountAdapter):
- def is_open_for_signup(self, request: HttpRequest):
- return getattr(settings, "ACCOUNT_ALLOW_REGISTRATION", True)
-
-
-class SocialAccountAdapter(DefaultSocialAccountAdapter):
- def is_open_for_signup(self, request: HttpRequest, sociallogin: Any):
- return getattr(settings, "ACCOUNT_ALLOW_REGISTRATION", True)
diff --git a/template/backend/{{copier__project_slug}}/users/admin.py b/template/backend/{{copier__project_slug}}/users/admin.py
deleted file mode 100644
index 1e6dd25..0000000
--- a/template/backend/{{copier__project_slug}}/users/admin.py
+++ /dev/null
@@ -1,22 +0,0 @@
-from django.contrib import admin
-from django.contrib.auth import admin as auth_admin
-from django.contrib.auth import get_user_model
-from {{copier__project_slug}}.users.forms import (UserChangeForm,
- UserCreationForm)
-
-User = get_user_model()
-
-
-@admin.register(User)
-class UserAdmin(auth_admin.UserAdmin):
- form = UserChangeForm
- add_form = UserCreationForm
- fieldsets = (("User", {"fields": ("name",)}),) + auth_admin.UserAdmin.fieldsets
- add_fieldsets = (
- (None, {
- 'classes': ('wide',),
- 'fields': ('username', 'email', 'name', 'password1', 'password2'),
- }),
- )
- list_display = ["username", "name", "is_superuser"]
- search_fields = ["name"]
diff --git a/template/backend/{{copier__project_slug}}/users/apps.py b/template/backend/{{copier__project_slug}}/users/apps.py
deleted file mode 100644
index 3ac2ce3..0000000
--- a/template/backend/{{copier__project_slug}}/users/apps.py
+++ /dev/null
@@ -1,13 +0,0 @@
-from django.apps import AppConfig
-from django.utils.translation import gettext_lazy as _
-
-
-class UsersConfig(AppConfig):
- name = "{{ copier__project_slug }}.users"
- verbose_name = _("Users")
-
- def ready(self):
- try:
- import {{ copier__project_slug }}.users.signals # noqa F401
- except ImportError:
- pass
diff --git a/template/backend/{{copier__project_slug}}/users/forms.py b/template/backend/{{copier__project_slug}}/users/forms.py
deleted file mode 100644
index cf4b25a..0000000
--- a/template/backend/{{copier__project_slug}}/users/forms.py
+++ /dev/null
@@ -1,29 +0,0 @@
-from django.contrib.auth import forms, get_user_model
-from django.core.exceptions import ValidationError
-from django.utils.translation import gettext_lazy as _
-
-User = get_user_model()
-
-
-class UserChangeForm(forms.UserChangeForm):
- class Meta(forms.UserChangeForm.Meta):
- model = User
-
-
-class UserCreationForm(forms.UserCreationForm):
- error_message = forms.UserCreationForm.error_messages.update(
- {"duplicate_username": _("This username has already been taken.")}
- )
-
- class Meta(forms.UserCreationForm.Meta):
- model = User
-
- def clean_username(self):
- username = self.cleaned_data["username"]
-
- try:
- User.objects.get(username=username)
- except User.DoesNotExist:
- return username
-
- raise ValidationError(self.error_messages["duplicate_username"])
diff --git a/template/backend/{{copier__project_slug}}/users/migrations/0001_initial.py b/template/backend/{{copier__project_slug}}/users/migrations/0001_initial.py
deleted file mode 100644
index b0e6bd5..0000000
--- a/template/backend/{{copier__project_slug}}/users/migrations/0001_initial.py
+++ /dev/null
@@ -1,131 +0,0 @@
-import django.contrib.auth.models
-import django.contrib.auth.validators
-import django.utils.timezone
-from django.db import migrations, models
-
-
-class Migration(migrations.Migration):
- initial = True
-
- dependencies = [("auth", "0008_alter_user_username_max_length")]
-
- operations = [
- migrations.CreateModel(
- name="User",
- fields=[
- (
- "id",
- models.BigAutoField(
- auto_created=True,
- primary_key=True,
- serialize=False,
- verbose_name="ID",
- ),
- ),
- ("password", models.CharField(max_length=128, verbose_name="password")),
- (
- "last_login",
- models.DateTimeField(
- blank=True, null=True, verbose_name="last login"
- ),
- ),
- (
- "is_superuser",
- models.BooleanField(
- default=False,
- help_text="Designates that this user has all permissions without explicitly assigning them.",
- verbose_name="superuser status",
- ),
- ),
- (
- "username",
- models.CharField(
- error_messages={
- "unique": "A user with that username already exists."
- },
- help_text="Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.",
- max_length=150,
- unique=True,
- validators=[
- django.contrib.auth.validators.UnicodeUsernameValidator()
- ],
- verbose_name="username",
- ),
- ),
- (
- "first_name",
- models.CharField(
- blank=True, max_length=30, verbose_name="first name"
- ),
- ),
- (
- "last_name",
- models.CharField(
- blank=True, max_length=150, verbose_name="last name"
- ),
- ),
- (
- "email",
- models.EmailField(
- blank=True, max_length=254, verbose_name="email address"
- ),
- ),
- (
- "is_staff",
- models.BooleanField(
- default=False,
- help_text="Designates whether the user can log into this admin site.",
- verbose_name="staff status",
- ),
- ),
- (
- "is_active",
- models.BooleanField(
- default=True,
- help_text="Designates whether this user should be treated as active. Unselect this instead of deleting accounts.",
- verbose_name="active",
- ),
- ),
- (
- "date_joined",
- models.DateTimeField(
- default=django.utils.timezone.now, verbose_name="date joined"
- ),
- ),
- (
- "name",
- models.CharField(
- blank=True, max_length=255, verbose_name="Name of User"
- ),
- ),
- (
- "groups",
- models.ManyToManyField(
- blank=True,
- help_text="The groups this user belongs to. A user will get all permissions granted to each of their groups.",
- related_name="user_set",
- related_query_name="user",
- to="auth.Group",
- verbose_name="groups",
- ),
- ),
- (
- "user_permissions",
- models.ManyToManyField(
- blank=True,
- help_text="Specific permissions for this user.",
- related_name="user_set",
- related_query_name="user",
- to="auth.Permission",
- verbose_name="user permissions",
- ),
- ),
- ],
- options={
- "verbose_name_plural": "users",
- "verbose_name": "user",
- "abstract": False,
- },
- managers=[("objects", django.contrib.auth.models.UserManager())],
- )
- ]
diff --git a/template/backend/{{copier__project_slug}}/users/migrations/__init__.py b/template/backend/{{copier__project_slug}}/users/migrations/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/template/backend/{{copier__project_slug}}/users/models.py b/template/backend/{{copier__project_slug}}/users/models.py
deleted file mode 100644
index 653012d..0000000
--- a/template/backend/{{copier__project_slug}}/users/models.py
+++ /dev/null
@@ -1,13 +0,0 @@
-from django.contrib.auth.models import AbstractUser
-from django.db.models import CharField
-from django.urls import reverse
-from django.utils.translation import gettext_lazy as _
-
-
-class User(AbstractUser):
- # First Name and Last Name do not cover name patterns
- # around the globe.
- name = CharField(_("Name of User"), blank=True, max_length=255)
-
- def get_absolute_url(self):
- return reverse("users:detail", kwargs={"username": self.username})
diff --git a/template/backend/{{copier__project_slug}}/users/mutations.py b/template/backend/{{copier__project_slug}}/users/mutations.py
deleted file mode 100644
index 32e5fa9..0000000
--- a/template/backend/{{copier__project_slug}}/users/mutations.py
+++ /dev/null
@@ -1,35 +0,0 @@
-import strawberry
-import strawberry_django
-from django.contrib.auth import get_user_model
-from strawberry_django import mutations
-
-from .types import UserType
-
-User = get_user_model()
-
-
-@strawberry_django.input(User)
-class UserRegistrationInput:
- username: strawberry.auto
- password: strawberry.auto
-
-
-@strawberry_django.partial(User)
-class UserPartialUpdateInput:
- id: strawberry.auto
- name: strawberry.auto
-
-
-@strawberry.type
-class UserMutation:
- """
- User mutations
- """
-
- # Auth mutations
- login: UserType = strawberry_django.auth.login()
- logout = strawberry_django.auth.logout()
- register: UserType = strawberry_django.auth.register(UserRegistrationInput)
-
- # User mutations
- update_user: UserType = mutations.update(UserPartialUpdateInput)
diff --git a/template/backend/{{copier__project_slug}}/users/queries.py b/template/backend/{{copier__project_slug}}/users/queries.py
deleted file mode 100644
index 1a09bc5..0000000
--- a/template/backend/{{copier__project_slug}}/users/queries.py
+++ /dev/null
@@ -1,13 +0,0 @@
-import strawberry
-import strawberry_django
-
-from .types import UserType
-
-
-@strawberry.type
-class UserQuery:
- """
- User queries
- """
-
- me: UserType = strawberry_django.auth.current_user()
diff --git a/template/backend/{{copier__project_slug}}/users/tasks.py b/template/backend/{{copier__project_slug}}/users/tasks.py
deleted file mode 100644
index c99341c..0000000
--- a/template/backend/{{copier__project_slug}}/users/tasks.py
+++ /dev/null
@@ -1,11 +0,0 @@
-from django.contrib.auth import get_user_model
-
-from config import celery_app
-
-User = get_user_model()
-
-
-@celery_app.task()
-def get_users_count():
- """A pointless Celery task to demonstrate usage."""
- return User.objects.count()
diff --git a/template/backend/{{copier__project_slug}}/users/tests/__init__.py b/template/backend/{{copier__project_slug}}/users/tests/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/template/backend/{{copier__project_slug}}/users/tests/factories.py b/template/backend/{{copier__project_slug}}/users/tests/factories.py
deleted file mode 100644
index 36f9634..0000000
--- a/template/backend/{{copier__project_slug}}/users/tests/factories.py
+++ /dev/null
@@ -1,32 +0,0 @@
-from collections.abc import Sequence
-from typing import Any
-
-from django.contrib.auth import get_user_model
-from factory import Faker, post_generation
-from factory.django import DjangoModelFactory
-
-
-class UserFactory(DjangoModelFactory):
- username = Faker("user_name")
- email = Faker("email")
- name = Faker("name")
-
- @post_generation
- def password(self, create: bool, extracted: Sequence[Any], **kwargs):
- password = (
- extracted
- if extracted
- else Faker(
- "password",
- length=42,
- special_chars=True,
- digits=True,
- upper_case=True,
- lower_case=True,
- ).evaluate(None, None, extra={"locale": None})
- )
- self.set_password(password)
-
- class Meta:
- model = get_user_model()
- django_get_or_create = ["username"]
diff --git a/template/backend/{{copier__project_slug}}/users/tests/test_forms.py b/template/backend/{{copier__project_slug}}/users/tests/test_forms.py
deleted file mode 100644
index 2c8d86d..0000000
--- a/template/backend/{{copier__project_slug}}/users/tests/test_forms.py
+++ /dev/null
@@ -1,40 +0,0 @@
-import pytest
-
-from {{ copier__project_slug }}.users.forms import UserCreationForm
-from {{ copier__project_slug }}.users.tests.factories import UserFactory
-
-pytestmark = pytest.mark.django_db
-
-
-class TestUserCreationForm:
- def test_clean_username(self):
- # A user with proto_user params does not exist yet.
- proto_user = UserFactory.build()
-
- form = UserCreationForm(
- {
- "username": proto_user.username,
- "password1": proto_user._password,
- "password2": proto_user._password,
- }
- )
-
- assert form.is_valid()
- assert form.clean_username() == proto_user.username
-
- # Creating a user.
- form.save()
-
- # The user with proto_user params already exists,
- # hence cannot be created.
- form = UserCreationForm(
- {
- "username": proto_user.username,
- "password1": proto_user._password,
- "password2": proto_user._password,
- }
- )
-
- assert not form.is_valid()
- assert len(form.errors) == 1
- assert "username" in form.errors
diff --git a/template/backend/{{copier__project_slug}}/users/tests/test_graphql_views.py b/template/backend/{{copier__project_slug}}/users/tests/test_graphql_views.py
deleted file mode 100644
index 475694f..0000000
--- a/template/backend/{{copier__project_slug}}/users/tests/test_graphql_views.py
+++ /dev/null
@@ -1,117 +0,0 @@
-import pytest
-from strawberry_django.test.client import TestClient
-
-from {{ copier__project_slug }}.users.models import User
-
-pytestmark = pytest.mark.django_db
-
-
-class TestGraphQLView:
- """
- Tests for the GraphQL view.
- """
-
- def test_me_unauthenticated(self, graphql_client: TestClient):
- """
- Test that the `me` query does not work when the user is not logged in.
- """
- res = graphql_client.query(
- """
- query Me {
- me {
- id
- username
- name
- email
- isStaff
- isActive
- }
- }
- """,
- asserts_errors=False,
- )
- assert not res.data
- assert res.errors[0]["message"] == "User is not logged in."
-
- def test_me_authenticated(self, user: User, graphql_client: TestClient):
- """
- Test that the `me` query returns the user information if authenticated.
- """
- with graphql_client.login(user):
- res = graphql_client.query(
- """
- query Me {
- me {
- email
- id
- isActive
- isStaff
- name
- username
- }
- }
- """
- )
-
- assert res.errors is None
- assert res.data == {
- "me": {
- "id": str(user.id),
- "email": user.email,
- "name": user.name,
- "username": user.username,
- "isActive": user.is_active,
- "isStaff": user.is_staff,
- },
- }
-
- def test_update_user_unauthenticated(self, graphql_client: TestClient):
- """
- Test that the `update_user` mutation does not allow unauthenticated users.
- """
- res = graphql_client.query(
- """
- mutation MyMutation {
- updateUser(input: {id: 1 name: "John Doe"}) {
- ... on UserType {
- id
- email
- name
- }
- }
- }
- """
- )
-
- assert res.errors is None
- assert res.data == {"updateUser": {}}
-
- def test_update_user_authenticated(self, user: User, graphql_client: TestClient):
- """
- Test that the `update_user` mutation is updating user information.
- """
- expected_name = "John Doe"
- with graphql_client.login(user):
- res = graphql_client.query(
- """
- mutation UpdateUser($input: UserPartialUpdateInput!) {
- updateUser(input: $input) {
- ... on UserType {
- id
- email
- name
- }
- }
- }
- """,
- variables={"input": {"id": str(user.id), "name": expected_name}},
- )
-
- assert res.errors is None
- assert res.data == {
- "updateUser": {
- "id": str(user.id),
- "email": user.email,
- "name": expected_name,
- }
- }
diff --git a/template/backend/{{copier__project_slug}}/users/tests/test_models.py b/template/backend/{{copier__project_slug}}/users/tests/test_models.py
deleted file mode 100644
index b16e929..0000000
--- a/template/backend/{{copier__project_slug}}/users/tests/test_models.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import pytest
-
-from {{ copier__project_slug }}.users.models import User
-
-pytestmark = pytest.mark.django_db
-
-
-def test_user_get_absolute_url(user: User):
- assert user.get_absolute_url() == f"/users/{user.username}/"
diff --git a/template/backend/{{copier__project_slug}}/users/tests/test_tasks.py b/template/backend/{{copier__project_slug}}/users/tests/test_tasks.py
deleted file mode 100644
index cbdc88b..0000000
--- a/template/backend/{{copier__project_slug}}/users/tests/test_tasks.py
+++ /dev/null
@@ -1,16 +0,0 @@
-import pytest
-from celery.result import EagerResult
-
-from {{ copier__project_slug }}.users.tasks import get_users_count
-from {{ copier__project_slug }}.users.tests.factories import UserFactory
-
-pytestmark = pytest.mark.django_db
-
-
-def test_user_count(settings):
- """A basic test to execute the get_users_count Celery task."""
- UserFactory.create_batch(3)
- settings.CELERY_TASK_ALWAYS_EAGER = True
- task_result = get_users_count.delay()
- assert isinstance(task_result, EagerResult)
- assert task_result.result == 3
diff --git a/template/backend/{{copier__project_slug}}/users/tests/test_urls.py b/template/backend/{{copier__project_slug}}/users/tests/test_urls.py
deleted file mode 100644
index ee8f968..0000000
--- a/template/backend/{{copier__project_slug}}/users/tests/test_urls.py
+++ /dev/null
@@ -1,24 +0,0 @@
-import pytest
-from django.urls import resolve, reverse
-
-from {{ copier__project_slug }}.users.models import User
-
-pytestmark = pytest.mark.django_db
-
-
-def test_detail(user: User):
- assert (
- reverse("users:detail", kwargs={"username": user.username})
- == f"/users/{user.username}/"
- )
- assert resolve(f"/users/{user.username}/").view_name == "users:detail"
-
-
-def test_update():
- assert reverse("users:update") == "/users/~update/"
- assert resolve("/users/~update/").view_name == "users:update"
-
-
-def test_redirect():
- assert reverse("users:redirect") == "/users/~redirect/"
- assert resolve("/users/~redirect/").view_name == "users:redirect"
diff --git a/template/backend/{{copier__project_slug}}/users/tests/test_views.py b/template/backend/{{copier__project_slug}}/users/tests/test_views.py
deleted file mode 100644
index 1a3c908..0000000
--- a/template/backend/{{copier__project_slug}}/users/tests/test_views.py
+++ /dev/null
@@ -1,75 +0,0 @@
-import pytest
-from django.contrib.auth.models import AnonymousUser
-from django.http.response import Http404
-from django.test import RequestFactory
-
-from {{ copier__project_slug }}.users.models import User
-from {{ copier__project_slug }}.users.tests.factories import UserFactory
-from {{ copier__project_slug }}.users.views import UserRedirectView, UserUpdateView, user_detail_view
-
-pytestmark = pytest.mark.django_db
-
-
-class TestUserUpdateView:
- """
- TODO:
- extracting view initialization code as class-scoped fixture
- would be great if only pytest-django supported non-function-scoped
- fixture db access -- this is a work-in-progress for now:
- https://github.com/pytest-dev/pytest-django/pull/258
- """
-
- def test_get_success_url(self, user: User, rf: RequestFactory):
- view = UserUpdateView()
- request = rf.get("/fake-url/")
- request.user = user
-
- view.request = request
-
- assert view.get_success_url() == f"/users/{user.username}/"
-
- def test_get_object(self, user: User, rf: RequestFactory):
- view = UserUpdateView()
- request = rf.get("/fake-url/")
- request.user = user
-
- view.request = request
-
- assert view.get_object() == user
-
-
-class TestUserRedirectView:
- def test_get_redirect_url(self, user: User, rf: RequestFactory):
- view = UserRedirectView()
- request = rf.get("/fake-url")
- request.user = user
-
- view.request = request
-
- assert view.get_redirect_url() == f"/users/{user.username}/"
-
-
-class TestUserDetailView:
- def test_authenticated(self, user: User, rf: RequestFactory):
- request = rf.get("/fake-url/")
- request.user = UserFactory()
-
- response = user_detail_view(request, username=user.username)
-
- assert response.status_code == 200
-
- def test_not_authenticated(self, user: User, rf: RequestFactory):
- request = rf.get("/fake-url/")
- request.user = AnonymousUser() # type: ignore
-
- response = user_detail_view(request, username=user.username)
-
- assert response.status_code == 302
- assert response.url == "/accounts/login/?next=/fake-url/"
-
- def test_case_sensitivity(self, rf: RequestFactory):
- request = rf.get("/fake-url/")
- request.user = UserFactory(username="UserName")
-
- with pytest.raises(Http404):
- user_detail_view(request, username="username")
diff --git a/template/backend/{{copier__project_slug}}/users/types.py b/template/backend/{{copier__project_slug}}/users/types.py
deleted file mode 100644
index c01fc8e..0000000
--- a/template/backend/{{copier__project_slug}}/users/types.py
+++ /dev/null
@@ -1,19 +0,0 @@
-import strawberry
-import strawberry_django
-from django.contrib.auth import get_user_model
-
-User = get_user_model()
-
-
-@strawberry_django.type(User)
-class UserType:
- """
- User type
- """
-
- id: strawberry.auto
- username: strawberry.auto
- name: strawberry.auto
- email: strawberry.auto
- is_staff: strawberry.auto
- is_active: strawberry.auto
diff --git a/template/backend/{{copier__project_slug}}/users/urls.py b/template/backend/{{copier__project_slug}}/users/urls.py
deleted file mode 100644
index 32796d5..0000000
--- a/template/backend/{{copier__project_slug}}/users/urls.py
+++ /dev/null
@@ -1,9 +0,0 @@
-from django.urls import path
-from {{ copier__project_slug }}.users.views import user_detail_view, user_redirect_view, user_update_view
-
-app_name = "users"
-urlpatterns = [
- path("~redirect/", view=user_redirect_view, name="redirect"),
- path("~update/", view=user_update_view, name="update"),
- path("/", view=user_detail_view, name="detail"),
-]
diff --git a/template/backend/{{copier__project_slug}}/users/views.py b/template/backend/{{copier__project_slug}}/users/views.py
deleted file mode 100644
index 4a6e397..0000000
--- a/template/backend/{{copier__project_slug}}/users/views.py
+++ /dev/null
@@ -1,47 +0,0 @@
-from django.contrib import messages
-from django.contrib.auth import get_user_model
-from django.contrib.auth.mixins import LoginRequiredMixin
-from django.urls import reverse
-from django.utils.translation import gettext_lazy as _
-from django.views.generic import DetailView, RedirectView, UpdateView
-
-User = get_user_model()
-
-
-class UserDetailView(LoginRequiredMixin, DetailView):
- model = User
- slug_field = "username"
- slug_url_kwarg = "username"
-
-
-user_detail_view = UserDetailView.as_view()
-
-
-class UserUpdateView(LoginRequiredMixin, UpdateView):
- model = User
- fields = ["name"]
-
- def get_success_url(self):
- return reverse("users:detail", kwargs={"username": self.request.user.username})
-
- def get_object(self):
- return User.objects.get(username=self.request.user.username)
-
- def form_valid(self, form):
- messages.add_message(
- self.request, messages.INFO, _("Infos successfully updated")
- )
- return super().form_valid(form)
-
-
-user_update_view = UserUpdateView.as_view()
-
-
-class UserRedirectView(LoginRequiredMixin, RedirectView):
- permanent = False
-
- def get_redirect_url(self):
- return reverse("users:detail", kwargs={"username": self.request.user.username})
-
-
-user_redirect_view = UserRedirectView.as_view()
diff --git a/template/backend/{{copier__project_slug}}/utils/__init__.py b/template/backend/{{copier__project_slug}}/utils/__init__.py
deleted file mode 100644
index 2ce2620..0000000
--- a/template/backend/{{copier__project_slug}}/utils/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from .debugger import pycharm_debugger, vscode_debugger # noqa: F401
diff --git a/template/backend/{{copier__project_slug}}/utils/cloud_storage.py b/template/backend/{{copier__project_slug}}/utils/cloud_storage.py
deleted file mode 100644
index e9c01d6..0000000
--- a/template/backend/{{copier__project_slug}}/utils/cloud_storage.py
+++ /dev/null
@@ -1,10 +0,0 @@
-from storages.backends.s3boto3 import S3Boto3Storage
-
-
-class S3StaticStorage(S3Boto3Storage):
- location = "static"
-
-
-class S3MediaStorage(S3Boto3Storage):
- location = "media"
- file_overwrite = False
diff --git a/template/backend/{{copier__project_slug}}/utils/context_processors.py b/template/backend/{{copier__project_slug}}/utils/context_processors.py
deleted file mode 100644
index de40507..0000000
--- a/template/backend/{{copier__project_slug}}/utils/context_processors.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from django.conf import settings
-
-
-def settings_context(_request):
- return {"settings": settings}
diff --git a/template/backend/{{copier__project_slug}}/utils/debugger.py b/template/backend/{{copier__project_slug}}/utils/debugger.py
deleted file mode 100644
index 957bc41..0000000
--- a/template/backend/{{copier__project_slug}}/utils/debugger.py
+++ /dev/null
@@ -1,21 +0,0 @@
-import logging
-import os
-
-logger = logging.getLogger(__name__)
-
-
-def pycharm_debugger():
- logger.info("Pycharm pydevd connecting...")
- import pydevd_pycharm
- host_ip = os.getenv("DOCKER_GATEWAY_IP")
- try:
- pydevd_pycharm.settrace(
- host_ip, port=6400, stdoutToServer=True, stderrToServer=True, suspend=False
- )
- except ConnectionRefusedError:
- msg = "Debugger connection failed. Check IDE debugger is running and try again. Continuing without debugger."
- logger.error(msg.upper())
-
-
-def vscode_debugger():
- raise NotImplementedError("VSCode debugger not implemented")
diff --git a/template/backend/{{copier__project_slug}}/utils/healthcheck.py b/template/backend/{{copier__project_slug}}/utils/healthcheck.py
deleted file mode 100644
index 540914c..0000000
--- a/template/backend/{{copier__project_slug}}/utils/healthcheck.py
+++ /dev/null
@@ -1,67 +0,0 @@
-import logging
-
-from django.http import HttpResponse, HttpResponseServerError
-
-logger = logging.getLogger("healthz")
-
-
-class HealthCheckMiddleware(object):
- def __init__(self, get_response):
- self.get_response = get_response
- # One-time configuration and initialization.
-
- def __call__(self, request):
- if request.method == "GET":
- if request.path == "/readiness":
- return self.readiness(request)
- elif request.path == "/healthz":
- return self.healthz(request)
- return self.get_response(request)
-
- def healthz(self, request):
- """
- Returns that the server is alive.
- """
- return HttpResponse("OK")
-
- def readiness(self, request):
- # Connect to each database and do a generic standard SQL query
- # that doesn't write any data and doesn't depend on any tables
- # being present.
- from django.db import connections
-
- for name in connections:
- cursor = None
- connection = connections[name]
- try:
- cursor = connection.cursor()
- cursor.execute("SELECT 1;")
- row = cursor.fetchone()
- if row is None:
- return HttpResponseServerError("db: invalid response")
- except Exception as e:
- logger.exception(e)
- return HttpResponseServerError("db: cannot connect to database.")
- finally:
- if cursor:
- cursor.close()
- connection.close()
-
- # Call get_stats() to connect to each memcached instance and get its stats.
- # This can effectively check if each is online.
- try:
- from django.core.cache import caches
- from django.core.cache.backends.memcached import BaseMemcachedCache
-
- for cache in caches.all():
- if isinstance(cache, BaseMemcachedCache):
- stats = cache._cache.get_stats()
- if len(stats) != len(cache._servers):
- return HttpResponseServerError(
- "cache: cannot connect to cache."
- )
- except Exception as e:
- logger.exception(e)
- return HttpResponseServerError("cache: cannot connect to cache.")
-
- return HttpResponse("OK")
diff --git a/template/bitbucket-pipelines.yml b/template/bitbucket-pipelines.yml
deleted file mode 100644
index aa97b25..0000000
--- a/template/bitbucket-pipelines.yml
+++ /dev/null
@@ -1,62 +0,0 @@
-image: python:3.12
-
-definitions:
- services:
- docker:
- memory: 3072
-
- postgres:
- image: postgres:17
- environment:
- POSTGRES_DB: {{ copier__project_slug }}
- POSTGRES_USER: {{ copier__project_slug }}
- POSTGRES_PASSWORD: TESTPASSWORD
-{% if copier__use_celery %}
- redis:
- image: redis:stable
-{% endif %}
-
- steps:
- - step: &backend-tests
- name: Backend Tests
- caches:
- - pip
- script:
- - export DATABASE_URL=postgres://{{ copier__project_slug }}:TESTPASSWORD@localhost:5432/{{ copier__project_slug }}
-{% if copier__use_celery %}
- - export CELERY_BROKER_URL=redis://127.0.0.1:6379/0
-{% endif %}
- - CI=true make backend-test
-
- services:
-{% if copier__use_celery %}
- - redis
-{% endif %}
- - postgres
-
- - step: &check-lint-and-formatting
- name: Check lint and formatting
- caches:
- - pip
- script:
- - make check-lint-and-formatting
-
-{% if copier__create_nextjs_frontend %}
- - step: &check-lint-and-test-frontend
- name: Frontend Lint & Typecheck & Test
- image: node:20
- caches:
- - node
- script:
- - CI=true make check-lint-and-test-frontend
-
-{% endif %}
-
-pipelines:
- default:
- - parallel:
- - step: *check-lint-and-formatting
-{% if copier__create_nextjs_frontend %}
- - step: *check-lint-and-test-frontend
-{% endif %}
- - step: *backend-tests
diff --git a/template/bootstrap-cluster/.env b/template/bootstrap-cluster/.env
index f27dbfb..47241c7 100644
--- a/template/bootstrap-cluster/.env
+++ b/template/bootstrap-cluster/.env
@@ -1,7 +1,2 @@
TOFU_DIR=../terraform
-{%- if copier__operating_system == "talos" %}
-TALOS_FACTORY_IMAGE=factory.talos.dev/installer/10e276a06c1f86b182757a962258ac00655d3425e5957f617bdc82f06894e39b:v1.7.6
-{%- endif %}
-ARGOCD_VERSION=8.2.0
-REPO_URL={{ copier__repo_url }}
-REPO_NAME={{ copier__repo_name }}
+TALOS_FACTORY_IMAGE=factory.talos.dev/installer/10e276a06c1f86b182757a962258ac00655d3425e5957f617bdc82f06894e39b:v1.12.1
diff --git a/template/bootstrap-cluster/README.md b/template/bootstrap-cluster/README.md
index d90541f..e357fcd 100644
--- a/template/bootstrap-cluster/README.md
+++ b/template/bootstrap-cluster/README.md
@@ -1,44 +1,22 @@
-{%- if copier__operating_system == "talos" %}# Bootstrap Talos and ArgoCD
+# Bootstrap Talos
-After deploying infrastructure using Terraform, we can proceed with configuring
-Talos and bootstrapping ArgoCD.
+After deploying infrastructure using Terraform, we can proceed with configuring Talos and bootstrapping the Kubernetes cluster.
-Terraform is solely utilized for deploying infrastructure. Any subsequent
-configuration of Talos or ArgoCD is done using Taskfile tasks.
-{%- elif copier__operating_system == "k3s" %}# Bootstrap k3s and ArgoCD
+Terraform is solely utilized for deploying infrastructure. Any subsequent configuration of Talos is done using Taskfile tasks.
-After deploying infrastructure using Terraform, we can proceed with configuring
-k3s and bootstrapping ArgoCD.
+To view a list of tasks and their descriptions, navigate to the `bootstrap-cluster` directory and execute `task`.
-Terraform is solely utilized for deploying infrastructure. Any subsequent
-configuration of k3s or ArgoCD is done using Taskfile tasks.
-{%- endif %}
+Note that there is a directory for each environment: sandbox, staging, and production.
-To view a list of tasks and their descriptions, navigate to the
-`bootstrap-cluster` directory and execute `task`.
+We recommend opening the AWS serial console for each EC2 instance to monitor the bootstrap process.
-Note that there is a directory for each environment: sandbox, staging, and
-cluster.
+## Bootstrapping Talos
-We recommend opening the AWS serial console for each ec2 instance to monitor the
-bootstrap process.
-
-{%- if copier__operating_system == "talos" %}
-
-### Bootstrapping Talos
-
-{%- elif copier__operating_system == "k3s" %}
-
-### Bootstrapping k3s
-
-{%- endif %}
-
-1. Navigate to the directory corresponding to the environment being set up and
- run:
+1. Navigate to the bootstrap-cluster directory and set the environment:
```shell
+ cd bootstrap-cluster
export ENV=sandbox
- cd $ENV
```
2. Review the `.env` file for the given environment:
@@ -48,9 +26,7 @@ bootstrap process.
CLUSTER_NAME: "{{ copier__project_dash }}-sandbox"
```
-{%- if copier__operating_system == "talos" %}
- Note that we use a Talos factory image. This image contains a system
- extension that provides the ECR credential provider.
+ Note that we use a Talos factory image. This image contains a system extension that provides the ECR credential provider.
```
siderolabs/ecr-credential-provider (v1.28.1)
@@ -59,8 +35,7 @@ bootstrap process.
CredentialProvider API to authenticate against AWS' Elastic Container
Registry and pull images.
```
-{%- endif %}
-{%- if copier__operating_system == "talos" %}
+
3. Bootstrap Talos with the following command:
```
@@ -81,72 +56,40 @@ bootstrap process.
- task: store_controlplane_config
- task: store_talosconfig
- task: apply_talos_config
- - sleep 30
+ - sleep 60
- task: bootstrap_kubernetes
- - sleep 30
+ - sleep 60
- task: generate_kubeconfig
- task: store_kubeconfig
- task: upgrade_talos
- - task: enable_ecr_credential_helper
```
- It takes a few minutes for the cluster nodes to register as etcd
- members and synchronize.
+ It takes a few minutes for the cluster nodes to register as etcd members and synchronize.
-{%- elif copier__operating_system == "k3s" %}
-3. Bootstrap k3s with the following command:
+ When the bootstrap completes successfully, you should see output similar to:
```
- task k3s:bootstrap
+   Upgrading Talos on <node-ip>
+   watching nodes: [<node-ip>]
+   * <node-ip>: post check passed
```
- To understand what this task will do, examine the Taskfile configuration:
+ This indicates the Talos upgrade has completed successfully.
- ```yaml
- bootstrap:
- desc: |
- Run all tasks required to bootstrap k3s and Kubernetes cluster.
- requires:
- vars: [ENV]
- cmds:
- - task: save-node-ips
- - task: setup-ssh-key
- - task: install-k3s
- - task: fetch-kubeconfig
- - task: store-kubeconfig
- - task: enable-ecr-credential-helper
- ```
-
- It takes a few minutes for the cluster nodes to register as etcd
- members and synchronize.
-
-{%- endif %}
-
- If the cluster fails to bootstrap, refer to the Troubleshooting section
- below.
+ If the cluster fails to bootstrap, refer to the Troubleshooting section below.
-{%- if copier__operating_system == "talos" %}
4. Verify the health of your cluster with:
```shell
task talos:health
```
- 5. Test kubectl access:
-
- ```shell
- eval $(task talos:kubeconfig)
- kubectl cluster-info
- ```
-{%- elif copier__operating_system == "k3s" %}
-
-4. Test kubectl access:
+5. Test kubectl access:
```shell
- eval $(task k3s:kubeconfig)
+ eval $(task talos:kubeconfig)
kubectl cluster-info
```
-{%- endif %}
This should return output similar to the following:
@@ -158,131 +101,46 @@ bootstrap process.
To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
```
-### Bootstrapping ArgoCD
-
-1. Review the branches used for deployment to the sandbox, staging and
- production environments. The default configuration will release the `develop`
- branch to the Sandbox and the `main` branch to the Production environment.
- Make sure to make the `develop` branch the default branch for PRs on a newly
- created Git repository.
-
- Review the branch rule in `bootstrap_root_app` job in
- `bootstrap-cluster/argocd.yaml`:
-
- ```
- vars:
- BRANCH:
- sh: ([ "$ENV" = "production" ] && echo "main" || echo "develop")
- ```
-
- Review the `targetRevision` in the `kustomization.yaml` files shown below:
-
- `argocd/sandbox/apps/kustomization.yaml`:
-
- ```
- patches:
- - patch: |-
- - op: replace
- path: /spec/source/targetRevision
- value: develop
- target:
- kind: Application
- name: ingress
- ...
- - patch: |-
- - op: replace
- path: /spec/source/targetRevision
- value: develop
- target:
- kind: Application
- name: {{ copier__project_slug }}-prod
- ```
-
- `argocd/prod/apps/kustomization.yaml`:
+## Troubleshooting
- ```
- patches:
- - patch: |-
- - op: replace
- path: /spec/source/targetRevision
- value: main
- target:
- kind: Application
- name: ingress
- ```
+### Bootstrap fails with "secret already exists"
-2. Next, we need to create a GitHub deploy key to allow ArgoCD to monitor the
- repo. This step requires access to the 1password vault for your project.
+If you see an error like:
+```
+An error occurred (ResourceExistsException) when calling the CreateSecret operation:
+The operation failed because the secret sandbox_talosconfig_yaml already exists.
+```
- Review the vault name in the `op` cli command in
- `bootstrap-cluster/argocd.yaml`:
+This means you've already run bootstrap before. To restart the bootstrap process:
- ```
- - op item create
- ...
- --vault='{{ copier__project_name }}'
+1. Delete existing secrets and configs:
+ ```shell
+ task talos:reset_config
```
- Sign into 1password with `op signin` and generate the deploy key:
-
+2. Run bootstrap again:
```shell
- task argocd:generate_github_deploy_key
+ task talos:bootstrap
```
-3. Add the deploy key to your Git repository
+**Note:** This is intentionally designed to fail loudly to prevent accidentally overwriting credentials for an existing cluster.
-4. Proceed with installing ArgoCD by executing:
+### Bootstrap fails completely
- ```shell
- task argocd:bootstrap
- ```
-
-The `argocd:bootstrap` task configuration is as follows:
+If bootstrapping Talos fails, we recommend resetting the config files and recreating EC2 instances before trying again.
-```
- bootstrap:
- desc: Setup ArgoCD
- cmds:
- - task: install
- - task: create_repo_credentials_secret
- - task: bootstrap_root_app
-```
+1. Reset config and state with `task talos:reset_config` for the given environment.
-5. ArgoCD will install the Sealed Secrets operator in the cluster. Once it is
- installed, we can generate secrets for the given environment.
+2. Destroy and recreate EC2 instances:
```shell
- cd ..
- make debug-$ENV-secrets
- make $ENV-secrets
+ cd ../terraform/$ENV/
+ tofu destroy \
+ -target "module.cluster.module.control_plane_nodes[0].aws_instance.this[0]" \
+ -target "module.cluster.module.control_plane_nodes[1].aws_instance.this[0]" \
+ -target "module.cluster.module.control_plane_nodes[2].aws_instance.this[0]"
+ tofu plan -out="tfplan.out"
+ tofu apply tfplan.out
```
-6. Commit the `secrets.yaml` file for the given environment and push it to the
- repo.
-
-## Troubleshooting
-
-{%- if copier__operating_system == "talos" %}
-If bootstrapping Talos fails, we recommend resetting the config files and
-recreating ec2 instances before trying again.
-
-1. Reset config and state with `task talos:reset_config` for the given
- environment.
-
-2. Destroy and recreate ec2 instances:
-
- cd ../terraform/$ENV/
- terraform destroy \
- -target "module.cluster.module.control_plane_nodes[0].aws_instance.this[0]" \
- -target "module.c luster.module.control_plane_nodes[1].aws_instance.this[0]" \
- -target "module.c luster.module.control_plane_nodes[1].aws_instance.this[0]"
- terraform plan -out="tfplan.out"
- terraform apply tfplan.out
-{%- elif copier__operating_system == "k3s" %}
-If bootstrapping k3s fails, we recommend uninstalling k3s from each node and
-boostrapping from scratch.
-
-```shell
-task k3s:uninstall-k3s
-```
-{%- endif %}
+3. Try bootstrapping again from step 3.
diff --git a/template/bootstrap-cluster/Taskfile.yml b/template/bootstrap-cluster/Taskfile.yml
index 1113211..1213262 100644
--- a/template/bootstrap-cluster/Taskfile.yml
+++ b/template/bootstrap-cluster/Taskfile.yml
@@ -2,13 +2,9 @@ version: '3'
vars:
ENV: {{ "'{{.ENV}}'" }}
-includes:
- argocd:
- taskfile: ./argocd.yaml
- vars:
- ENV: {{ "'{{.ENV}}'" }}
- ARGOCD_VERSION: {{ "'{{.ARGOCD_VERSION}}'" }}
+dotenv: ['.env', '{{ "{{.ENV}}" }}/.env']
+includes:
talos:
taskfile: ./talos.yaml
vars:
diff --git a/template/bootstrap-cluster/argocd.yaml b/template/bootstrap-cluster/argocd.yaml
deleted file mode 100644
index 13dace0..0000000
--- a/template/bootstrap-cluster/argocd.yaml
+++ /dev/null
@@ -1,180 +0,0 @@
-version: '3'
-silent: true
-
-env:
- GITHUB_DEPLOY_KEY_TITLE: "{{ copier__project_name }} ArgoCD GitHub Deploy Key"
-
-tasks:
- generate_github_deploy_key:
- desc: |
- Generate a GitHub deploy key and store it in AWS Parameter Store
- requires:
- vars: [ENV]
- cmds:
- - |
- set -e
- TMP_DIR=$(mktemp -d)
- ssh-keygen -t ed25519 -f $TMP_DIR/id_ed25519 -q -N ""
- aws ssm put-parameter \
- --name "/{{ copier__project_slug }}/{{ '{{.ENV}}' }}/argocd/github_deploy_key" \
- --value "file://$TMP_DIR/id_ed25519" \
- --type SecureString --overwrite
- aws ssm put-parameter \
- --name "/{{ copier__project_slug }}/{{ '{{.ENV}}' }}/argocd/github_deploy_key.pub" \
- --value "file://$TMP_DIR/id_ed25519.pub" \
- --type String --overwrite
- rm -rf $TMP_DIR
-
- add_github_deploy_key:
- desc: Add the public key to the GitHub repository as a deploy key
- requires:
- vars: [ENV]
- cmds:
- - |
- set -e
- KEY_ID=$(gh repo deploy-key list | grep "$GITHUB_DEPLOY_KEY_TITLE" | cut -f1)
- if [ -n "$KEY_ID" ]; then
- echo "Deleting existing deploy key '$GITHUB_DEPLOY_KEY_TITLE'..."
- gh repo deploy-key delete "$KEY_ID"
- fi
- TMP_DIR=$(mktemp -d)
- aws ssm get-parameter \
- --name "/{{ copier__project_slug }}/{{ '{{.ENV}}' }}/argocd/github_deploy_key.pub" \
- --query Parameter.Value --output text > $TMP_DIR/id_ed25519.pub
- echo "Adding new deploy key '$GITHUB_DEPLOY_KEY_TITLE'..."
- gh repo deploy-key add $TMP_DIR/id_ed25519.pub --title "$GITHUB_DEPLOY_KEY_TITLE"
- rm -rf $TMP_DIR
-
- bootstrap:
- desc: Setup ArgoCD
- requires:
- vars: [ENV]
- cmds:
- - task: install
- vars: {ENV: {{ "'{{.ENV}}'" }}, ARGOCD_VERSION: {{ "'{{.ARGOCD_VERSION}}'" }}}
- - task: generate_github_deploy_key
- vars: {ENV: {{ "'{{.ENV}}'" }}}
- - task: add_github_deploy_key
- vars: {ENV: {{ "'{{.ENV}}'" }}}
- - task: create_repo_credentials_secret
- vars: {ENV: {{ "'{{.ENV}}'" }}}
- - task: bootstrap_root_app
- vars: {ENV: {{ "'{{.ENV}}'" }}}
- - task: monitor_sealed_secrets
- vars: {ENV: {{ "'{{.ENV}}'" }}}
- - task: generate_sealed_secrets
- vars: {ENV: {{ "'{{.ENV}}'" }}}
- - task: commit_sealed_secrets
- vars: {ENV: {{ "'{{.ENV}}'" }}}
-
- install:
- desc: Deploy ArgoCD using Helm
- env:
- KUBECONFIG: "{{ '{{.ENV}}' }}/kubeconfig"
- cmds:
- - helm repo add argocd https://argoproj.github.io/argo-helm
- - helm repo update
- - helm install argocd argo-cd
- --repo https://argoproj.github.io/argo-helm
- --version {{ '{{.ARGOCD_VERSION}}' }}
- --namespace argocd
- --create-namespace
- --set configs.params.server.insecure=true
-
- create_repo_credentials_secret:
- requires:
- vars: [ENV]
- vars:
- GITHUB_DEPLOY_KEY_B64:
- sh: aws ssm get-parameter --name "/{{ copier__project_slug }}/{{ '{{.ENV}}' }}/argocd/github_deploy_key" --with-decryption --query Parameter.Value --output text | base64 -w0
- desc: Create and apply repo credentials secret for each repo
- env:
- KUBECONFIG: "{{ '{{.ENV}}' }}/kubeconfig"
- cmds:
- - |
- cat </dev/null 2>&1; do
- sleep 5
- done
- echo "Pod created. Waiting for it to be ready..."
- kubectl wait --for=condition=ready pod -n kube-system -l app.kubernetes.io/name=sealed-secrets --timeout=120s
- echo "Waiting for sealed-secrets-controller service to be available..."
- until kubectl get endpoints -n kube-system sealed-secrets-controller -o jsonpath='{.subsets[?(@.addresses)].addresses[0].ip}' >/dev/null 2>&1; do
- sleep 5
- done
- echo "sealed-secrets-controller is ready."
-
- generate_sealed_secrets:
- desc: Generate sealed secrets for the environment
- requires:
- vars: [ENV]
- env:
- KUBECONFIG: "{{ '{{.ENV}}' }}/kubeconfig"
- cmds:
- - |
- AWS_S3_ACCESS_KEY_ID=$(tofu -chdir=../terraform/{{ '{{.ENV}}' }} output -raw cnpg_user_access_key) \
- AWS_S3_SECRET_ACCESS_KEY=$(tofu -chdir=../terraform/{{ '{{.ENV}}' }} output -raw cnpg_user_secret_key) \
- DJANGO_SECRET_KEY=$(LC_CTYPE=C tr -dc A-Za-z0-9 ../k8s/{{ '{{.ENV}}' }}/secrets.yaml
-
- commit_sealed_secrets:
- desc: Commit and push the sealed secrets
- requires:
- vars: [ENV]
- cmds:
- - |
- git add ../k8s/{{ '{{.ENV}}' }}/secrets.yaml
- git commit -m "feat(secrets): add sealed secrets for {{ '{{.ENV}}' }}"
- git push
diff --git a/template/bootstrap-cluster/k3s.yaml b/template/bootstrap-cluster/k3s.yaml
deleted file mode 100644
index f92b5fd..0000000
--- a/template/bootstrap-cluster/k3s.yaml
+++ /dev/null
@@ -1,247 +0,0 @@
----
-version: '3'
-silent: true
-vars:
- ENV: {{ "'{{.ENV}}'" }}
-
-tasks:
- bootstrap:
- desc: |
- Run all tasks required to bootstrap k3s and Kubernetes cluster.
- requires:
- vars: [ENV]
- cmds:
- - task: save-node-ips
- vars: {ENV: {{ "'{{.ENV}}'" }}}
- - task: setup-ssh-key
- vars: {ENV: {{ "'{{.ENV}}'" }}}
- - task: install-ecr-credential-helper
- vars: {ENV: {{ "'{{.ENV}}'" }}}
- - task: install-k3s
- vars: {ENV: {{ "'{{.ENV}}'" }}}
- - task: fetch-kubeconfig
- vars: {ENV: {{ "'{{.ENV}}'" }}}
- - task: store-kubeconfig
- vars: {ENV: {{ "'{{.ENV}}'" }}}
-
- save-node-ips:
- desc: Save node IPs to files for later reference
- requires:
- vars: [ENV]
- cmds:
- - tofu -chdir=../terraform/{{ '{{.ENV}}' }} output control_plane_nodes_public_ips | sed 's/"//g; s/,/\n/g' > {{ '{{.ENV}}' }}/public_ips.txt
- - tofu -chdir=../terraform/{{ '{{.ENV}}' }} output control_plane_nodes_private_ips | sed 's/"//g; s/,/\n/g' > {{ '{{.ENV}}' }}/private_ips.txt
-
- setup-ssh-key:
- desc: Extract and save SSH key from terraform output
- cmds:
- - tofu -chdir=../terraform/{{ '{{.ENV}}' }} output -raw private_deploy_key > {{ '{{.ENV}}' }}/id_ed25519
- - chmod 600 {{ '{{.ENV}}' }}/id_ed25519
-
- ssh-server-node:
- vars:
- IP:
- sh: head -n1 {{ '{{.ENV}}' }}/public_ips.txt
- requires:
- vars: [ENV]
- cmds:
- - TERM=xterm-256color ssh -oStrictHostKeyChecking=no -i {{ '{{.ENV}}' }}/id_ed25519 ubuntu@{{ '{{.IP}}' }}
-
- install-ecr-credential-helper:
- desc: |
- Install the ECR credential helper on all control plane nodes.
- vars:
- NODE_IPS:
- sh: cat {{ '{{.ENV}}' }}/public_ips.txt
- requires:
- vars: [ENV]
- cmds:
- - |
- for ip in $(echo "{{ '{{.NODE_IPS}}' }}"); do
- echo "Installing ECR credential helper on $ip..."
- # Download and install the ECR credential provider
- ssh -oStrictHostKeyChecking=no -i {{ '{{.ENV}}' }}/id_ed25519 ubuntu@$ip \
- 'sudo wget https://artifacts.k8s.io/binaries/cloud-provider-aws/v1.29.0/linux/amd64/ecr-credential-provider-linux-amd64 -O /usr/local/bin/ecr-credential-provider && \
- sudo chmod +x /usr/local/bin/ecr-credential-provider'
-
- # Create credential provider config
- ssh -oStrictHostKeyChecking=no -i {{ '{{.ENV}}' }}/id_ed25519 ubuntu@$ip \
- 'sudo mkdir -p /etc/rancher/k3s && \
- echo "apiVersion: kubelet.config.k8s.io/v1
- kind: CredentialProviderConfig
- providers:
- - name: ecr-credential-provider
- matchImages:
- - \"*.dkr.ecr.*.amazonaws.com\"
- defaultCacheDuration: \"12h\"
- apiVersion: credentialprovider.kubelet.k8s.io/v1" | sudo tee /etc/rancher/k3s/credential-provider-config.yaml'
- done
-
- install-k3s:
- desc: Install k3s on nodes
- vars:
- PUBLIC_IPS:
- sh: cat {{ '{{.ENV}}' }}/public_ips.txt
- PRIVATE_IPS:
- sh: cat {{ '{{.ENV}}' }}/private_ips.txt
- FIRST_PUBLIC_IP:
- sh: head -n1 {{ '{{.ENV}}' }}/public_ips.txt
- FIRST_PRIVATE_IP:
- sh: head -n1 {{ '{{.ENV}}' }}/private_ips.txt
- OTHER_PUBLIC_IPS:
- sh: tail -n +2 {{ '{{.ENV}}' }}/public_ips.txt
- OTHER_PRIVATE_IPS:
- sh: tail -n +2 {{ '{{.ENV}}' }}/private_ips.txt
- NODE_COUNT:
- sh: wc -l < {{ '{{.ENV}}' }}/public_ips.txt
- K3S_TOKEN:
- sh: tr -dc A-Za-z0-9 /dev/null 2>&1; then
- aws secretsmanager put-secret-value \
- --secret-id "{{copier__project_slug}}_{{ '{{.ENV}}' }}_kubeconfig" \
- --secret-string "$(base64 -w0 {{ '{{.ENV}}' }}/kubeconfig)" > /dev/null
- else
- aws secretsmanager create-secret \
- --name "{{copier__project_slug}}_{{ '{{.ENV}}' }}_kubeconfig" \
- --secret-string "$(base64 -w0 {{ '{{.ENV}}' }}/kubeconfig)" > /dev/null
- fi
- delete-all-secrets:
- desc: |
- Delete all related secrets from AWS Secrets Manager.
- requires:
- vars: [ENV]
- cmds:
- - |
- aws secretsmanager delete-secret \
- --secret-id "{{copier__project_slug}}_{{ '{{.ENV}}' }}_kubeconfig" \
- --force-delete-without-recovery > /dev/null 2>&1
-
- reset-config:
- desc: |
- Remove all config files and delete secrets in AWS Secrets Manager
- requires:
- vars: [ENV]
- cmds:
- - task: delete_all_secrets
- - cd {{ '{{.ENV}}' }} && rm -f kubeconfig
-
- kubeconfig:
- desc: |
- Retrieve the Kubeconfig for a given cluster
-
- Since the commands run in sub-shell you can eval the output to export
- KUBECONFIG to your current shell:
-
- eval $(task k3s:kubeconfig)
- silent: true
- cmds:
- - echo "export KUBECONFIG=$(pwd)/{{ '{{.ENV}}' }}/kubeconfig"
-
- uninstall-k3s:
- desc: Uninstall k3s from all nodes
- vars:
- PUBLIC_IPS:
- sh: cat {{ '{{.ENV}}' }}/public_ips.txt
- requires:
- vars: [ENV]
- silent: true
- cmds:
- - |
- for ip in $(echo "{{ '{{.PUBLIC_IPS}}' }}"); do
- echo "Uninstalling k3s from $ip..."
- ssh -oStrictHostKeyChecking=no -i {{ '{{.ENV}}' }}/id_ed25519 ubuntu@$ip \
- '/usr/local/bin/k3s-uninstall.sh'
- done
-
- fetch-config:
- desc: |
- Fetch kubeconfig from AWS Secrets
- Manager.
- requires:
- vars: [ENV]
- cmds:
- - |
- if [ -f {{ '{{.ENV}}' }}/kubeconfig ]; then
- cp {{ '{{.ENV}}' }}/kubeconfig {{ '{{.ENV}}' }}/kubeconfig.bak.$(date +%Y%m%d_%H%M%S)
- fi
- aws secretsmanager get-secret-value \
- --secret-id "{{copier__project_slug}}_{{ '{{.ENV}}' }}_kubeconfig" | \
- yq ".SecretString" | tr -d '"' | \
- base64 -d > ./{{ '{{.ENV}}' }}/kubeconfig
- - chmod 600 {{ '{{.ENV}}' }}/kubeconfig
diff --git a/template/bootstrap-cluster/production/.env b/template/bootstrap-cluster/production/.env
index 23bfad7..88f4368 100644
--- a/template/bootstrap-cluster/production/.env
+++ b/template/bootstrap-cluster/production/.env
@@ -1,2 +1,2 @@
-CONTROL_PLANE_ENDPOINT="https://k8s.{{ copier__domain_name }}:6443"
+CONTROL_PLANE_ENDPOINT="https://k8s.prod.{{ copier__domain_name }}:6443"
CLUSTER_NAME="{{ copier__project_dash }}-prod"
diff --git a/template/bootstrap-cluster/root-app.template.yaml b/template/bootstrap-cluster/root-app.template.yaml
deleted file mode 100644
index d4657bb..0000000
--- a/template/bootstrap-cluster/root-app.template.yaml
+++ /dev/null
@@ -1,27 +0,0 @@
----
-apiVersion: argoproj.io/v1alpha1
-kind: Application
-metadata:
- name: root
- namespace: argocd
- creationTimestamp: null
- finalizers:
- - resources-finalizer.argocd.argoproj.io
- labels:
- app.kubernetes.io/name: root
-spec:
- destination:
- namespace: argocd
- server: https://kubernetes.default.svc
- project: default
- source:
- path: argocd/${ENV}/apps
- repoURL: {{ copier__repo_url }}
- targetRevision: ${BRANCH}
- syncPolicy:
- automated:
- allowEmpty: true
- prune: true
- selfHeal: true
- syncOptions:
- - allowEmpty=true
diff --git a/template/bootstrap-cluster/talos.yaml b/template/bootstrap-cluster/talos.yaml
index 87c3a73..0503b45 100644
--- a/template/bootstrap-cluster/talos.yaml
+++ b/template/bootstrap-cluster/talos.yaml
@@ -1,7 +1,4 @@
version: '3'
-env:
- TALOSCONFIG: ./{{ '{{.ENV}}' }}/talosconfig
- KUBECONFIG: ./{{ '{{.ENV}}' }}/kubeconfig
tasks:
bootstrap:
desc: |
@@ -19,11 +16,16 @@ tasks:
- sleep 60
- task: generate_kubeconfig
- task: store_kubeconfig
- - task: upgrade_talos
+ # - task: upgrade_talos
generate_configs:
desc: |
Generate the initial Talos configuration files for the control plane
and Talos nodes.
+ vars:
+ PUBLIC_IPS:
+ sh: |
+ tofu -chdir={{ '{{.TOFU_DIR}}' }}/{{ '{{.ENV}}' }} \
+ output -raw control_plane_nodes_public_ips
cmds:
- |
talosctl gen config {{ '{{.CLUSTER_NAME}}' }} {{ '{{.CONTROL_PLANE_ENDPOINT}}' }} \
@@ -31,6 +33,7 @@ tasks:
--output ./{{ '{{.ENV}}' }}/ \
--config-patch @patches/patch-machine.yaml \
--config-patch-control-plane @patches/patch-control-plane.yaml \
+ --additional-sans {{ '{{.PUBLIC_IPS}}' }} \
--with-examples=false \
--with-docs=false
set_node_ips:
@@ -45,18 +48,21 @@ tasks:
requires:
vars: [ENV]
cmds:
- - talosctl --talosconfig $TALOSCONFIG config endpoint {{ '{{.EC2_HOSTS}}' }}
+ - talosctl --talosconfig ./{{ '{{.ENV}}' }}/talosconfig config endpoint {{ '{{.EC2_HOSTS}}' }}
+ - |
+ echo "✅ Endpoints set in talosconfig:"
+ grep -A 3 "endpoints:" ./{{ '{{.ENV}}' }}/talosconfig | head -5
apply_talos_config:
desc: |
Apply the Talos configuration to all nodes in the control plane.
cmds:
- |
- for node in $(yq -r '.contexts[].endpoints[]' $TALOSCONFIG); do \
+ for node in $(yq -r '.contexts[].endpoints[]' ./{{ '{{.ENV}}' }}/talosconfig); do \
echo "Applying config to $node" && \
talosctl apply-config \
--insecure \
--nodes $node \
- --talosconfig $TALOSCONFIG \
+ --talosconfig ./{{ '{{.ENV}}' }}/talosconfig \
--file ./{{ '{{.ENV}}' }}/controlplane.yaml; \
done
bootstrap_kubernetes:
@@ -66,10 +72,10 @@ tasks:
FIRST_NODE:
sh: |
tofu -chdir={{ '{{.TOFU_DIR}}' }}/{{ '{{.ENV}}' }} \
- output -raw control_plane_nodes_private_ips | \
+ output -raw control_plane_nodes_public_ips | \
cut -d',' -f1
cmds:
- - talosctl bootstrap --talosconfig $TALOSCONFIG --nodes {{ '{{.FIRST_NODE}}' }}
+ - talosctl bootstrap --talosconfig ./{{ '{{.ENV}}' }}/talosconfig --nodes {{ '{{.FIRST_NODE}}' }}
generate_kubeconfig:
desc: |
Generate the kubeconfig file to access the Kubernetes cluster using
@@ -78,19 +84,24 @@ tasks:
FIRST_NODE:
sh: |
tofu -chdir={{ '{{.TOFU_DIR}}' }}/{{ '{{.ENV}}' }} \
- output -raw control_plane_nodes_private_ips | \
+ output -raw control_plane_nodes_public_ips | \
cut -d',' -f1
cmds:
- - talosctl kubeconfig $KUBECONFIG --talosconfig $TALOSCONFIG --nodes {{ '{{.FIRST_NODE}}' }} --force
+ - talosctl kubeconfig ./{{ '{{.ENV}}' }}/kubeconfig --talosconfig ./{{ '{{.ENV}}' }}/talosconfig --nodes {{ '{{.FIRST_NODE}}' }} --force
upgrade_talos:
desc: |
Upgrade Talos on all control plane nodes to a specified version.
+ vars:
+ CONTROL_PLANE_PUBLIC_IPS:
+ sh: |
+ tofu -chdir={{ '{{.TOFU_DIR}}' }}/{{ '{{.ENV}}' }} \
+ output -raw control_plane_nodes_public_ips | tr ',' ' '
cmds:
#- task: health
- |
- for node in $(echo "{{ '{{.CONTROL_PLANE_PRIVATE_IPS}}' }}"); do
+ for node in {{ '{{.CONTROL_PLANE_PUBLIC_IPS}}' }}; do
echo "Upgrading Talos on $node" && \
- talosctl upgrade --talosconfig $TALOSCONFIG --nodes $node --image {{ '{{.TALOS_FACTORY_IMAGE}}' }}; \
+ talosctl upgrade --talosconfig ./{{ '{{.ENV}}' }}/talosconfig --nodes $node --image {{ '{{.TALOS_FACTORY_IMAGE}}' }}; \
done
store_talosconfig:
desc: |
@@ -101,7 +112,7 @@ tasks:
- |
aws secretsmanager create-secret \
--name "{{ '{{.ENV}}' }}_talosconfig_yaml" \
- --secret-string "$(base64 -w0 $TALOSCONFIG)"
+ --secret-string "$(base64 -w0 ./{{ '{{.ENV}}' }}/talosconfig)"
store_controlplane_config:
desc: |
Store the Talos control plane configuration file in AWS Secrets
@@ -122,7 +133,7 @@ tasks:
- |
aws secretsmanager create-secret \
--name "{{ '{{.ENV}}' }}_kubeconfig" \
- --secret-string "$(base64 -w0 $KUBECONFIG)"
+ --secret-string "$(base64 -w0 ./{{ '{{.ENV}}' }}/kubeconfig)"
health:
desc: |
Check the health of the Talos cluster.
@@ -130,33 +141,31 @@ tasks:
FIRST_NODE:
sh: |
tofu -chdir={{ '{{.TOFU_DIR}}' }}/{{ '{{.ENV}}' }} \
- output -raw control_plane_nodes_private_ips | \
+ output -raw control_plane_nodes_public_ips | \
cut -d',' -f1
cmds:
- - talosctl health --talosconfig $TALOSCONFIG --nodes {{ '{{.FIRST_NODE}}' }}
+ - talosctl health --talosconfig ./{{ '{{.ENV}}' }}/talosconfig --nodes {{ '{{.FIRST_NODE}}' }}
services:
desc: |
Check service status on all notes.
cmds:
- |
- for node in $(yq -r '.contexts[].endpoints[]' $TALOSCONFIG); do
- talosctl service --talosconfig $TALOSCONFIG --nodes $node;
+ for node in $(yq -r '.contexts[].endpoints[]' ./{{ '{{.ENV}}' }}/talosconfig); do
+ talosctl service --talosconfig ./{{ '{{.ENV}}' }}/talosconfig --nodes $node;
done
delete_all_secrets:
desc: |
Delete all related secrets from AWS Secrets Manager.
+ Restores secrets scheduled for deletion before deleting them.
requires:
vars: [ENV]
cmds:
- |
- aws secretsmanager delete-secret \
- --secret-id "{{ '{{.ENV}}' }}_kubeconfig" --force-delete-without-recovery
- - |
- aws secretsmanager delete-secret \
- --secret-id "{{ '{{.ENV}}' }}_talosconfig_yaml" --force-delete-without-recovery
- - |
- aws secretsmanager delete-secret \
- --secret-id "{{ '{{.ENV}}' }}_talos_controlplane_yaml" --force-delete-without-recovery
+ for secret in "{{ '{{.ENV}}' }}_kubeconfig" "{{ '{{.ENV}}' }}_talosconfig_yaml" "{{ '{{.ENV}}' }}_talos_controlplane_yaml"; do
+ echo "Deleting secret: $secret"
+ aws secretsmanager restore-secret --secret-id "$secret" 2>/dev/null || true
+ aws secretsmanager delete-secret --secret-id "$secret" --force-delete-without-recovery 2>/dev/null || true
+ done
reset_config:
desc: |
Remove all config files and delete secrets in AWS Secrets Manager
diff --git a/template/docs/README.md b/template/docs/README.md
index f0f4c49..a2e0d7b 100644
--- a/template/docs/README.md
+++ b/template/docs/README.md
@@ -1,17 +1,33 @@
-# Project Documentation
-
-You are encouraged to add in this directory all documentation
-useful for others (and you in future) to understand better
-how the project work and to help developers to prepare their environment.
-
-Examples (not all possibilities) of good things that can be documented:
-
-- How to configure development environment using third-party
-applications and integrations (e.g. Slack, Discord, external
-APIs, etc);
-- How to prepare the environment to accomplish a specific task
-that demands business knowledge and populate data in multible tables;
-- Glossary of business terms and actions that are not obvious
-to the general public;
-- Anything that you needed to research inside the project and
-it could help a new developer not to have spent time on that research.
+# Talos Kubernetes Cluster Documentation
+
+This directory contains documentation for deploying and managing a Talos Linux Kubernetes cluster on AWS.
+
+## Overview
+
+This project provides infrastructure-as-code templates for deploying a production-ready Talos Linux Kubernetes cluster on AWS using Terraform and OpenTofu.
+
+## Documentation Contents
+
+- **[Project Overview](./project-overview.md)** - Introduction to the template and its purpose
+- **[Architecture](./architecture.md)** - Infrastructure architecture and design
+- **[Deployment](./deployment.md)** - Step-by-step deployment instructions
+- **[Secrets Management](./secrets.md)** - How to manage sensitive configuration
+
+## What is Talos Linux?
+
+Talos Linux is a modern, minimal Linux distribution designed specifically for running Kubernetes. Key features:
+
+- **Immutable**: No SSH, no shell, configuration via API only
+- **Secure**: Minimal attack surface, all management via encrypted API
+- **Kubernetes-Native**: Built exclusively for running Kubernetes workloads
+- **API-Driven**: All operations performed via declarative configuration
+
+## Quick Start
+
+1. Generate a new project from this template using Copier
+2. Configure AWS credentials
+3. Deploy infrastructure: `cd terraform/sandbox && tofu apply`
+4. Bootstrap Talos cluster: `cd bootstrap-cluster/sandbox && task talos:bootstrap`
+5. Access your cluster: `kubectl --kubeconfig kubeconfig get nodes`
+
+For detailed instructions, see the [Deployment Documentation](./deployment.md).
diff --git a/template/docs/architecture.md b/template/docs/architecture.md
index cc48e32..032f2d8 100644
--- a/template/docs/architecture.md
+++ b/template/docs/architecture.md
@@ -1,10 +1,10 @@
# :house: Architecture
-This document outlines the architecture of the application, with diagrams to visualize the different components and their relationships.
+This document outlines the infrastructure architecture for deploying Talos Linux Kubernetes clusters on AWS.
## System Overview
-The template provides a full-stack application with the following components:
+The template provides infrastructure for deploying a Talos Linux Kubernetes cluster with the following components:
```mermaid
%%{
@@ -21,59 +21,48 @@ The template provides a full-stack application with the following components:
}
}%%
flowchart TD
- %% External entities
- User([User])
-
- %% Top-level system components
- subgraph SystemOverview["SCAF System Overview"]
- %% Infrastructure layer
+ subgraph SystemOverview["Talos Cluster Architecture"]
subgraph InfraLayer["Infrastructure Layer"]
- TF[Terraform] --> AWS[AWS Resources]
- K8S[Kubernetes] --> CLUSTER[K8s Cluster]
+ TF[Terraform/OpenTofu] --> AWS[AWS Resources]
+ AWS --> VPC[VPC Network]
+ AWS --> EC2[EC2 Instances]
+ AWS --> DNS[Route53 DNS]
end
-
- %% Application layer
- subgraph AppLayer["Application Layer"]
- FE[Next.js Frontend] --> |GraphQL| BE[Django Backend]
- BE --> |Queries/Mutations| DB[(PostgreSQL)]
- BE --> |Async Tasks| CELERY[Celery Workers]
- CELERY --> REDIS[Redis]
- CELERY --> |Email| MAIL[Email Service]
+
+ subgraph OSLayer["Operating System Layer"]
+ EC2 --> TALOS[Talos Linux v1.12.1]
+ TALOS --> API[Talos API]
+ end
+
+ subgraph K8sLayer["Kubernetes Layer"]
+ API --> K8S[Kubernetes Cluster]
+ K8S --> CP[Control Plane]
+ CP --> APISERVER[API Server]
+ CP --> ETCD[etcd]
+ CP --> SCHEDULER[Scheduler]
end
-
- %% Connections between layers
- CLUSTER --> APP[Application Stack]
- AWS --> APP
end
-
- %% External connections
- User -->|interacts with| FE
%% Style definitions - Gruvbox Dark theme
- classDef external fill:#3c3836,stroke:#928374,stroke-width:1px,color:#ebdbb2,font-weight:bold
classDef infrastructure fill:#d79921,stroke:#b57614,stroke-width:2px,color:#282828,font-weight:bold
- classDef application fill:#689d6a,stroke:#427b58,stroke-width:2px,color:#282828,font-weight:bold
- classDef database fill:#458588,stroke:#076678,stroke-width:2px,color:#282828,font-weight:bold,shape:cylinder
- classDef queue fill:#cc241d,stroke:#9d0006,stroke-width:2px,color:#282828,font-weight:bold
+ classDef os fill:#689d6a,stroke:#427b58,stroke-width:2px,color:#282828,font-weight:bold
+ classDef kubernetes fill:#458588,stroke:#076678,stroke-width:2px,color:#282828,font-weight:bold
%% Apply styles
- class User external
- class TF,K8S,AWS,CLUSTER,APP infrastructure
- class FE,BE,CELERY,MAIL application
- class DB database
- class REDIS queue
-
- %% Explicit styling for subgraph titles - works in both light and dark modes
+ class TF,AWS,VPC,EC2,DNS infrastructure
+ class TALOS,API os
+ class K8S,CP,APISERVER,ETCD,SCHEDULER kubernetes
+
+ %% Explicit styling for subgraphs
style SystemOverview fill:#282828,color:#fabd2f,font-weight:bold
style InfraLayer fill:#282828,color:#fabd2f,font-weight:bold
- style AppLayer fill:#282828,color:#fabd2f,font-weight:bold
+ style OSLayer fill:#282828,color:#fabd2f,font-weight:bold
+ style K8sLayer fill:#282828,color:#fabd2f,font-weight:bold
```
-## Application Architecture
-
-### Frontend (Next.js)
+## AWS Infrastructure (Terraform)
-The frontend is built with Next.js and TypeScript, using Apollo Client for GraphQL communication with the backend.
+The cloud infrastructure is managed with Terraform/OpenTofu, provisioning the following AWS resources:
```mermaid
%%{
@@ -89,89 +78,61 @@ The frontend is built with Next.js and TypeScript, using Apollo Client for Graph
}
}
}%%
-flowchart LR
- %% Node styling
-
- subgraph FrontendArch["Frontend Architecture"]
- subgraph NextComponents["Next.js Components"]
- PAGES[Pages] --> COMPS[Components]
- PAGES --> HOOKS[Hooks/Utils]
- APOLLO[Apollo Client] --> GQL[GraphQL Queries/Mutations]
- PAGES --> APOLLO
- end
- end
-
- APOLLO --> |HTTP/GraphQL| API[Backend API]
-
- %% Style definitions - Gruvbox Dark theme
- classDef frontend fill:#d79921,stroke:#b57614,stroke-width:2px,color:#282828,font-weight:bold
- classDef api fill:#689d6a,stroke:#427b58,stroke-width:2px,color:#282828,font-weight:bold
-
- %% Apply styles
- class PAGES,COMPS,HOOKS,APOLLO,GQL frontend
- class API api
-
- %% Explicit styling for subgraph titles - works in both light and dark modes
- style FrontendArch fill:#282828,color:#fabd2f,font-weight:bold
- style NextComponents fill:#282828,color:#fabd2f,font-weight:bold
-```
+flowchart TB
+ INTERNET([Internet])
-### Backend (Django)
+ subgraph AWSArch["AWS Infrastructure"]
+ R53[Route53 DNS] --> ELB[Elastic Load Balancer]
-The backend is built with Django, using Django GraphQL and Celery for async task processing.
+ ELB --> VPC[VPC Network]
-```mermaid
-%%{
- init: {
- 'theme': 'base',
- 'themeVariables': {
- 'primaryColor': '#282828',
- 'primaryTextColor': '#ebdbb2',
- 'primaryBorderColor': '#7c6f64',
- 'lineColor': '#7c6f64',
- 'secondaryColor': '#3c3836',
- 'tertiaryColor': '#504945'
- }
- }
-}%%
-flowchart TB
- %% External entities
- User([User])
-
- subgraph BackendArch["Backend Architecture"]
- subgraph DjangoComponents["Django Components"]
- URLS[URL Configuration] --> VIEWS[Views/GraphQL]
- VIEWS --> MODELS[Models]
- MODELS --> DB[(PostgreSQL)]
- VIEWS --> |Async Tasks| TASKS[Celery Tasks]
- TASKS --> REDIS[Redis]
- end
+ VPC --> SG[Security Groups]
+ VPC --> SUBNET[Public Subnets]
+
+ SUBNET --> EC2_1[EC2 Control Plane 1 Talos Linux]
+ SUBNET --> EC2_2[EC2 Control Plane 2 Talos Linux]
+ SUBNET --> EC2_3[EC2 Control Plane 3 Talos Linux]
+
+ SG --> EC2_1
+ SG --> EC2_2
+ SG --> EC2_3
+
+ IAM[IAM Roles] --> EC2_1
+ IAM --> EC2_2
+ IAM --> EC2_3
end
-
- User --> |HTTP Request| VIEWS
-
+
+ INTERNET --> |k8s.domain.com:6443| R53
+
%% Style definitions - Gruvbox Dark theme
classDef external fill:#3c3836,stroke:#928374,stroke-width:1px,color:#ebdbb2,font-weight:bold
- classDef backend fill:#689d6a,stroke:#427b58,stroke-width:2px,color:#282828,font-weight:bold
- classDef database fill:#458588,stroke:#076678,stroke-width:2px,color:#282828,font-weight:bold,shape:cylinder
- classDef queue fill:#cc241d,stroke:#9d0006,stroke-width:2px,color:#282828,font-weight:bold
-
+ classDef network fill:#d79921,stroke:#b57614,stroke-width:2px,color:#282828,font-weight:bold
+ classDef compute fill:#689d6a,stroke:#427b58,stroke-width:2px,color:#282828,font-weight:bold
+ classDef security fill:#458588,stroke:#076678,stroke-width:2px,color:#282828,font-weight:bold
+
%% Apply styles
- class User external
- class URLS,VIEWS,MODELS,TASKS backend
- class DB database
- class REDIS queue
-
- %% Explicit styling for subgraph titles - works in both light and dark modes
- style BackendArch fill:#282828,color:#fabd2f,font-weight:bold
- style DjangoComponents fill:#282828,color:#fabd2f,font-weight:bold
+ class INTERNET external
+ class R53,ELB,VPC,SUBNET network
+ class EC2_1,EC2_2,EC2_3 compute
+ class SG,IAM security
+
+ %% Explicit styling for subgraph
+ style AWSArch fill:#282828,color:#fabd2f,font-weight:bold
```
-## Infrastructure Architecture
+### Infrastructure Components
-### Kubernetes Deployment
+- **VPC**: Isolated network with configurable CIDR blocks
+- **Public Subnets**: Subnets across multiple availability zones for high availability
+- **Security Groups**: Firewall rules for Kubernetes API (6443), Talos API (50000), and inter-node communication
+- **EC2 Instances**: Control plane nodes running Talos Linux AMI
+- **Elastic Load Balancer**: Load balances traffic to control plane nodes
+- **Route53**: DNS records for cluster API endpoint (k8s.domain.com)
+- **IAM Roles**: Permissions for EC2 instances to access AWS services
-The application is deployed on Kubernetes, with separate environments for development, staging, and production.
+## Talos Linux Architecture
+
+Talos Linux provides the operating system layer that creates and manages the Kubernetes cluster:
```mermaid
%%{
@@ -188,150 +149,63 @@ The application is deployed on Kubernetes, with separate environments for develo
}
}%%
flowchart TB
- %% External entities
- User([User])
-
- subgraph K8sArch["Kubernetes Architecture"]
- INGRESS[Ingress Controller] --> FE_SVC[Frontend Service]
- INGRESS --> BE_SVC[Backend Service]
-
- FE_SVC --> FE_POD[Frontend Pods]
- BE_SVC --> BE_POD[Backend Pods]
-
- CELERY_SVC[Celery Service] --> CELERY_POD[Celery Pods]
-
- DB_SVC[Database Service] --> DB_POD[Database Pods]
- REDIS_SVC[Redis Service] --> REDIS_POD[Redis Pods]
- end
-
- User --> |HTTPS| INGRESS
-
- %% Style definitions - Gruvbox Dark theme
- classDef external fill:#3c3836,stroke:#928374,stroke-width:1px,color:#ebdbb2,font-weight:bold
- classDef ingress fill:#d79921,stroke:#b57614,stroke-width:2px,color:#282828,font-weight:bold
- classDef service fill:#689d6a,stroke:#427b58,stroke-width:2px,color:#282828,font-weight:bold
- classDef pod fill:#458588,stroke:#076678,stroke-width:2px,color:#282828,font-weight:bold
-
- %% Apply styles
- class User external
- class INGRESS ingress
- class FE_SVC,BE_SVC,CELERY_SVC,DB_SVC,REDIS_SVC service
- class FE_POD,BE_POD,CELERY_POD,DB_POD,REDIS_POD pod
-
- %% Explicit styling for subgraph titles - works in both light and dark modes
- style K8sArch fill:#282828,color:#fabd2f,font-weight:bold
-```
+ ADMIN([Cluster Administrator])
-### AWS Infrastructure (Terraform)
+ subgraph TalosArch["Talos Linux Architecture"]
+ TALOSCTL[talosctl CLI] --> |gRPC/TLS| TALOSAPI[Talos API :50000]
-The cloud infrastructure is managed with Terraform, provisioning AWS resources.
+ TALOSAPI --> MACHINED[machined System Service Manager]
-```mermaid
-%%{
- init: {
- 'theme': 'base',
- 'themeVariables': {
- 'primaryColor': '#282828',
- 'primaryTextColor': '#ebdbb2',
- 'primaryBorderColor': '#7c6f64',
- 'lineColor': '#7c6f64',
- 'secondaryColor': '#3c3836',
- 'tertiaryColor': '#504945'
- }
- }
-}%%
-flowchart TB
- %% External entities
- INTERNET([Internet])
-
- subgraph AWSArch["AWS Infrastructure"]
- R53[Route53] --> CF[CloudFront]
- CF --> ALB[Application Load Balancer]
-
- ALB --> EKS[EKS Cluster]
-
- EKS --> EC2[EC2 Instances]
-
- ECR[ECR Repositories] --> EKS
-
- RDS[RDS PostgreSQL] --- EKS
-
- S3[S3 Buckets] --- CF
+ MACHINED --> K8S_SERVICES[Kubernetes Services]
+ MACHINED --> SYSTEM[System Services]
+
+ K8S_SERVICES --> KUBELET[kubelet]
+ K8S_SERVICES --> API_SERVER[kube-apiserver]
+ K8S_SERVICES --> CONTROLLER[kube-controller-manager]
+ K8S_SERVICES --> SCHEDULER[kube-scheduler]
+ K8S_SERVICES --> ETCD[etcd]
+
+ SYSTEM --> NETWORKD[networkd Network Management]
+ SYSTEM --> TRUSTD[trustd Certificate Management]
end
-
- INTERNET --> R53
-
+
+ ADMIN --> TALOSCTL
+
+ KUBECTL[kubectl] --> |HTTPS:6443| API_SERVER
+
%% Style definitions - Gruvbox Dark theme
classDef external fill:#3c3836,stroke:#928374,stroke-width:1px,color:#ebdbb2,font-weight:bold
- classDef network fill:#d79921,stroke:#b57614,stroke-width:2px,color:#282828,font-weight:bold
- classDef compute fill:#689d6a,stroke:#427b58,stroke-width:2px,color:#282828,font-weight:bold
- classDef storage fill:#458588,stroke:#076678,stroke-width:2px,color:#282828,font-weight:bold
- classDef database fill:#cc241d,stroke:#9d0006,stroke-width:2px,color:#282828,font-weight:bold,shape:cylinder
-
+ classDef api fill:#d79921,stroke:#b57614,stroke-width:2px,color:#282828,font-weight:bold
+ classDef core fill:#689d6a,stroke:#427b58,stroke-width:2px,color:#282828,font-weight:bold
+ classDef k8s fill:#458588,stroke:#076678,stroke-width:2px,color:#282828,font-weight:bold
+ classDef system fill:#cc241d,stroke:#9d0006,stroke-width:2px,color:#282828,font-weight:bold
+
%% Apply styles
- class INTERNET external
- class R53,CF,ALB network
- class EKS,EC2 compute
- class ECR,S3 storage
- class RDS database
-
- %% Explicit styling for subgraph titles - works in both light and dark modes
- style AWSArch fill:#282828,color:#fabd2f,font-weight:bold
+ class ADMIN,TALOSCTL,KUBECTL external
+ class TALOSAPI api
+ class MACHINED,K8S_SERVICES,SYSTEM core
+ class KUBELET,API_SERVER,CONTROLLER,SCHEDULER,ETCD k8s
+ class NETWORKD,TRUSTD system
+
+ %% Explicit styling for subgraph
+ style TalosArch fill:#282828,color:#fabd2f,font-weight:bold
```
-## Data Flow
+### Talos Components
-This diagram illustrates how data flows through the system:
+- **machined**: Core system service that manages all other services
+- **Talos API**: gRPC API for cluster management (port 50000)
+- **kubelet**: Kubernetes node agent
+- **kube-apiserver**: Kubernetes API server (port 6443)
+- **kube-controller-manager**: Kubernetes controller manager
+- **kube-scheduler**: Kubernetes scheduler
+- **etcd**: Distributed key-value store for Kubernetes state
+- **networkd**: Network configuration and management
+- **trustd**: Certificate and PKI management
-```mermaid
-%%{
- init: {
- 'theme': 'base',
- 'themeVariables': {
- 'primaryColor': '#282828',
- 'primaryTextColor': '#ebdbb2',
- 'primaryBorderColor': '#7c6f64',
- 'lineColor': '#7c6f64',
- 'secondaryColor': '#3c3836',
- 'tertiaryColor': '#504945',
- 'actorBkg': '#3c3836',
- 'actorTextColor': '#ebdbb2',
- 'actorBorder': '#928374'
- }
- }
-}%%
-sequenceDiagram
- actor User as User
- participant FE as Frontend (Next.js)
- participant BE as Backend (Django)
- participant DB as PostgreSQL
- participant Worker as Celery Worker
-
- User->>FE: Access Application
- activate FE
- FE->>BE: GraphQL Query/Mutation
- activate BE
- BE->>DB: Database Query
- activate DB
- DB-->>BE: Query Results
- deactivate DB
- BE-->>FE: GraphQL Response
- deactivate BE
- FE-->>User: Display Data
- deactivate FE
-
- alt Async Process
- BE->>Worker: Queue Task
- activate Worker
- Worker->>DB: Process Data
- activate DB
- deactivate DB
- Worker->>BE: Task Result
- deactivate Worker
- end
-```
+## Deployment Flow
-## Development Workflow
+This diagram shows the deployment process from infrastructure provisioning to running cluster:
```mermaid
%%{
@@ -348,30 +222,29 @@ sequenceDiagram
}
}%%
flowchart LR
- subgraph DevWorkflow["Development Workflow"]
- CODE[Local Development] --> |Git Push| REPO[Repository]
- REPO --> |CI/CD Pipeline| CI[CI/CD Tests]
- CI --> |Deployment| K8S[Kubernetes]
- K8S --> |ArgoCD| ENV[Environment]
+ subgraph DeployFlow["Deployment Flow"]
+ TERRAFORM[1. Terraform Apply Provision AWS Resources] --> EC2[2. EC2 Instances Boot Talos Linux AMI]
+ EC2 --> GENCONFIG[3. Generate Configs talosctl gen config]
+ GENCONFIG --> APPLYCONFIG[4. Apply Configuration talosctl apply-config]
+ APPLYCONFIG --> BOOTSTRAP[5. Bootstrap Cluster talosctl bootstrap]
+ BOOTSTRAP --> KUBECONFIG[6. Generate kubeconfig talosctl kubeconfig]
+ KUBECONFIG --> RUNNING[7. Cluster Running kubectl get nodes]
end
-
+
%% Style definitions - Gruvbox Dark theme
- classDef dev fill:#d79921,stroke:#b57614,stroke-width:2px,color:#282828,font-weight:bold
- classDef repo fill:#689d6a,stroke:#427b58,stroke-width:2px,color:#282828,font-weight:bold
- classDef ci fill:#458588,stroke:#076678,stroke-width:2px,color:#282828,font-weight:bold
- classDef deploy fill:#cc241d,stroke:#9d0006,stroke-width:2px,color:#282828,font-weight:bold
-
+ classDef terraform fill:#d79921,stroke:#b57614,stroke-width:2px,color:#282828,font-weight:bold
+ classDef boot fill:#689d6a,stroke:#427b58,stroke-width:2px,color:#282828,font-weight:bold
+ classDef config fill:#458588,stroke:#076678,stroke-width:2px,color:#282828,font-weight:bold
+ classDef running fill:#cc241d,stroke:#9d0006,stroke-width:2px,color:#282828,font-weight:bold
+
%% Apply styles
- class CODE dev
- class REPO repo
- class CI ci
- class K8S,ENV deploy
-
- %% Link styling
- linkStyle default stroke:#7c6f64,stroke-width:2px,stroke-dasharray: 5 5
-
- %% Explicit styling for subgraph titles - works in both light and dark modes
- style DevWorkflow fill:#282828,color:#fabd2f,font-weight:bold
+ class TERRAFORM terraform
+ class EC2 boot
+ class GENCONFIG,APPLYCONFIG,BOOTSTRAP,KUBECONFIG config
+ class RUNNING running
+
+ %% Explicit styling for subgraph
+ style DeployFlow fill:#282828,color:#fabd2f,font-weight:bold
```
## Environment Architecture
@@ -394,33 +267,54 @@ The project supports multiple environments with different configurations:
}%%
flowchart TB
subgraph EnvArch["Environment Architecture"]
- BASE[Base Configuration] --> DEV[Development]
- BASE --> SANDBOX[Sandbox]
- BASE --> STAGING[Staging]
- BASE --> PROD[Production]
-
- DEV --> |Local Testing| DEV_ENV[Local Environment]
- SANDBOX --> |Testing| SANDBOX_ENV[Sandbox Environment]
- STAGING --> |Pre-Production| STAGING_ENV[Staging Environment]
- PROD --> |Production| PROD_ENV[Production Environment]
+ TERRAFORM[Terraform Base Module]
+
+ TERRAFORM --> SANDBOX[Sandbox Environment terraform/sandbox/]
+ TERRAFORM --> STAGING[Staging Environment terraform/staging/]
+ TERRAFORM --> PROD[Production Environment terraform/production/]
+
+ SANDBOX --> SANDBOX_CLUSTER[Talos Cluster k8s.sandbox.domain.com]
+ STAGING --> STAGING_CLUSTER[Talos Cluster k8s.staging.domain.com]
+ PROD --> PROD_CLUSTER[Talos Cluster k8s.prod.domain.com]
end
-
+
%% Style definitions - Gruvbox Dark theme
classDef base fill:#458588,stroke:#076678,stroke-width:2px,color:#282828,font-weight:bold
- classDef dev fill:#689d6a,stroke:#427b58,stroke-width:2px,color:#282828,font-weight:bold
+ classDef sandbox fill:#b16286,stroke:#8f3f71,stroke-width:2px,color:#282828,font-weight:bold
classDef staging fill:#d79921,stroke:#b57614,stroke-width:2px,color:#282828,font-weight:bold
classDef prod fill:#cc241d,stroke:#9d0006,stroke-width:2px,color:#282828,font-weight:bold
- classDef sandbox fill:#b16286,stroke:#8f3f71,stroke-width:2px,color:#282828,font-weight:bold
- classDef env fill:#3c3836,stroke:#928374,stroke-width:1px,color:#ebdbb2,font-weight:bold
-
+ classDef cluster fill:#689d6a,stroke:#427b58,stroke-width:2px,color:#282828,font-weight:bold
+
%% Apply styles
- class BASE base
- class DEV dev
+ class TERRAFORM base
class SANDBOX sandbox
class STAGING staging
class PROD prod
- class DEV_ENV,SANDBOX_ENV,STAGING_ENV,PROD_ENV env
-
- %% Explicit styling for subgraph titles - works in both light and dark modes
+ class SANDBOX_CLUSTER,STAGING_CLUSTER,PROD_CLUSTER cluster
+
+ %% Explicit styling for subgraph
style EnvArch fill:#282828,color:#fabd2f,font-weight:bold
-```
\ No newline at end of file
+```
+
+## Security Architecture
+
+Talos Linux provides multiple layers of security:
+
+1. **No SSH Access**: Impossible to SSH into nodes, eliminating a major attack vector
+2. **Immutable File System**: Root filesystem is read-only and cannot be modified
+3. **API Authentication**: All management operations require mutual TLS authentication
+4. **Minimal Attack Surface**: Only runs Kubernetes components, nothing else
+5. **Encrypted Communication**: All API communication is encrypted
+6. **Certificate Management**: Automatic certificate rotation and management
+
+## Network Architecture
+
+Default ports and protocols:
+
+- **6443**: Kubernetes API server (HTTPS)
+- **50000**: Talos API (gRPC/TLS)
+- **50001**: Talos Trustd API (gRPC/TLS)
+- **2379-2380**: etcd client and peer communication
+- **10250**: kubelet API
+
+All inter-node communication is secured with TLS.
diff --git a/template/docs/dataflow.md b/template/docs/dataflow.md
deleted file mode 100644
index 74eb727..0000000
--- a/template/docs/dataflow.md
+++ /dev/null
@@ -1,695 +0,0 @@
-# :arrows_counterclockwise: Data Flow Diagrams
-
-This document provides detailed data flow diagrams for the application, visualizing how data moves through different components and stages of the system.
-
-## Comprehensive Data Flow
-
-This diagram shows the overall data flow across all components of the system, including frontend, backend, storage systems, and external integrations.
-
-```mermaid
-%%{
- init: {
- 'theme': 'base',
- 'themeVariables': {
- 'primaryColor': '#282828',
- 'primaryTextColor': '#ebdbb2',
- 'primaryBorderColor': '#7c6f64',
- 'lineColor': '#7c6f64',
- 'secondaryColor': '#3c3836',
- 'tertiaryColor': '#504945'
- }
- }
-}%%
-flowchart TD
- %% External entities
- User([End User])
- Developer([Developer])
-
- %% Data sources
- subgraph DataSources["Data Sources"]
- direction TB
- FormInput[("User Form Input")]
- APIData[("External API Data")]
- FileUploads[("File Uploads")]
- StoredData[("Database Records")]
- end
-
- %% Frontend data flow
- subgraph FrontendFlow["Frontend Data Flow"]
- direction TB
- NextPages["Next.js Pages"]
- Components["React Components"]
- ClientState["Client State Management"]
- ApolloClient["Apollo Client"]
- GQLQueries["GraphQL Queries"]
- GQLMutations["GraphQL Mutations"]
-
- NextPages --> Components
- Components --> ClientState
- ClientState --> Components
- Components --> ApolloClient
- ApolloClient --> GQLQueries
- ApolloClient --> GQLMutations
- end
-
- %% Backend data flow
- subgraph BackendFlow["Backend Data Flow"]
- direction TB
- DjangoViews["Django Views/GraphQL"]
- QueryResolvers["GraphQL Query Resolvers"]
- MutationResolvers["GraphQL Mutation Resolvers"]
- DjangoModels["Django Models"]
- ModelValidation["Model Validation"]
- Serialization["Data Serialization"]
- CeleryTasks["Async Celery Tasks"]
-
- DjangoViews --> QueryResolvers
- DjangoViews --> MutationResolvers
- QueryResolvers --> DjangoModels
- MutationResolvers --> ModelValidation
- ModelValidation --> DjangoModels
- DjangoModels --> Serialization
- MutationResolvers --> CeleryTasks
- CeleryTasks --> DjangoModels
- end
-
- %% Storage systems
- subgraph StorageSystems["Storage Systems"]
- direction TB
- PostgreSQL[(PostgreSQL)]
- Redis[(Redis)]
- S3Bucket[(S3 Bucket)]
-
- PostgreSQL --> StoredData
- end
-
- %% External systems
- subgraph ExternalSystems["External Systems"]
- direction TB
- EmailService["Email Service"]
- ExternalAPIs["Third-Party APIs"]
- end
-
- %% Deployment data flow
- subgraph DeploymentFlow["Deployment Data Flow"]
- direction TB
- SourceCode[("Source Code")]
- GitRepo["Git Repository"]
- CISystem["CI/CD Pipeline"]
- DockerImages["Docker Images"]
- ECRRepository["ECR Repository"]
- K8sManifests["Kubernetes Manifests"]
- ArgoCD["ArgoCD"]
- K8sCluster["Kubernetes Cluster"]
-
- SourceCode --> GitRepo
- GitRepo --> CISystem
- CISystem --> DockerImages
- DockerImages --> ECRRepository
- K8sManifests --> ArgoCD
- ECRRepository --> ArgoCD
- ArgoCD --> K8sCluster
- end
-
- %% Primary data flow connections
- %% User/Client interactions
- User --> FormInput
- User --> FileUploads
- FormInput --> Components
- FileUploads --> Components
-
- %% Frontend to backend
- GQLQueries --> DjangoViews
- GQLMutations --> DjangoViews
-
- %% Backend to storage
- DjangoModels --> PostgreSQL
- CeleryTasks --> Redis
- CeleryTasks --> S3Bucket
-
- %% External integrations
- CeleryTasks --> EmailService
- QueryResolvers --> ExternalAPIs
- ExternalAPIs --> APIData
- APIData --> QueryResolvers
-
- %% Deployment workflow
- Developer --> SourceCode
-
- %% Response flows
- PostgreSQL --> DjangoModels
- Serialization --> QueryResolvers
- Serialization --> MutationResolvers
- QueryResolvers --> DjangoViews
- MutationResolvers --> DjangoViews
- DjangoViews --> GQLQueries
- DjangoViews --> GQLMutations
- GQLQueries --> ApolloClient
- GQLMutations --> ApolloClient
- ApolloClient --> Components
- Components --> NextPages
- NextPages --> User
-
- %% Style definitions - Gruvbox Dark theme
- classDef external fill:#3c3836,stroke:#928374,stroke-width:1px,color:#ebdbb2,font-weight:bold
- classDef frontend fill:#d79921,stroke:#b57614,stroke-width:2px,color:#282828,font-weight:bold
- classDef backend fill:#689d6a,stroke:#427b58,stroke-width:2px,color:#282828,font-weight:bold
- classDef storage fill:#458588,stroke:#076678,stroke-width:2px,color:#282828,font-weight:bold,shape:cylinder
- classDef queue fill:#cc241d,stroke:#9d0006,stroke-width:2px,color:#282828,font-weight:bold
- classDef data fill:#b16286,stroke:#8f3f71,stroke-width:2px,color:#282828,font-weight:bold
- classDef deployment fill:#98971a,stroke:#79740e,stroke-width:2px,color:#282828,font-weight:bold
- classDef external_system fill:#d65d0e,stroke:#af3a03,stroke-width:2px,color:#282828,font-weight:bold
-
- %% Apply styles
- class User,Developer external
- class NextPages,Components,ClientState,ApolloClient,GQLQueries,GQLMutations frontend
- class DjangoViews,QueryResolvers,MutationResolvers,DjangoModels,ModelValidation,Serialization,CeleryTasks backend
- class PostgreSQL,Redis,S3Bucket storage
- class FormInput,APIData,FileUploads,StoredData data
- class EmailService,ExternalAPIs external_system
- class SourceCode,GitRepo,CISystem,DockerImages,ECRRepository,K8sManifests,ArgoCD,K8sCluster deployment
-
- %% Explicit styling for subgraph titles
- style DataSources fill:#282828,color:#fabd2f,font-weight:bold
- style FrontendFlow fill:#282828,color:#fabd2f,font-weight:bold
- style BackendFlow fill:#282828,color:#fabd2f,font-weight:bold
- style StorageSystems fill:#282828,color:#fabd2f,font-weight:bold
- style ExternalSystems fill:#282828,color:#fabd2f,font-weight:bold
- style DeploymentFlow fill:#282828,color:#fabd2f,font-weight:bold
-```
-
-## CRUD Operations Flow
-
-This sequence diagram illustrates the detailed flow of Create, Read, Update, and Delete operations from user interaction through the entire stack.
-
-```mermaid
-%%{
- init: {
- 'theme': 'base',
- 'themeVariables': {
- 'primaryColor': '#282828',
- 'primaryTextColor': '#ebdbb2',
- 'primaryBorderColor': '#7c6f64',
- 'lineColor': '#7c6f64',
- 'secondaryColor': '#3c3836',
- 'tertiaryColor': '#504945',
- 'actorBkg': '#3c3836',
- 'actorTextColor': '#ebdbb2',
- 'actorBorder': '#928374'
- }
- }
-}%%
-sequenceDiagram
- actor User as User
- participant UI as Next.js UI
- participant Client as Apollo Client
- participant API as Django GraphQL API
- participant Validation as Input Validation
- participant Model as Django Models
- participant DB as PostgreSQL
- participant Background as Celery Tasks
-
- %% CREATE flow
- User->>UI: Submit Create Form
- activate UI
- UI->>Client: Execute create mutation
- activate Client
- Client->>API: GraphQL mutation request
- activate API
- API->>Validation: Validate input data
- activate Validation
-
- alt Invalid Data
- Validation-->>API: Validation errors
- API-->>Client: Return validation errors
- Client-->>UI: Display errors
- UI-->>User: Show error feedback
- else Valid Data
- Validation-->>API: Validated data
- deactivate Validation
- API->>Model: Create new object
- activate Model
- Model->>DB: INSERT query
- activate DB
- DB-->>Model: Confirmation
- deactivate DB
- Model-->>API: New object data
- deactivate Model
-
- opt Async Processing Needed
- API->>Background: Queue background task
- activate Background
- Background->>DB: Additional processing
- Background-->>API: Processing status
- deactivate Background
- end
-
- API-->>Client: Return success response
- deactivate API
- Client-->>UI: Update local cache/state
- deactivate Client
- UI-->>User: Display success confirmation
- UI->>UI: Redirect or update view
- end
- deactivate UI
-
- %% READ flow
- User->>UI: Request Data View
- activate UI
- UI->>Client: Execute query
- activate Client
- Client->>API: GraphQL query request
- activate API
- API->>Model: Retrieve data
- activate Model
- Model->>DB: SELECT query
- activate DB
- DB-->>Model: Query results
- deactivate DB
- Model-->>API: Format data
- deactivate Model
- API-->>Client: Return data
- deactivate API
- Client-->>UI: Update local state
- deactivate Client
- UI-->>User: Display data
- deactivate UI
-
- %% UPDATE flow
- User->>UI: Submit Edit Form
- activate UI
- UI->>Client: Execute update mutation
- activate Client
- Client->>API: GraphQL mutation request
- activate API
- API->>Validation: Validate input data
- activate Validation
-
- alt Invalid Data
- Validation-->>API: Validation errors
- API-->>Client: Return validation errors
- Client-->>UI: Display errors
- UI-->>User: Show error feedback
- else Valid Data
- Validation-->>API: Validated data
- deactivate Validation
- API->>Model: Update object
- activate Model
- Model->>DB: UPDATE query
- activate DB
- DB-->>Model: Confirmation
- deactivate DB
- Model-->>API: Updated object data
- deactivate Model
-
- opt Async Processing Needed
- API->>Background: Queue background task
- activate Background
- Background->>DB: Additional processing
- Background-->>API: Processing status
- deactivate Background
- end
-
- API-->>Client: Return success response
- deactivate API
- Client-->>UI: Update local cache/state
- deactivate Client
- UI-->>User: Display success confirmation
- end
- deactivate UI
-
- %% DELETE flow
- User->>UI: Request Delete
- activate UI
- UI->>UI: Confirm deletion
- UI->>Client: Execute delete mutation
- activate Client
- Client->>API: GraphQL mutation request
- activate API
- API->>Model: Delete object
- activate Model
- Model->>DB: DELETE query
- activate DB
- DB-->>Model: Confirmation
- deactivate DB
- Model-->>API: Success status
- deactivate Model
-
- opt Background Cleanup
- API->>Background: Queue cleanup task
- activate Background
- Background->>DB: Cleanup related data
- Background-->>API: Cleanup status
- deactivate Background
- end
-
- API-->>Client: Return success response
- deactivate API
- Client-->>UI: Update local cache/state
- deactivate Client
- UI-->>User: Display success confirmation
- UI->>UI: Redirect to list view
- deactivate UI
-```
-
-## API/Backend Data Processing Flow
-
-This diagram illustrates how data moves through the various layers of the backend, from API requests to database operations.
-
-```mermaid
-%%{
- init: {
- 'theme': 'base',
- 'themeVariables': {
- 'primaryColor': '#282828',
- 'primaryTextColor': '#ebdbb2',
- 'primaryBorderColor': '#7c6f64',
- 'lineColor': '#7c6f64',
- 'secondaryColor': '#3c3836',
- 'tertiaryColor': '#504945'
- }
- }
-}%%
-flowchart TD
- %% Request entry points
- GraphQLReq(["GraphQL Request"])
- RestReq(["REST API Request"])
- CeleryTask(["Scheduled/Triggered Task"])
-
- %% Authentication & permission layer
- subgraph AuthLayer["Authentication & Authorization"]
- direction LR
- TokenAuth["Token Authentication"]
- SessionAuth["Session Authentication"]
- Permissions["Permission Checks"]
-
- TokenAuth --> Permissions
- SessionAuth --> Permissions
- end
-
- %% Request processing layer
- subgraph APILayer["API Processing Layer"]
- direction LR
- GQLSchema["GraphQL Schema"]
- GQLResolvers["GraphQL Resolvers"]
- RestViews["REST API Views"]
- Serializers["Django Serializers"]
-
- GQLSchema --> GQLResolvers
- RestViews --> Serializers
- end
-
- %% Business logic layer
- subgraph BusinessLayer["Business Logic Layer"]
- direction TB
- Services["Service Functions"]
- ModelMethods["Model Methods"]
- HelperFuncs["Helper Utilities"]
- Validators["Data Validators"]
-
- Services --> ModelMethods
- Services --> HelperFuncs
- Services --> Validators
- end
-
- %% Data access layer
- subgraph DataLayer["Data Access Layer"]
- direction TB
- Models["Django Models"]
- Managers["Custom Managers"]
- QuerySets["QuerySet Methods"]
- RawSQL["Raw SQL (when needed)"]
-
- Models --> Managers
- Managers --> QuerySets
- Managers --> RawSQL
- end
-
- %% Database layer
- subgraph DBLayer["Database Layer"]
- direction LR
- ReadReplica[("Read Replica")]
- MainDB[("Primary Database")]
- Migrations["Django Migrations"]
-
- ReadReplica --- MainDB
- Migrations --> MainDB
- end
-
- %% Asynchronous processing
- subgraph AsyncLayer["Asynchronous Processing"]
- direction TB
- CeleryQueue["Celery Task Queue"]
- Workers["Celery Workers"]
- Results["Task Results Storage"]
-
- CeleryQueue --> Workers
- Workers --> Results
- end
-
- %% External services integration
- subgraph ExternalLayer["External Integrations"]
- direction LR
- Email["Email Service"]
- Storage["S3 Storage"]
- ThirdParty["3rd Party APIs"]
-
- Email --- Storage
- Storage --- ThirdParty
- end
-
- %% Main request flow
- GraphQLReq --> TokenAuth
- RestReq --> SessionAuth
- Permissions --> GQLSchema
- Permissions --> RestViews
- GQLResolvers --> Services
- Serializers --> Services
- Services --> Models
- Models --> MainDB
- Models --> ReadReplica
-
- %% Async flow
- GQLResolvers --> CeleryQueue
- RestViews --> CeleryQueue
- CeleryTask --> CeleryQueue
- Workers --> Services
- Workers --> ExternalLayer
-
- %% Return flow
- MainDB --> Models
- ReadReplica --> Models
- Models --> Services
- Models --> ModelMethods
- Services --> GQLResolvers
- Services --> Serializers
- GQLResolvers --> |Response| GraphQLReq
- Serializers --> |Response| RestReq
-
- %% Style definitions - Gruvbox Dark theme
- classDef input fill:#3c3836,stroke:#928374,stroke-width:1px,color:#ebdbb2,font-weight:bold
- classDef auth fill:#d79921,stroke:#b57614,stroke-width:2px,color:#282828,font-weight:bold
- classDef api fill:#689d6a,stroke:#427b58,stroke-width:2px,color:#282828,font-weight:bold
- classDef business fill:#458588,stroke:#076678,stroke-width:2px,color:#282828,font-weight:bold
- classDef data fill:#cc241d,stroke:#9d0006,stroke-width:2px,color:#282828,font-weight:bold
- classDef db fill:#b16286,stroke:#8f3f71,stroke-width:2px,color:#282828,font-weight:bold
- classDef async fill:#98971a,stroke:#79740e,stroke-width:2px,color:#282828,font-weight:bold
- classDef ext fill:#d65d0e,stroke:#af3a03,stroke-width:2px,color:#282828,font-weight:bold
-
- %% Apply styles
- class GraphQLReq,RestReq,CeleryTask input
- class TokenAuth,SessionAuth,Permissions auth
- class GQLSchema,GQLResolvers,RestViews,Serializers api
- class Services,ModelMethods,HelperFuncs,Validators business
- class Models,Managers,QuerySets,RawSQL data
- class ReadReplica,MainDB,Migrations db
- class CeleryQueue,Workers,Results async
- class Email,Storage,ThirdParty ext
-
- %% Explicit styling for subgraph titles
- style AuthLayer fill:#282828,color:#fabd2f,font-weight:bold
- style APILayer fill:#282828,color:#fabd2f,font-weight:bold
- style BusinessLayer fill:#282828,color:#fabd2f,font-weight:bold
- style DataLayer fill:#282828,color:#fabd2f,font-weight:bold
- style DBLayer fill:#282828,color:#fabd2f,font-weight:bold
- style AsyncLayer fill:#282828,color:#fabd2f,font-weight:bold
- style ExternalLayer fill:#282828,color:#fabd2f,font-weight:bold
-```
-
-## Deployment/Infrastructure Data Flow
-
-This diagram visualizes how code and configuration flow through the CI/CD pipeline to cloud and Kubernetes infrastructure.
-
-```mermaid
-%%{
- init: {
- 'theme': 'base',
- 'themeVariables': {
- 'primaryColor': '#282828',
- 'primaryTextColor': '#ebdbb2',
- 'primaryBorderColor': '#7c6f64',
- 'lineColor': '#7c6f64',
- 'secondaryColor': '#3c3836',
- 'tertiaryColor': '#504945'
- }
- }
-}%%
-flowchart TD
- %% Development sources
- Developer([Developer])
- LocalCode[("Local Code")]
-
- %% Source control
- subgraph SourceControl["Source Control"]
- direction TB
- GitRepo["Git Repository"]
- PRs["Pull Requests"]
- Branches["Feature/Release Branches"]
- MainBranch["Main Branch"]
-
- Branches --> PRs
- PRs --> MainBranch
- end
-
- %% CI/CD pipeline
- subgraph CIPipeline["CI/CD Pipeline"]
- direction TB
- GHActions["GitHub Actions"]
- Lint["Linting & Type Checking"]
- UnitTests["Unit Tests"]
- IntegTests["Integration Tests"]
- Build["Build Process"]
-
- GHActions --> Lint
- GHActions --> UnitTests
- Lint --> Build
- UnitTests --> Build
- IntegTests --> Build
- end
-
- %% Artifacts
- subgraph Artifacts["Build Artifacts"]
- direction TB
- FrontendImage["Frontend Docker Image"]
- BackendImage["Backend Docker Image"]
- CeleryImage["Celery Worker Image"]
- TerraformPlans["Terraform Plans"]
-
- FrontendImage --- BackendImage
- BackendImage --- CeleryImage
- end
-
- %% Infrastructure as code
- subgraph IaC["Infrastructure as Code"]
- direction TB
- TerraformModules["Terraform Modules"]
- TerraformVars["Environment Variables"]
- K8sManifests["Kubernetes Manifests"]
- KustomizeOverlays["Kustomize Overlays"]
-
- TerraformModules --> TerraformVars
- K8sManifests --> KustomizeOverlays
- end
-
- %% Cloud resources
- subgraph CloudResources["AWS Cloud Resources"]
- direction TB
- VPC["VPC & Networking"]
- EKS["EKS Cluster"]
- ECR["Container Registry"]
- RDS["RDS Database"]
- S3["S3 Buckets"]
- CF["CloudFront"]
- R53["Route 53"]
-
- VPC --> EKS
- ECR --> EKS
- EKS --> RDS
- S3 --> CF
- CF --> R53
- end
-
- %% Kubernetes deployment
- subgraph K8sDeployment["Kubernetes Deployment"]
- direction TB
- ArgoCD["ArgoCD"]
- CertManager["Cert Manager"]
- Ingress["Ingress Controller"]
- Secrets["Sealed Secrets"]
- Monitoring["Monitoring Stack"]
- FrontendSvc["Frontend Service"]
- BackendSvc["Backend Service"]
- CelerySvc["Celery Service"]
- RedisSvc["Redis Service"]
- DBSvc["Database Service"]
-
- ArgoCD --> CertManager
- ArgoCD --> Ingress
- ArgoCD --> Secrets
- ArgoCD --> Monitoring
- ArgoCD --> FrontendSvc
- ArgoCD --> BackendSvc
- ArgoCD --> CelerySvc
- ArgoCD --> RedisSvc
- ArgoCD --> DBSvc
- Secrets --> FrontendSvc
- Secrets --> BackendSvc
- Secrets --> CelerySvc
- Secrets --> DBSvc
- Ingress --> FrontendSvc
- Ingress --> BackendSvc
- end
-
- %% End user access
- EndUser([End User])
-
- %% Main flow connections
- Developer --> LocalCode
- LocalCode --> GitRepo
- MainBranch --> GHActions
- Build --> FrontendImage
- Build --> BackendImage
- Build --> CeleryImage
- FrontendImage --> ECR
- BackendImage --> ECR
- CeleryImage --> ECR
- MainBranch --> TerraformModules
- MainBranch --> K8sManifests
- TerraformVars --> VPC
- TerraformVars --> EKS
- TerraformVars --> ECR
- TerraformVars --> RDS
- TerraformVars --> S3
- TerraformVars --> CF
- TerraformVars --> R53
- KustomizeOverlays --> ArgoCD
- ECR --> ArgoCD
- EKS --> ArgoCD
- R53 --> EndUser
-
- %% Style definitions - Gruvbox Dark theme
- classDef external fill:#3c3836,stroke:#928374,stroke-width:1px,color:#ebdbb2,font-weight:bold
- classDef source fill:#d79921,stroke:#b57614,stroke-width:2px,color:#282828,font-weight:bold
- classDef ci fill:#689d6a,stroke:#427b58,stroke-width:2px,color:#282828,font-weight:bold
- classDef artifact fill:#458588,stroke:#076678,stroke-width:2px,color:#282828,font-weight:bold
- classDef iac fill:#cc241d,stroke:#9d0006,stroke-width:2px,color:#282828,font-weight:bold
- classDef cloud fill:#b16286,stroke:#8f3f71,stroke-width:2px,color:#282828,font-weight:bold
- classDef k8s fill:#98971a,stroke:#79740e,stroke-width:2px,color:#282828,font-weight:bold
-
- %% Apply styles
- class Developer,EndUser,LocalCode external
- class GitRepo,PRs,Branches,MainBranch source
- class GHActions,Lint,UnitTests,IntegTests,Build ci
- class FrontendImage,BackendImage,CeleryImage,TerraformPlans artifact
- class TerraformModules,TerraformVars,K8sManifests,KustomizeOverlays iac
- class VPC,EKS,ECR,RDS,S3,CF,R53 cloud
- class ArgoCD,CertManager,Ingress,Secrets,Monitoring,FrontendSvc,BackendSvc,CelerySvc,RedisSvc,DBSvc k8s
-
- %% Explicit styling for subgraph titles
- style SourceControl fill:#282828,color:#fabd2f,font-weight:bold
- style CIPipeline fill:#282828,color:#fabd2f,font-weight:bold
- style Artifacts fill:#282828,color:#fabd2f,font-weight:bold
- style IaC fill:#282828,color:#fabd2f,font-weight:bold
- style CloudResources fill:#282828,color:#fabd2f,font-weight:bold
- style K8sDeployment fill:#282828,color:#fabd2f,font-weight:bold
-```
\ No newline at end of file
diff --git a/template/docs/debug.md b/template/docs/debug.md
deleted file mode 100644
index 96ea6e7..0000000
--- a/template/docs/debug.md
+++ /dev/null
@@ -1,29 +0,0 @@
-# :bug: How to debug the application
-
-The steps below describe how to set up interactive debugging with PyCharm.
-
-## PyCharm Debugging Setup
-Update `k8s/base/app.configmap.yaml` with `data` field `PYTHONBREAKPOINT: "utils.pycharm_debugger"`
-
-In PyCharm:
-
-1. Go to 'Run' in the toolbar
-2. Click on 'Edit Configurations'
-3. Click on '+' in the top left of the dialog
-4. Select 'Python Debug Server'
-5. Set the host to 0.0.0.0 and the port to 6400, and the name as you see fit.
-6. For 'path mappings' set /path/to/{{ copier__project_slug}}/backend=/app/src
-7. Check 'Redirect console output to console'
-8. Remove check on 'Suspend after connect'.
-9. Click 'Ok'
-
-
-
-## Debugging in development
-Before the code you want to debug, add the following:
-
-```python
-breakpoint()
-```
-
-You must then set break points in your IDE and call the code as usual to hit them.
diff --git a/template/docs/deployment.md b/template/docs/deployment.md
index 27573e4..87ff078 100644
--- a/template/docs/deployment.md
+++ b/template/docs/deployment.md
@@ -1,10 +1,372 @@
-# :package: How to deploy
+# :package: How to Deploy a Talos Cluster
-## Infrastructure provisioning
+This guide walks through deploying a Talos Linux Kubernetes cluster on AWS, from infrastructure provisioning to cluster bootstrap.
-Terraform can be used to provision AWS resources for your project deployment.
-Read [terraform/README.md](/terraform/README.md) for more information and steps for provisioning resources.
+## Prerequisites
-## Application deployment
+Before you begin, ensure you have the following tools installed:
-Use ArgoCD and Kubernetes to automate the deployment of your application to your infrastructure. ArgoCD monitors changes within your repository, promptly applying the relevant Kubernetes manifests. Read [bootstrap-cluster/README.md](/bootstrap-cluster/README.md) for more details.
+- **AWS CLI** - Configured with appropriate credentials
+- **Terraform/OpenTofu** - Infrastructure provisioning (v1.6+)
+- **talosctl** - Talos cluster management CLI
+- **kubectl** - Kubernetes command-line tool
+- **Task** - Task runner for automation
+
+## Deployment Overview
+
+The deployment process consists of two main phases:
+
+1. **Infrastructure Provisioning** - Use Terraform to create AWS resources
+2. **Cluster Bootstrap** - Use Talos to initialize the Kubernetes cluster
+
+## Step 1: Configure AWS Credentials
+
+Ensure your AWS credentials are configured:
+
+```bash
+aws configure
+# OR
+export AWS_PROFILE=your-profile
+```
+
+Verify access:
+
+```bash
+aws sts get-caller-identity
+```
+
+## Step 2: Choose Your Environment
+
+The template supports three environments:
+
+- **sandbox** - Testing and experimentation
+- **staging** - Pre-production validation
+- **production** - Production workloads
+
+For this guide, we'll use **sandbox**. Replace with your chosen environment as needed.
+
+## Step 3: Create S3 Backend for Terraform State
+
+Before provisioning infrastructure, you need to create the S3 bucket and DynamoDB table for storing Terraform state.
+
+Navigate to the bootstrap directory:
+
+```bash
+cd terraform/bootstrap
+```
+
+Initialize and apply the bootstrap configuration:
+
+```bash
+tofu init && tofu plan -out=tfplan.out && tofu apply tfplan.out
+```
+
+This creates:
+- S3 bucket for storing Terraform state
+- DynamoDB table for state locking
+
+**Note:** This step only needs to be run once for all environments.
+
+## Step 4: Provision Infrastructure with Terraform
+
+### Initialize Terraform
+
+Navigate to the environment directory:
+
+```bash
+cd ../sandbox
+```
+
+Initialize Terraform:
+
+```bash
+tofu init
+```
+
+### Review and Apply
+
+Review the planned changes:
+
+```bash
+tofu plan
+```
+
+Apply the infrastructure:
+
+```bash
+tofu apply
+```
+
+Type `yes` when prompted to confirm.
+
+### What Gets Created
+
+Terraform provisions:
+
+- VPC with public subnets across availability zones
+- Security groups for Kubernetes and Talos APIs
+- EC2 instances with Talos Linux AMI (control plane nodes)
+- Elastic Load Balancer for control plane access
+- Route53 DNS record for cluster API endpoint
+- IAM roles for EC2 instances
+
+**Note:** EC2 instances will boot with Talos OS but the Kubernetes cluster is NOT yet initialized.
+
+## Step 5: Bootstrap the Talos Cluster
+
+After infrastructure is provisioned, bootstrap the Kubernetes cluster.
+
+### Navigate to Bootstrap Directory
+
+```bash
+cd ../../bootstrap-cluster/sandbox
+```
+
+### Review Environment Configuration
+
+Check the `.env` file for your environment:
+
+```bash
+cat .env
+```
+
+This contains:
+- `TALOS_FACTORY_IMAGE` - Talos version (v1.12.1)
+- `TOFU_DIR` - Path to Terraform directory
+
+### Run Bootstrap Process
+
+Execute the bootstrap task:
+
+```bash
+export ENV=sandbox
+task talos:bootstrap
+```
+
+This automated task performs the following steps:
+
+1. **Generate Configs** - Creates `talosconfig` and `controlplane.yaml`
+2. **Set Node IPs** - Configures Talos endpoints from Terraform output
+3. **Apply Configuration** - Pushes Talos config to all nodes
+4. **Bootstrap Kubernetes** - Initializes the Kubernetes cluster
+5. **Generate kubeconfig** - Creates kubectl configuration
+6. **Upgrade Talos** - Updates to specific v1.12.1 factory image
+
+### Monitor the Process
+
+You can monitor the bootstrap process via AWS Serial Console:
+
+1. Go to AWS Console → EC2 → Instances
+2. Select a control plane instance
+3. Actions → Monitor and troubleshoot → Get system log
+
+## Step 6: Verify Cluster Status
+
+### Check Talos Node Health
+
+```bash
+export TALOSCONFIG=./sandbox/talosconfig
+talosctl health --nodes <node-ip-1>,<node-ip-2>,<node-ip-3>
+```
+
+### Check Talos Version
+
+```bash
+talosctl version --nodes <node-ip>
+```
+
+### Access Kubernetes Cluster
+
+```bash
+export KUBECONFIG=./sandbox/kubeconfig
+kubectl get nodes
+```
+
+Expected output:
+```
+NAME STATUS ROLES AGE VERSION
+my-cluster-0 Ready control-plane 5m v1.31.x
+my-cluster-1 Ready control-plane 5m v1.31.x
+my-cluster-2 Ready control-plane 5m v1.31.x
+```
+
+### Check Kubernetes Components
+
+```bash
+kubectl get pods -n kube-system
+```
+
+All system pods should be Running.
+
+## Step 7: Store Credentials Securely
+
+The bootstrap process stores credentials in AWS Secrets Manager:
+
+- **Talosconfig** - Stored as `${ENV}_talosconfig_yaml`
+- **Kubeconfig** - Stored as `${ENV}_kubeconfig_yaml`
+
+Retrieve them later with:
+
+```bash
+# Get talosconfig
+aws secretsmanager get-secret-value \
+ --secret-id sandbox_talosconfig_yaml \
+ --query SecretString --output text | base64 -d > talosconfig
+
+# Get kubeconfig
+aws secretsmanager get-secret-value \
+ --secret-id sandbox_kubeconfig_yaml \
+ --query SecretString --output text | base64 -d > kubeconfig
+```
+
+## Common Bootstrap Tasks
+
+The `Taskfile.yml` in `bootstrap-cluster/` provides several useful tasks:
+
+### List Available Tasks
+
+```bash
+task --list
+```
+
+### Individual Bootstrap Steps
+
+If you need to run steps individually:
+
+```bash
+# Generate Talos configuration
+task talos:generate_configs
+
+# Apply config to nodes
+task talos:apply_talos_config
+
+# Bootstrap Kubernetes
+task talos:bootstrap_kubernetes
+
+# Generate kubeconfig
+task talos:generate_kubeconfig
+
+# Upgrade Talos version
+task talos:upgrade_talos
+
+# Check cluster health
+task talos:health
+```
+
+## Upgrading Talos
+
+To upgrade to a new Talos version:
+
+1. Update `TALOS_FACTORY_IMAGE` in `bootstrap-cluster/.env`
+2. Run the upgrade task:
+
+```bash
+export ENV=sandbox
+task talos:upgrade_talos
+```
+
+## Removing All Resources from an Environment
+
+**WARNING:** This will permanently destroy all resources and data in the environment. This action cannot be undone.
+
+### Step 1: Destroy the Environment Infrastructure
+
+Navigate to the environment directory and destroy all resources:
+
+```bash
+cd terraform/sandbox # or staging, production
+tofu destroy
+```
+
+Review the resources that will be destroyed and type `yes` when prompted.
+
+This will remove:
+- EC2 instances (control plane nodes)
+- Elastic Load Balancer
+- Route53 DNS records
+- Security groups
+- VPC and subnets
+- IAM roles
+
+### Step 2: Clean Up Local Configuration Files
+
+Remove the generated Talos and Kubernetes configuration files:
+
+```bash
+cd ../../bootstrap-cluster/sandbox # or staging, production
+rm -f talosconfig kubeconfig controlplane.yaml
+```
+
+### Step 3 (Optional): Remove Secrets from AWS Secrets Manager
+
+If you want to remove the stored credentials:
+
+```bash
+aws secretsmanager delete-secret --secret-id sandbox_talosconfig_yaml --force-delete-without-recovery
+aws secretsmanager delete-secret --secret-id sandbox_kubeconfig_yaml --force-delete-without-recovery
+aws secretsmanager delete-secret --secret-id sandbox_talos_controlplane_yaml --force-delete-without-recovery
+```
+
+### Step 4 (Optional): Destroy S3 Backend
+
+**WARNING:** Only do this if you want to remove ALL environments and start fresh. This will delete the Terraform state for all environments.
+
+```bash
+cd terraform/bootstrap
+tofu destroy
+```
+
+This removes:
+- S3 bucket storing Terraform state
+- DynamoDB table for state locking
+
+**Note:** You must destroy all environment infrastructure (sandbox, staging, production) before destroying the S3 backend.
+
+## Troubleshooting
+
+### Terraform Issues
+
+**Error:** "Error creating VPC"
+- Check AWS credentials and region configuration
+- Verify account limits for VPCs
+
+**Error:** "No Talos AMI found"
+- Verify the AWS region has Talos AMIs available
+- Check the AMI filter in `terraform/modules/base/ec2.tf`
+
+### Bootstrap Issues
+
+**Error:** "failed to dial"
+- Ensure security groups allow port 50000 (Talos API)
+- Verify EC2 instances are running
+- Check public IPs are accessible
+
+**Error:** "context deadline exceeded"
+- Talos nodes may still be booting (wait 2-3 minutes)
+- Check AWS Serial Console for boot logs
+
+### Cluster Not Healthy
+
+```bash
+# Check Talos service status
+talosctl --nodes <node-ip> services
+
+# View Talos logs
+talosctl --nodes <node-ip> logs kubelet
+
+# Check etcd health
+talosctl --nodes <node-ip> etcd members
+```
+
+## Next Steps
+
+After successfully deploying your cluster:
+
+1. **Deploy workloads** - Use `kubectl apply` to deploy applications
+2. **Install cluster add-ons** - CNI, CSI drivers, ingress controllers, etc.
+3. **Review security** - Configure RBAC, network policies, pod security standards
+
+For more information:
+- See [Architecture Documentation](./architecture.md)
+- Read the [Talos Documentation](https://www.talos.dev/docs/)
+- Check [Terraform README](../terraform/README.md)
diff --git a/template/docs/development.md b/template/docs/development.md
deleted file mode 100644
index 85d93e9..0000000
--- a/template/docs/development.md
+++ /dev/null
@@ -1,98 +0,0 @@
-# :technologist: How to set up your local environment for development
-
-## Requirements
-
-To work with Kubernetes, you need to install some additional software packages.
-Depending on your operating system, the installation instructions may vary.
-
-The documentation and scripts in the repo are written to work with `kubectl`, `kind` and `Tilt`.
-
-Consult the links below if you prefer to use Minikube or Docker Desktop instead:
-* [minikube](https://minikube.sigs.k8s.io/docs/start/).
-* [Docker](https://docs.docker.com/get-docker/).
-
-## Setup your environment
-
-1. Get the repository
-
- $ git clone {{ copier__repo_url }}
- $ cd {{ copier__project_slug }}
-
-2. Prepare the environment variables. Edit the `.envrc` file to work for your environment.
-
- **For personal environment configurations**: Create a `.envrc.local` file for your personal development settings that won't be committed to version control:
-
- ```bash
- # Example .envrc.local file
- export DEBUG=true
- export LOG_LEVEL=debug
- export LOCAL_DEV_SETTING=custom_value
- ```
-
- The `.envrc.local` file will be automatically loaded when you enter the directory (after `.envrc`), allowing you to override or add environment variables without modifying the shared `.envrc` file.
-
-## Run the kubernetes cluster and the {{ copier__project_slug }} app to develop the code
-
-First load the environment variables, then run:
-
- $ make setup
- $ tilt up
-
-:information_source: It may take a little bit of time for all the services to start up, and it's possible for
-the first run to fail because of timing conflicts. If you do see messages indicating there
-were errors during the first run, stop all the containers using Ctrl-C, and then try it again.
-
-You are now ready to edit the code.
-The app will be automatically reloaded when its files change.
-
-To delete resources created by Tilt once you are done working:
-
- $ tilt down
-
-This will not delete persistent volumes created by Tilt, and you should be able to start Tilt again with your data intact.
-
-To remove the cluster entirely:
-
- $ kind delete cluster --name {{ copier__project_slug }}
-
-To switch between different Scaf project contexts:
-
- $ tilt down # inside the codebase of the previous project
- $ make setup # inside the codebase of the project you want to work on
- $ tilt up
-
-## Next steps
-
-Creating a superuser account in the backend is useful so you have access to
-Django Admin that will be accessible at [http://localhost:8000/admin](http://localhost:8000/admin)
-
-To create a superuser use the following commands:
-
- $ make shell-backend
- $ ./manage.py createsuperuser
-{% if copier__create_nextjs_frontend %}
-This project has a NextJS frontend configured. You can access it at [http://localhost:3000/](http://localhost:3000/).
-{% endif %}
-
-## Update dependencies
-
-To update the backend app dependencies, you must edit the `backend/requirements/*.in` files.
-Once you have made your changes, you need to regenerate the `backend/requirements/*.txt` files using:
-
- $ make compile
-
-
-## Resource Limits Consideration
-
-Resource limits have been predefined for both Django and NextJS services to ensure optimal performance and efficient resource utilization:
-
-- **Django**:
- - Requests: `cpu: 200m`, `memory: 300Mi`
- - Limits: `cpu: 250m`, `memory: 400Mi`
-{% if copier__create_nextjs_frontend %}
-- **NextJS**:
- - Requests: `cpu: 100m`, `memory: 200Mi`
- - Limits: `cpu: 250m`, `memory: 300Mi`
-{% endif %}
-
-Ensure these values are appropriate for your environment. If needed, adjust them based on real workload observations in a staging or production environment to balance performance and resource consumption.
\ No newline at end of file
diff --git a/template/docs/monitoring.md b/template/docs/monitoring.md
deleted file mode 100644
index 4bd99b0..0000000
--- a/template/docs/monitoring.md
+++ /dev/null
@@ -1,91 +0,0 @@
-# :microscope: How to monitor the application
-{% if copier__use_sentry %}
-### How to monitor errors
-
-Sentry can be used for error reporting at the application level. Sentry is included as a dependency in the project requirements, and the SENTRY_DSN configuration variable is included in the Django config map.
-Next, one needs to add the project to Sentry by following the steps below:
-
-Note: The values for both tokens can be empty if you don't wish to use Sentry.
-
-1. Create two projects in your organisation's Sentry instance, e.g. https://sixfeetup.sentry.io/projects/
- 1. One project for the backend
- 2. One project for the frontend
-2. Configure Slack notifications
-3. Add team members in Sentry
-4. Update `k8s/base/app.configmap.yaml` `SENTRY_DSN_BACKEND`, `VITE_SENTRY_DSN_FRONTEND` with the DSNs appropriate for the relevant Sentry projects.
-
-For more detailed steps view [Sentry specific documentation](/docs/sentry.md)
-
-{% endif %}
-### How to monitor logs and the deployed application
-
-Install Loki for log aggregation and the Kube Prometheus Stack with Grafana Dashboards for monitoring.
-
-#### Setup AWS credentials
-
-First export the credentials to your environment variables. Change the values accordingly:
-
-```
-export AWS_ACCESS_KEY_ID='ABC123456'
-export AWS_SECRET_ACCESS_KEY='ABC123456'
-```
-
-Then create a secret in the monitoring namespace:
-
-```
-kubectl create secret generic iam-loki-s3 --from-literal=AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID --from-literal=AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY -n monitoring
-```
-
-#### Install monitoring
-
-Before installing the monitoring tools, you will need to export the GRAFANA_ADMIN_PASSWORD environment variable. This will be used to set the Grafana admin password. Change the value accordingly:
-
-```
-export GRAFANA_ADMIN_PASSWORD='ABC123456'
-```
-
-Now install the loki-stack and kube-prometheus-stack helm charts:
-
-```
-make monitoring-up
-```
-
-If you want to store the logs in an S3 bucket, you will need to include the yaml values `k8s/_monitoring/loki-stack-values.yaml` file for the `helm install loki` command in the Makefile:
-
-```
-helm install loki grafana/loki-stack --values k8s/_monitoring/loki-stack-values.yaml --namespace monitoring --create-namespace
-```
-
-#### Connect to Grafana dashboard
-
-You can connect to Grafana through local port forwarding using the steps below. Alternatively, you can set up ingress to point to Grafana.
-
-```
-make monitoring-port-forward
-```
-
-And open http://localhost:8080 on your browser
-
-Login with admin / prom-operator that are the default values. To see these values, run
-
-```
-make monitoring-login
-```
-
-Login to Grafana. Hit the `Explore` button and this gets you to the place with existing data sources. Select the newly added Loki data source.
-
-You are also able to change the password for the Grafana admin user. To do this, run the following command:
-
-```
-kubectl exec --namespace monitoring -c grafana -it $(kubectl get pods --namespace monitoring -l "app.kubernetes.io/name=grafana" -o jsonpath="{.items[0].metadata.name}") -- grafana-cli admin reset-admin-password newpassword
-```
-
-By default, you are on the code view, and you can hit the 'label browser' option on the left side and make a selection based on a number of items - eg select namespace and the namespace that interests you. Hit the `Live` mode on the right side of the screen to see logs in real time - a good check that things are setup as expected!
-
-#### Create a dashboard
-
-There is a predefined django logs table dashboard that can be created with the following command:
-
-```
-make monitoring-dashboard
-```
diff --git a/template/docs/project-overview.md b/template/docs/project-overview.md
index bd549b1..4efad39 100644
--- a/template/docs/project-overview.md
+++ b/template/docs/project-overview.md
@@ -1,16 +1,16 @@
# :telescope: Project Overview
-This project template provides a full-stack application scaffold with integrated deployment tooling. It's designed to quickly bootstrap new projects with a standardized architecture and best practices.
+This project template provides infrastructure-as-code for deploying production-ready Talos Linux Kubernetes clusters on AWS. It's designed to quickly bootstrap new Kubernetes clusters with a standardized, secure, and immutable infrastructure.
## Purpose
The purpose of this template is to:
-1. Provide a consistent starting point for new projects
-2. Implement best practices for development, testing, and deployment
-3. Reduce setup time for new projects
+1. Provide a consistent starting point for new Kubernetes clusters
+2. Implement security best practices with immutable infrastructure (Talos Linux)
+3. Reduce cluster setup time from days to hours
4. Ensure infrastructure is defined as code from the beginning
-5. Enable rapid iteration and deployment to various environments
+5. Enable deployment to multiple environments (sandbox, staging, production)
## Core Technologies
@@ -30,46 +30,43 @@ The purpose of this template is to:
}%%
flowchart LR
subgraph Temp["Core Technologies"]
- Frontend["Frontend
- Next.js
- TypeScript
- Apollo Client
- GraphQL
- TailwindCSS
- Vitest"]
-
- Backend["Backend
- Django
- GraphQL
- Celery
- Redis
- PostgreSQL"]
-
- Infrastructure["Infrastructure
- Kubernetes
- Docker
- ArgoCD
- Terraform"]
-
- DevOps["DevOps
- CI/CD
- GitOps
- Secrets Management"]
+ OS["Operating System
+ Talos Linux v1.12.1
+ Immutable
+ API-Driven
+ Kubernetes-Native"]
+
+ Infra["Infrastructure
+ Terraform/OpenTofu
+ AWS EC2
+ VPC Networking
+ Route53 DNS"]
+
+ K8s["Kubernetes
+ Control Plane
+ etcd
+ API Server
+ Scheduler"]
+
+ Tools["Tooling
+ talosctl
+ kubectl
+ Task Automation"]
end
-
+
%% Style definitions - Gruvbox Dark theme
- classDef frontend fill:#d79921,stroke:#b57614,stroke-width:2px,color:#282828,font-weight:bold
- classDef backend fill:#689d6a,stroke:#427b58,stroke-width:2px,color:#282828,font-weight:bold
+ classDef os fill:#d79921,stroke:#b57614,stroke-width:2px,color:#282828,font-weight:bold
classDef infra fill:#458588,stroke:#076678,stroke-width:2px,color:#282828,font-weight:bold
- classDef devops fill:#cc241d,stroke:#9d0006,stroke-width:2px,color:#282828,font-weight:bold
-
+ classDef k8s fill:#689d6a,stroke:#427b58,stroke-width:2px,color:#282828,font-weight:bold
+ classDef tools fill:#cc241d,stroke:#9d0006,stroke-width:2px,color:#282828,font-weight:bold
+
%% Apply styles
- class Frontend frontend
- class Backend backend
- class Infrastructure infra
- class DevOps devops
-
- %% Explicit styling for subgraph titles - works in both light and dark modes
+ class OS os
+ class Infra infra
+ class K8s k8s
+ class Tools tools
+
+ %% Explicit styling for subgraph titles
style Temp fill:#282828,color:#fabd2f,font-weight:bold
```
@@ -77,31 +74,56 @@ flowchart LR
The template follows these design principles:
-1. **Infrastructure as Code**: All infrastructure is defined in Terraform and Kubernetes manifests
-2. **GitOps**: Deployment is managed through Git using ArgoCD
-3. **Environment Parity**: Development, sandbox, staging, and production environments use the same configuration base
-4. **Containerization**: All application components run in containers
-5. **Microservices**: The application is structured as separate services (frontend, backend, workers)
-6. **API-First**: Backend exposes GraphQL API consumed by the frontend
-7. **Async Processing**: Long-running tasks are handled asynchronously with Celery
+1. **Infrastructure as Code**: All infrastructure defined in Terraform
+2. **Immutable Infrastructure**: Talos OS cannot be modified at runtime
+3. **Security First**: No SSH access, minimal attack surface, API-only management
+4. **Environment Parity**: Sandbox, staging, and production use identical base configuration
+5. **Declarative Configuration**: All cluster configuration managed via talosconfig files
+6. **API-Driven Operations**: All management operations via `talosctl` CLI
## How to Use This Template
-1. Create a new project using Copier with this template
-2. Fill in the required variables during setup
-3. Initialize the Git repository
-4. Start development with the included tooling
+1. Generate a new project using Copier with this template
+2. Fill in the required variables (project name, AWS region, domain, etc.)
+3. Configure AWS credentials
+4. Deploy infrastructure with Terraform
+5. Bootstrap Talos cluster with provided scripts
+6. Deploy your applications to the cluster
-For more details on the architecture, see the [Architecture Documentation](./architecture.md).
+For detailed instructions, see the [Deployment Documentation](./deployment.md).
## Key Features
-- Next.js frontend with TypeScript and GraphQL integration
-- Django backend with GraphQL API
-- Celery for background task processing
-- Kubernetes manifests for deployment
-- Terraform for infrastructure provisioning
-- CI/CD pipeline configuration
-- Comprehensive documentation
-- Development environment setup
-- Testing infrastructure
\ No newline at end of file
+- **Talos Linux v1.12.1** - Latest stable Talos OS with custom factory image
+- **AWS Infrastructure** - VPC, EC2 instances, security groups, load balancers
+- **Multi-Environment Support** - Sandbox, staging, and production configurations
+- **Terraform/OpenTofu** - Infrastructure provisioning and state management
+- **Custom Extensions** - ECR credential provider for AWS container registry
+- **High Availability** - Configurable number of control plane nodes
+- **Secure by Default** - API-only management, no SSH access
+- **DNS Integration** - Automatic Route53 configuration
+
+## What You Get
+
+After deployment, you'll have:
+
+- A fully functional Kubernetes cluster running on Talos Linux
+- EC2 instances configured as Kubernetes control plane nodes
+- VPC with proper networking and security groups
+- DNS records for cluster API access
+- `talosconfig` for cluster management
+- `kubeconfig` for kubectl access
+- Ready to deploy your workloads
+
+## What This Template Does NOT Include
+
+This is a **bare cluster template**. It does not include:
+
+- Application deployments
+- Database services
+- Message queues
+- Monitoring/logging solutions
+- CI/CD pipelines
+- Application load balancing
+
+You can deploy any of these on top of the cluster once it's running.
diff --git a/template/docs/qa.md b/template/docs/qa.md
deleted file mode 100644
index df3c397..0000000
--- a/template/docs/qa.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# :test_tube: How to test the application
-
-TODO: Document all the testing and QA processes.
diff --git a/template/docs/secrets.md b/template/docs/secrets.md
index 9c652ed..1f01489 100644
--- a/template/docs/secrets.md
+++ b/template/docs/secrets.md
@@ -1,36 +1,322 @@
-# :shushing_face: How to manage passwords and sensitive values
+# :shushing_face: How to Manage Cluster Credentials
-## Using SealedSecrets
+This document describes how to manage sensitive cluster credentials for your Talos Kubernetes cluster.
-SealedSecrets can be used to encrypt passwords for the values to be safely checked in.
-Creating a new secret involves encrypting the secret using kubeseal. [Installing kubeseal](https://github.com/bitnami-labs/sealed-secrets#kubeseal).
+## Cluster Credentials Overview
-Configure kubernetes to your current project config and context, making sure you are in the correct prod/sandbox environment
+The Talos cluster has two primary credential files:
- $ export KUBECONFIG=~/.kube/config:~/.kube/{{ copier__project_slug }}.ec2.config
- $ kubectl config use-context {{ copier__project_slug }}-environment
+1. **talosconfig** - Used by `talosctl` to manage the Talos operating system
+2. **kubeconfig** - Used by `kubectl` to interact with Kubernetes
-(replace environment with the actual environment name, e.g. `sandbox` or `production`)
+Both files contain TLS certificates and keys that provide administrative access to the cluster.
+## Automatic Storage in AWS Secrets Manager
-Add the secrets to your manifest using the secrets template file, and run kubeseal on the unencrypted values. The makefile target `sandbox-secrets` will replace the variables in `./k8s/templates/secrets.yaml.template` with the encoded variables from the environment, and copy the manifest with the encrypted values to `.k8s/overlays/sandbox/secrets.yaml`. The same can be done for the prod environment using the `prod-secrets` target
+During the bootstrap process (`task talos:bootstrap`), credentials are automatically stored in AWS Secrets Manager:
- $ make sandbox-secrets
+### Secrets Created
- $ make prod-secrets
+- **Talosconfig**: Stored as `${ENV}_talosconfig_yaml` (e.g., `sandbox_talosconfig_yaml`)
+- **Kubeconfig**: Stored as `${ENV}_kubeconfig_yaml` (e.g., `sandbox_kubeconfig_yaml`)
+- **Control Plane Config**: Stored as `${ENV}_controlplane_yaml`
-The `k8s/*/secrets.yaml` file can now be safely checked in. The passwords will be unencrypted by SealedSecrets in the cluster.
-When a secret is added/removed update the `k8s/templates` files, update the environment variables in .envrc and rerun the make commands.
+These secrets are base64-encoded before storage.
-The decrypted values can be retrieved running:
+## Retrieving Credentials
- $ kubectl get secret secrets-config -n {{ copier__project_dash }} -o yaml > unsealed_secrets.yaml
+### From AWS Secrets Manager
-## Using .envrc file
+Retrieve credentials from AWS Secrets Manager using the AWS CLI:
-To ease managing your passwords and secrets you can store the values in 1Password.
-You will need to install and configure [1Password cli](https://developer.1password.com/docs/cli/get-started/)
+```bash
+# Retrieve talosconfig
+aws secretsmanager get-secret-value \
+ --secret-id sandbox_talosconfig_yaml \
+ --query SecretString --output text | base64 -d > talosconfig
-You can automatically source from the `.envrc` file using [direnv](https://direnv.net/docs/installation.html)
+# Retrieve kubeconfig
+aws secretsmanager get-secret-value \
+ --secret-id sandbox_kubeconfig_yaml \
+ --query SecretString --output text | base64 -d > kubeconfig
+```
-You can also manually export the variables to your environment using `source .envrc`
+### From Local Bootstrap Directory
+
+After running `task talos:bootstrap`, credentials are also stored locally:
+
+```bash
+bootstrap-cluster/sandbox/
+├── talosconfig # Talos management credentials
+├── kubeconfig # Kubernetes access credentials
+└── controlplane.yaml # Control plane configuration
+```
+
+## Using Credentials
+
+### Using talosctl
+
+Set the `TALOSCONFIG` environment variable:
+
+```bash
+export TALOSCONFIG=./bootstrap-cluster/sandbox/talosconfig
+
+# Check Talos version
+talosctl version --nodes <node-ip>
+
+# View cluster health
+talosctl health --nodes <node-ip>
+
+# List all services
+talosctl services --nodes <node-ip>
+```
+
+Or specify the config file explicitly:
+
+```bash
+talosctl --talosconfig ./talosconfig health
+```
+
+### Using kubectl
+
+Set the `KUBECONFIG` environment variable:
+
+```bash
+export KUBECONFIG=./bootstrap-cluster/sandbox/kubeconfig
+
+# Get cluster nodes
+kubectl get nodes
+
+# View all pods
+kubectl get pods -A
+
+# Check cluster info
+kubectl cluster-info
+```
+
+Or specify the config file explicitly:
+
+```bash
+kubectl --kubeconfig ./kubeconfig get nodes
+```
+
+## Credential Security Best Practices
+
+### 1. Never Commit Credentials to Git
+
+The `.gitignore` file already excludes these files:
+
+```gitignore
+talosconfig
+kubeconfig
+controlplane.yaml
+```
+
+Ensure these patterns remain in your `.gitignore`.
+
+### 2. Restrict File Permissions
+
+Limit credential file permissions:
+
+```bash
+chmod 600 talosconfig
+chmod 600 kubeconfig
+```
+
+### 3. Use Separate Credentials Per Environment
+
+Each environment (sandbox, staging, production) has its own credentials:
+
+```
+bootstrap-cluster/
+├── sandbox/
+│ ├── talosconfig
+│ └── kubeconfig
+├── staging/
+│ ├── talosconfig
+│ └── kubeconfig
+└── production/
+ ├── talosconfig
+ └── kubeconfig
+```
+
+Never reuse credentials across environments.
+
+### 4. Rotate Credentials Regularly
+
+Talos certificates have expiration dates. Monitor and rotate before expiry:
+
+```bash
+# Check certificate expiration
+talosctl config info
+
+# Rotate Kubernetes certificates (automatically handled by Talos)
+```
+
+### 5. Use IAM for AWS Secrets Manager Access
+
+Control who can retrieve credentials from AWS Secrets Manager using IAM policies:
+
+```json
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Action": [
+ "secretsmanager:GetSecretValue"
+ ],
+ "Resource": "arn:aws:secretsmanager:region:account:secret:sandbox_*"
+ }
+ ]
+}
+```
+
+## Team Access Patterns
+
+### For New Team Members
+
+1. Ensure they have AWS CLI configured with appropriate IAM permissions
+2. Provide them with the environment name (e.g., "sandbox")
+3. They retrieve credentials from AWS Secrets Manager:
+
+```bash
+# Set environment
+export ENV=sandbox
+
+# Retrieve talosconfig
+aws secretsmanager get-secret-value \
+ --secret-id ${ENV}_talosconfig_yaml \
+ --query SecretString --output text | base64 -d > talosconfig
+
+# Retrieve kubeconfig
+aws secretsmanager get-secret-value \
+ --secret-id ${ENV}_kubeconfig_yaml \
+ --query SecretString --output text | base64 -d > kubeconfig
+
+# Set environment variables
+export TALOSCONFIG=$(pwd)/talosconfig
+export KUBECONFIG=$(pwd)/kubeconfig
+
+# Verify access
+talosctl version
+kubectl get nodes
+```
+
+### For CI/CD Pipelines
+
+CI/CD systems can retrieve credentials from AWS Secrets Manager using IAM roles:
+
+```bash
+# In GitHub Actions, GitLab CI, etc.
+export TALOSCONFIG=$(mktemp)
+export KUBECONFIG=$(mktemp)
+
+aws secretsmanager get-secret-value \
+ --secret-id ${ENV}_talosconfig_yaml \
+ --query SecretString --output text | base64 -d > $TALOSCONFIG
+
+aws secretsmanager get-secret-value \
+ --secret-id ${ENV}_kubeconfig_yaml \
+ --query SecretString --output text | base64 -d > $KUBECONFIG
+
+# Now run kubectl or talosctl commands
+kubectl apply -f manifests/
+```
+
+## Managing Application Secrets
+
+This template provides a **bare Kubernetes cluster** without application secret management.
+
+For application secrets, you can deploy one of the following solutions on your cluster:
+
+### Option 1: Sealed Secrets
+
+Encrypt secrets in Git using Sealed Secrets:
+
+```bash
+# Install Sealed Secrets controller
+kubectl apply -f https://github.com/bitnami-labs/sealed-secrets/releases/download/v0.24.0/controller.yaml
+
+# Install kubeseal CLI
+# See: https://github.com/bitnami-labs/sealed-secrets#kubeseal
+
+# Create and seal a secret
+echo -n mypassword | kubectl create secret generic mysecret \
+ --dry-run=client --from-file=password=/dev/stdin -o yaml | \
+ kubeseal -o yaml > mysealedsecret.yaml
+
+# Commit sealed secret to Git
+git add mysealedsecret.yaml
+```
+
+### Option 2: External Secrets Operator
+
+Sync secrets from AWS Secrets Manager to Kubernetes:
+
+```bash
+# Install External Secrets Operator
+helm install external-secrets \
+ external-secrets/external-secrets \
+ -n external-secrets-system --create-namespace
+
+# Create ExternalSecret resource pointing to AWS Secrets Manager
+```
+
+### Option 3: HashiCorp Vault
+
+Use Vault for centralized secret management.
+
+## Updating Stored Credentials
+
+If you regenerate cluster credentials, update AWS Secrets Manager:
+
+```bash
+# Update talosconfig
+aws secretsmanager update-secret \
+ --secret-id ${ENV}_talosconfig_yaml \
+ --secret-string "$(base64 -w0 talosconfig)"
+
+# Update kubeconfig
+aws secretsmanager update-secret \
+ --secret-id ${ENV}_kubeconfig_yaml \
+ --secret-string "$(base64 -w0 kubeconfig)"
+```
+
+## Troubleshooting
+
+### "AccessDenied" when retrieving from Secrets Manager
+
+Check your IAM permissions:
+
+```bash
+aws iam get-user
+aws sts get-caller-identity
+```
+
+Verify the secret exists:
+
+```bash
+aws secretsmanager list-secrets | grep ${ENV}
+```
+
+### "certificate has expired" error
+
+Talos automatically rotates certificates. If you encounter expiry issues:
+
+```bash
+# Regenerate kubeconfig
+talosctl kubeconfig --force
+```
+
+### Lost credentials
+
+If you lose local credentials but they're in AWS Secrets Manager, retrieve them as shown above.
+
+If credentials are completely lost, you may need to bootstrap a new control plane, which requires cluster recreation.
+
+## References
+
+- [Talos Security Documentation](https://www.talos.dev/docs/security/)
+- [AWS Secrets Manager Documentation](https://docs.aws.amazon.com/secretsmanager/)
+- [Kubernetes Secret Management](https://kubernetes.io/docs/concepts/configuration/secret/)
diff --git a/template/docs/sentry.md b/template/docs/sentry.md
deleted file mode 100644
index 56e676a..0000000
--- a/template/docs/sentry.md
+++ /dev/null
@@ -1,51 +0,0 @@
-## Detailed steps for creating and configuring a project in Sentry
-
-### Create the Project in Sentry
-
-1. Log in to sentry.io and click the Create Project button
-2. Choose Django
-3. Name the Project with client-backend naming convention (ex. {{ copier__project_dash }}-backend)
-4. Assign a team (create a team, if needed, with the plus icon next to the team dropdown). Teams should be based around the people involved in a project or projects that need to know about errors, not just the client.
-5. Click the Create Project button
-6. Repeat the above steps with the following changes:
-7. Choose Next.js
-8. Name the Project with client-frontend naming convention (ex. {{ copier__project_dash }}-frontend)
-9. Assign a team (create a team, if needed, with the plus icon next to the team dropdown). Teams should be based around the people involved in a project or projects that need to know about errors, not just the client.
-10. Click the Create Project button
-
-### Configure Slack Notifications
-
-1. Click on Setting on the sidebar
-2. Click on Projects on the secondary sidebar that is revealed
-3. Click on the Project created in the previous section
-4. Click on Alerts on the new secondary sidebar
-5. Edit the existing rule or Add a new rule if there is not one already
-6. Use “Send a notification for new issues” for the Rule Name
-7. Choose “An issue is first seen” for the Conditions
-8. Leave “All Environments”
-9. Delete the “Send a notification (for all legacy integrations)” Action, if it exists, to disable sending emails
-10. Add an Action and choose “Send a notification to the Slack workspace” option from the menu and fill in the appropriate `#channel` name
-
-### Add Team Members
-1. Click Settings on the sidebar
-2. Click Teams on the secondary sidebar that is revealed
-3. Click on the Team assigned to the Project you just created
-4. Click Add Member to add additional team members as needed for the project
-
-### Add code to Django
-
-By default this is in the base config. Make sure the following is to your preferences in all logical locations in the `backend/config/settings/{environment}.py` files:
-
-```
- import sentry_sdk
- ...
- sentry_sdk.init(
- dsn=env.str("SENTRY_DSN_BACKEND", default=""),
- environment=env.str("ENVIRONMENT", default="production"),
- release=env.str("RELEASE", default="dev"),
- )
-```
-
-Update `k8s/base/app.configmap.yaml` `SENTRY_DSN_BACKEND`, `VITE_SENTRY_DSN_FRONTEND` with the DSNs provided for the relevant Sentry projects.
-
-You can find them in Sentry by clicking Settings on the sidebar, then Projects on the secondary sidebar, then the project, then Client Keys (DSN)
diff --git a/template/flake.nix b/template/flake.nix
index ac0a177..1b890ae 100644
--- a/template/flake.nix
+++ b/template/flake.nix
@@ -29,7 +29,6 @@
pip
]))
awscli2
- argocd
bashInteractive
copier
coreutils
diff --git a/template/frontend/.dockerignore b/template/frontend/.dockerignore
deleted file mode 100644
index 603d148..0000000
--- a/template/frontend/.dockerignore
+++ /dev/null
@@ -1,11 +0,0 @@
-# Items that don't need to be in a Docker image.
-# Anything not used by the build system should go here.
-Dockerfile
-.dockerignore
-.gitignore
-README.md
-
-# Artifacts that will be built during image creation.
-# This should contain all files created during `npm run build`.
-*/build
-*/node_modules
diff --git a/template/frontend/.env.local.example b/template/frontend/.env.local.example
deleted file mode 100644
index 69cba1a..0000000
--- a/template/frontend/.env.local.example
+++ /dev/null
@@ -1,3 +0,0 @@
-IS_PRE_PUSH_HOOKS_ENABLED=true
-NEXT_PUBLIC_GRAPHQL_ENDPOINT=http://localhost:8000/graphql/
-NEXT_GRAPHQL_ENDPOINT=http://backend:8000/graphql/
diff --git a/template/frontend/.gitignore b/template/frontend/.gitignore
deleted file mode 100644
index 4ce2256..0000000
--- a/template/frontend/.gitignore
+++ /dev/null
@@ -1,38 +0,0 @@
-# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
-
-# dependencies
-/node_modules
-/.pnp
-.pnp.js
-.yarn/install-state.gz
-
-# testing
-/coverage
-
-# next.js
-/.next/
-/out/
-
-# production
-/build
-
-# misc
-.DS_Store
-*.pem
-
-# debug
-npm-debug.log*
-yarn-debug.log*
-yarn-error.log*
-
-# local env files
-.env*.local
-.env
-
-# vercel
-.vercel
-
-# typescript
-*.tsbuildinfo
-next-env.d.ts
-!lib/
diff --git a/template/frontend/.husky/pre-push b/template/frontend/.husky/pre-push
deleted file mode 100644
index d3b14bc..0000000
--- a/template/frontend/.husky/pre-push
+++ /dev/null
@@ -1,9 +0,0 @@
-cd frontend
-if test -f ./.env.local; then
- source ./.env.local
- if [ "$IS_PRE_PUSH_HOOKS_ENABLED" = "true" ] || [ "$IS_PRE_PUSH_HOOKS_ENABLED" = "1" ]; then
- npm run format-check
- npm run lint-src
- npm run typecheck
- fi
-fi
diff --git a/template/frontend/.prettierignore b/template/frontend/.prettierignore
deleted file mode 100644
index be956f3..0000000
--- a/template/frontend/.prettierignore
+++ /dev/null
@@ -1,5 +0,0 @@
-node_modules
-.next
-out
-public
-__generated__
\ No newline at end of file
diff --git a/template/frontend/.prettierrc b/template/frontend/.prettierrc
deleted file mode 100644
index 8992f03..0000000
--- a/template/frontend/.prettierrc
+++ /dev/null
@@ -1,13 +0,0 @@
-{
- "arrowParens": "avoid",
- "bracketSpacing": true,
- "jsxSingleQuote": true,
- "printWidth": 120,
- "quoteProps": "as-needed",
- "semi": false,
- "singleQuote": true,
- "tabWidth": 2,
- "trailingComma": "none",
- "plugins": ["prettier-plugin-tailwindcss"],
- "tailwindFunctions": ["clsx"]
-}
diff --git a/template/frontend/Dockerfile b/template/frontend/Dockerfile
deleted file mode 100644
index 36f5764..0000000
--- a/template/frontend/Dockerfile
+++ /dev/null
@@ -1,17 +0,0 @@
-FROM node:lts AS base
-FROM base AS build
-
-WORKDIR /app
-COPY package*.json ./
-RUN npm install
-
-# copy node_modules from the build image
-FROM base
-
-WORKDIR /app
-RUN chown node:node /app
-COPY --from=build --chown=node:node /app/node_modules node_modules
-COPY --chown=node:node . /app
-USER node
-
-CMD ["npm", "run", "dev"]
diff --git a/template/frontend/README.md b/template/frontend/README.md
deleted file mode 100644
index 23a47c9..0000000
--- a/template/frontend/README.md
+++ /dev/null
@@ -1,39 +0,0 @@
-This is a [Next.js](https://nextjs.org/) project bootstrapped with [`create-next-app`](https://github.com/vercel/next.js/tree/canary/packages/create-next-app).
-
-## Getting Started
-
-Running the development server:
-
-Tilt will automatically deploy a NextJS container for development, and it is recommended not to run NextJS locally.
-
-Update the .env.local.example file to .env.local
-Open [http://localhost:3000](http://localhost:3000) with your browser to see the result.
-
-You can start editing the page by modifying `pages/index.tsx`. The page auto-updates as you edit the file.
-
-This project uses [`next/font`](https://nextjs.org/docs/basic-features/font-optimization) to automatically optimize and load Inter, a custom Google Font.
-This project uses GraphQL for the backend, and Apollo Client for the frontend. The Apollo Client is set up in the `lib/apolloClient.ts` file.
-
-## Environment Variables
-
-| Variable Name | Explanation |
-| ---------------------------- | ---------------------------------------------------------- | --- |
-| IS_PRE_PUSH_HOOKS_ENABLED | Controls husky pre-push hooks for frontend folder | |
-| NEXT_PUBLIC_GRAPHQL_ENDPOINT | The public graphql endpoint url |
-| NEXT_GRAPHQL_ENDPOINT | The graphql endpoint url to be used for serverside queries | |
-
-## Suggested Tools
-
-[Apollo Client Devtools](https://chromewebstore.google.com/detail/apollo-client-devtools/jdkknkkbebbapilgoeccciglkfbmbnfm)
-[GraphQL Network Inspector](https://chromewebstore.google.com/detail/graphql-network-inspector/ndlbedplllcgconngcnfmkadhokfaaln)
-
-## Learn More
-
-To learn more about Next.js, take a look at the following resources:
-
-- [Next.js Documentation](https://nextjs.org/docs) - learn about Next.js features and API.
-- [Learn Next.js](https://nextjs.org/learn) - an interactive Next.js tutorial.
-
-You can check out [the Next.js GitHub repository](https://github.com/vercel/next.js/) - your feedback and contributions are welcome!
-
-Check out the official [Next.js deployment documentation](https://nextjs.org/docs/deployment) for more details.
diff --git a/template/frontend/__generated__/fragment-masking.ts b/template/frontend/__generated__/fragment-masking.ts
deleted file mode 100644
index aca71b1..0000000
--- a/template/frontend/__generated__/fragment-masking.ts
+++ /dev/null
@@ -1,87 +0,0 @@
-/* eslint-disable */
-import { ResultOf, DocumentTypeDecoration, TypedDocumentNode } from '@graphql-typed-document-node/core';
-import { FragmentDefinitionNode } from 'graphql';
-import { Incremental } from './graphql';
-
-
-export type FragmentType> = TDocumentType extends DocumentTypeDecoration<
- infer TType,
- any
->
- ? [TType] extends [{ ' $fragmentName'?: infer TKey }]
- ? TKey extends string
- ? { ' $fragmentRefs'?: { [key in TKey]: TType } }
- : never
- : never
- : never;
-
-// return non-nullable if `fragmentType` is non-nullable
-export function useFragment(
- _documentNode: DocumentTypeDecoration,
- fragmentType: FragmentType>
-): TType;
-// return nullable if `fragmentType` is undefined
-export function useFragment(
- _documentNode: DocumentTypeDecoration,
- fragmentType: FragmentType> | undefined
-): TType | undefined;
-// return nullable if `fragmentType` is nullable
-export function useFragment(
- _documentNode: DocumentTypeDecoration,
- fragmentType: FragmentType> | null
-): TType | null;
-// return nullable if `fragmentType` is nullable or undefined
-export function useFragment(
- _documentNode: DocumentTypeDecoration,
- fragmentType: FragmentType> | null | undefined
-): TType | null | undefined;
-// return array of non-nullable if `fragmentType` is array of non-nullable
-export function useFragment(
- _documentNode: DocumentTypeDecoration,
- fragmentType: Array>>
-): Array;
-// return array of nullable if `fragmentType` is array of nullable
-export function useFragment(
- _documentNode: DocumentTypeDecoration,
- fragmentType: Array>> | null | undefined
-): Array | null | undefined;
-// return readonly array of non-nullable if `fragmentType` is array of non-nullable
-export function useFragment(
- _documentNode: DocumentTypeDecoration,
- fragmentType: ReadonlyArray>>
-): ReadonlyArray;
-// return readonly array of nullable if `fragmentType` is array of nullable
-export function useFragment(
- _documentNode: DocumentTypeDecoration,
- fragmentType: ReadonlyArray>> | null | undefined
-): ReadonlyArray | null | undefined;
-export function useFragment(
- _documentNode: DocumentTypeDecoration,
- fragmentType: FragmentType> | Array>> | ReadonlyArray>> | null | undefined
-): TType | Array | ReadonlyArray | null | undefined {
- return fragmentType as any;
-}
-
-
-export function makeFragmentData<
- F extends DocumentTypeDecoration,
- FT extends ResultOf
->(data: FT, _fragment: F): FragmentType {
- return data as FragmentType;
-}
-export function isFragmentReady(
- queryNode: DocumentTypeDecoration,
- fragmentNode: TypedDocumentNode,
- data: FragmentType, any>> | null | undefined
-): data is FragmentType {
- const deferredFields = (queryNode as { __meta__?: { deferredFields: Record } }).__meta__
- ?.deferredFields;
-
- if (!deferredFields) return true;
-
- const fragDef = fragmentNode.definitions[0] as FragmentDefinitionNode | undefined;
- const fragName = fragDef?.name?.value;
-
- const fields = (fragName && deferredFields[fragName]) || [];
- return fields.length > 0 && fields.every(field => data && field in data);
-}
diff --git a/template/frontend/__generated__/gql.ts b/template/frontend/__generated__/gql.ts
deleted file mode 100644
index dbd15e5..0000000
--- a/template/frontend/__generated__/gql.ts
+++ /dev/null
@@ -1,42 +0,0 @@
-/* eslint-disable */
-import * as types from './graphql';
-import { TypedDocumentNode as DocumentNode } from '@graphql-typed-document-node/core';
-
-/**
- * Map of all GraphQL operations in the project.
- *
- * This map has several performance disadvantages:
- * 1. It is not tree-shakeable, so it will include all operations in the project.
- * 2. It is not minifiable, so the string of a GraphQL query will be multiple times inside the bundle.
- * 3. It does not support dead code elimination, so it will add unused operations.
- *
- * Therefore it is highly recommended to use the babel or swc plugin for production.
- */
-const documents = {
- "\n query Me {\n me {\n id\n name\n }\n }\n": types.MeDocument,
-};
-
-/**
- * The gql function is used to parse GraphQL queries into a document that can be used by GraphQL clients.
- *
- *
- * @example
- * ```ts
- * const query = gql(`query GetUser($id: ID!) { user(id: $id) { name } }`);
- * ```
- *
- * The query argument is unknown!
- * Please regenerate the types.
- */
-export function gql(source: string): unknown;
-
-/**
- * The gql function is used to parse GraphQL queries into a document that can be used by GraphQL clients.
- */
-export function gql(source: "\n query Me {\n me {\n id\n name\n }\n }\n"): (typeof documents)["\n query Me {\n me {\n id\n name\n }\n }\n"];
-
-export function gql(source: string) {
- return (documents as any)[source] ?? {};
-}
-
-export type DocumentType> = TDocumentNode extends DocumentNode< infer TType, any> ? TType : never;
\ No newline at end of file
diff --git a/template/frontend/__generated__/graphql.ts b/template/frontend/__generated__/graphql.ts
deleted file mode 100644
index e0691ff..0000000
--- a/template/frontend/__generated__/graphql.ts
+++ /dev/null
@@ -1,110 +0,0 @@
-import { TypedDocumentNode as DocumentNode } from '@graphql-typed-document-node/core';
-export type Maybe = T | null;
-export type InputMaybe = Maybe;
-export type Exact = { [K in keyof T]: T[K] };
-export type MakeOptional = Omit & { [SubKey in K]?: Maybe };
-export type MakeMaybe = Omit & { [SubKey in K]: Maybe };
-export type MakeEmpty = { [_ in K]?: never };
-export type Incremental = T | { [P in keyof T]?: P extends ' $fragmentName' | '__typename' ? T[P] : never };
-/** All built-in and custom scalars, mapped to their actual values */
-export type Scalars = {
- ID: { input: string; output: string; }
- String: { input: string; output: string; }
- Boolean: { input: boolean; output: boolean; }
- Int: { input: number; output: number; }
- Float: { input: number; output: number; }
-};
-
-export type OperationInfo = {
- __typename?: 'OperationInfo';
- /** List of messages returned by the operation. */
- messages: Array;
-};
-
-export type OperationMessage = {
- __typename?: 'OperationMessage';
- /** The error code, or `null` if no error code was set. */
- code?: Maybe;
- /** The field that caused the error, or `null` if it isn't associated with any particular field. */
- field?: Maybe;
- /** The kind of this message. */
- kind: OperationMessageKind;
- /** The error message. */
- message: Scalars['String']['output'];
-};
-
-export enum OperationMessageKind {
- Error = 'ERROR',
- Info = 'INFO',
- Permission = 'PERMISSION',
- Validation = 'VALIDATION',
- Warning = 'WARNING'
-}
-
-export type RegisterPayload = OperationInfo | UserType;
-
-export type UpdateUserPayload = OperationInfo | UserType;
-
-export type UserMutation = {
- __typename?: 'UserMutation';
- login: UserType;
- logout: Scalars['Boolean']['output'];
- register: RegisterPayload;
- updateUser: UpdateUserPayload;
-};
-
-
-export type UserMutationLoginArgs = {
- password: Scalars['String']['input'];
- username: Scalars['String']['input'];
-};
-
-
-export type UserMutationRegisterArgs = {
- input: UserRegistrationInput;
-};
-
-
-export type UserMutationUpdateUserArgs = {
- input: UserPartialUpdateInput;
-};
-
-/** User(id, password, last_login, is_superuser, username, first_name, last_name, email, is_staff, is_active, date_joined, name) */
-export type UserPartialUpdateInput = {
- id?: InputMaybe;
- name?: InputMaybe;
-};
-
-export type UserQuery = {
- __typename?: 'UserQuery';
- me: UserType;
-};
-
-/** User(id, password, last_login, is_superuser, username, first_name, last_name, email, is_staff, is_active, date_joined, name) */
-export type UserRegistrationInput = {
- password: Scalars['String']['input'];
- /** Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only. */
- username: Scalars['String']['input'];
-};
-
-/** User(id, password, last_login, is_superuser, username, first_name, last_name, email, is_staff, is_active, date_joined, name) */
-export type UserType = {
- __typename?: 'UserType';
- email: Scalars['String']['output'];
- id: Scalars['ID']['output'];
- /** Designates whether this user should be treated as active. Unselect this instead of deleting accounts. */
- isActive: Scalars['Boolean']['output'];
- /** Designates whether the user can log into this admin site. */
- isStaff: Scalars['Boolean']['output'];
- name: Scalars['String']['output'];
- /** Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only. */
- username: Scalars['String']['output'];
-};
-
-export type MeQueryVariables = Exact<{ [key: string]: never; }>;
-
-
-export type MeQuery = { __typename?: 'UserQuery', me: { __typename?: 'UserType', id: string, name: string } };
-
-
-export const MeDocument = {"kind":"Document","definitions":[{"kind":"OperationDefinition","operation":"query","name":{"kind":"Name","value":"Me"},"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"me"},"selectionSet":{"kind":"SelectionSet","selections":[{"kind":"Field","name":{"kind":"Name","value":"id"}},{"kind":"Field","name":{"kind":"Name","value":"name"}}]}}]}}]} as unknown as DocumentNode;
diff --git a/template/frontend/__generated__/index.ts b/template/frontend/__generated__/index.ts
deleted file mode 100644
index f515991..0000000
--- a/template/frontend/__generated__/index.ts
+++ /dev/null
@@ -1,2 +0,0 @@
-export * from "./fragment-masking";
-export * from "./gql";
\ No newline at end of file
diff --git a/template/frontend/__tests__/about.test.tsx b/template/frontend/__tests__/about.test.tsx
deleted file mode 100644
index 911d864..0000000
--- a/template/frontend/__tests__/about.test.tsx
+++ /dev/null
@@ -1,25 +0,0 @@
-import { expect, test } from 'vitest'
-
-import { GET_ME } from '@/pages'
-import { render, screen } from '@/utils/test-utils'
-
-import AboutPage from '../pages/about'
-
-const mocks = [
- {
- request: {
- query: GET_ME
- },
- result: {
- data: {
- me: { id: '1', name: 'John Doe' }
- }
- }
- }
-]
-
-test('AboutPage', async () => {
- render( , { mocks })
- expect(await screen.findByRole('heading', { level: 1, name: 'About Page' })).toBeInTheDocument()
- expect(await screen.findByText('John Doe')).toBeInTheDocument()
-})
diff --git a/template/frontend/codegen.ts b/template/frontend/codegen.ts
deleted file mode 100644
index 431d76b..0000000
--- a/template/frontend/codegen.ts
+++ /dev/null
@@ -1,19 +0,0 @@
-import { CodegenConfig } from '@graphql-codegen/cli'
-
-const config: CodegenConfig = {
- schema: 'http://localhost:8000/graphql/',
- // this assumes that all your source files are in a top-level `src/` directory - you might need to adjust this to your file structure
- documents: ['./**/*.{ts,tsx}'],
- generates: {
- './__generated__/': {
- preset: 'client',
- plugins: [],
- presetConfig: {
- gqlTagName: 'gql'
- }
- }
- },
- ignoreNoDocuments: true
-}
-
-export default config
diff --git a/template/frontend/components/ErrorBoundary.tsx b/template/frontend/components/ErrorBoundary.tsx
deleted file mode 100644
index 8a8aca0..0000000
--- a/template/frontend/components/ErrorBoundary.tsx
+++ /dev/null
@@ -1,48 +0,0 @@
-import { Component, ErrorInfo, ReactNode } from 'react'
-
-interface Props {
- children: ReactNode
-}
-
-interface State {
- hasError: boolean
- error: Error | null
- errorInfo: ErrorInfo | null
-}
-
-class ErrorBoundary extends Component {
- constructor(props: Props) {
- super(props)
- this.state = { hasError: false, error: null, errorInfo: null }
- }
-
- static getDerivedStateFromError(error: Error): State {
- // Update state so the next render will show the fallback UI.
- return { hasError: true, error, errorInfo: null }
- }
-
- componentDidCatch(error: Error, errorInfo: ErrorInfo) {
- // You can also log the error to an error reporting service
- this.setState({ error, errorInfo })
- console.error('ErrorBoundary caught an error', error, errorInfo)
- }
-
- render() {
- if (this.state.hasError) {
- return (
-
-
Something went wrong.
-
- {this.state.error && this.state.error.toString()}
-
- {this.state.errorInfo?.componentStack}
-
-
- )
- }
-
- return this.props.children
- }
-}
-
-export default ErrorBoundary
diff --git a/template/frontend/components/Footer.tsx b/template/frontend/components/Footer.tsx
deleted file mode 100644
index a1f2728..0000000
--- a/template/frontend/components/Footer.tsx
+++ /dev/null
@@ -1,24 +0,0 @@
-import Link from 'next/link'
-
-const Footer = () => {
- return (
-
-
-
-
-
{{ copier__project_name }}
-
- © {new Date().getFullYear()} {{ copier__project_name }}. All rights reserved.
-
-
-
- Home
- About
-
-
-
-
- )
-}
-
-export default Footer
diff --git a/template/frontend/components/Layout.tsx b/template/frontend/components/Layout.tsx
deleted file mode 100644
index 8cc6b34..0000000
--- a/template/frontend/components/Layout.tsx
+++ /dev/null
@@ -1,19 +0,0 @@
-import { Inter } from 'next/font/google'
-import { ReactNode } from 'react'
-
-import Footer from './Footer'
-import NavBar from './NavBar'
-
-const inter = Inter({ subsets: ['latin'] })
-
-const Layout = ({ children }: { children: ReactNode }) => {
- return (
-
-
- {children}
-
-
- )
-}
-
-export default Layout
diff --git a/template/frontend/components/NavBar.tsx b/template/frontend/components/NavBar.tsx
deleted file mode 100644
index 12a3ce8..0000000
--- a/template/frontend/components/NavBar.tsx
+++ /dev/null
@@ -1,17 +0,0 @@
-import Link from 'next/link'
-
-const NavBar = () => {
- return (
-
-
-
- SCAF
- Home
- About
-
-
-
- )
-}
-
-export default NavBar
diff --git a/template/frontend/dependencies-dev-init.txt b/template/frontend/dependencies-dev-init.txt
deleted file mode 100644
index 3880bc0..0000000
--- a/template/frontend/dependencies-dev-init.txt
+++ /dev/null
@@ -1,26 +0,0 @@
-@eslint/eslintrc
-@graphql-codegen/cli
-@graphql-codegen/client-preset
-@graphql-typed-document-node/core
-@tailwindcss/postcss
-@testing-library/dom
-@testing-library/react
-@types/lodash
-@types/react
-@types/react-dom
-@vitejs/plugin-react
-eslint
-eslint-config-next
-eslint-plugin-import
-eslint-plugin-jsx-a11y
-eslint-plugin-react
-eslint-plugin-react-hooks
-graphql
-husky
-jsdom
-prettier
-prettier-plugin-tailwindcss
-tailwindcss
-typescript
-typescript-eslint
-vitest
diff --git a/template/frontend/dependencies-init.txt b/template/frontend/dependencies-init.txt
deleted file mode 100644
index 1c24b03..0000000
--- a/template/frontend/dependencies-init.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-@apollo/client@3.14
-@testing-library/jest-dom
-@testing-library/user-event
-@typescript-eslint/eslint-plugin
-clsx
-deepmerge
-eslint-plugin-unused-imports
-lodash
-next
-react
-react-dom
diff --git a/template/frontend/eslint.config.mjs b/template/frontend/eslint.config.mjs
deleted file mode 100644
index d0f6bc1..0000000
--- a/template/frontend/eslint.config.mjs
+++ /dev/null
@@ -1,112 +0,0 @@
-import path from 'node:path'
-import { fileURLToPath } from 'node:url'
-import tseslint from 'typescript-eslint'
-import nextCoreWebVitals from 'eslint-config-next/core-web-vitals'
-import jsxA11y from 'eslint-plugin-jsx-a11y'
-import unusedImports from 'eslint-plugin-unused-imports'
-
-const __filename = fileURLToPath(import.meta.url)
-const __dirname = path.dirname(__filename)
-
-import globals from 'globals'
-export default [
- ...tseslint.configs.recommended,
- // Next.js recommended + core web vitals (flat)
- ...nextCoreWebVitals,
- // Ignores for build outputs and non-source files
- {
- ignores: [
- 'node_modules/**',
- '.next/**',
- 'out/**',
- 'dist/**',
- 'eslint.config.mjs'
- ]
- },
- // Project-specific rules and additional plugins
- {
- plugins: {
- 'unused-imports': unusedImports
- },
- settings: {
- react: { version: 'detect' }
- },
- rules: {
- 'react/jsx-curly-brace-presence': 'error',
- 'unused-imports/no-unused-imports': 'error',
- 'unused-imports/no-unused-vars': [
- 'warn',
- {
- vars: 'all',
- varsIgnorePattern: '^_',
- args: 'after-used',
- argsIgnorePattern: '^_'
- }
- ],
- 'import/order': [
- 'error',
- {
- alphabetize: { caseInsensitive: true, order: 'asc' },
- groups: ['external', 'builtin', 'internal', 'parent', 'sibling', 'index'],
- 'newlines-between': 'always'
- }
- ],
- 'object-shorthand': ['error', 'properties'],
- 'react/jsx-no-useless-fragment': 'error',
- 'require-await': 'error',
- 'no-restricted-imports': [
- 'error',
- {
- paths: [
- { name: '@apollo/client', importNames: ['gql'], message: 'Use the @/__generated__/gql to get proper typings!' },
- { name: '@apollo/client/core', importNames: ['gql'], message: 'Use the @/__generated__/gql to get proper typings!' },
- { name: '@testing-library/react', importNames: ['*'], message: 'Use the imports from test-utils instead!' }
- ]
- }
- ]
- }
- },
-
- // TypeScript-specific settings and rules
- {
- files: ['**/*.ts', '**/*.tsx'],
- languageOptions: {
- parser: tseslint.parser,
- parserOptions: {
- project: ['./tsconfig.json'],
- tsconfigRootDir: __dirname
- }
- },
- plugins: {
- '@typescript-eslint': tseslint.plugin
- },
- rules: {
- '@typescript-eslint/no-unused-vars': 'off',
- '@typescript-eslint/no-unnecessary-condition': 'error',
- '@typescript-eslint/triple-slash-reference': 'off'
- }
- },
- // Node overrides for common config files
- {
- files: [
- '*.config.{js,mjs,ts}'
- ],
- languageOptions: {
- globals: globals.node,
- sourceType: 'module'
- },
- rules: {
- 'import/no-extraneous-dependencies': ['error', { devDependencies: true }]
- }
- },
- {
- files: ['*.config.cjs'],
- languageOptions: {
- globals: globals.node,
- sourceType: 'commonjs'
- },
- rules: {
- 'import/no-extraneous-dependencies': ['error', { devDependencies: true }]
- }
- }
-]
diff --git a/template/frontend/lib/apolloClient.ts b/template/frontend/lib/apolloClient.ts
deleted file mode 100644
index d6144e3..0000000
--- a/template/frontend/lib/apolloClient.ts
+++ /dev/null
@@ -1,82 +0,0 @@
-import { ApolloClient, HttpLink, InMemoryCache, NormalizedCacheObject, from } from '@apollo/client'
-import { setContext } from '@apollo/client/link/context'
-import { onError } from '@apollo/client/link/error'
-import merge from 'deepmerge'
-import isEqual from 'lodash/isEqual'
-import type { AppProps } from 'next/app'
-
-export const APOLLO_STATE_PROP_NAME = '__APOLLO_STATE__'
-
-let apolloClient: ApolloClient | undefined
-
-const errorLink = onError(({ graphQLErrors, networkError }) => {
- if (graphQLErrors)
- graphQLErrors.forEach(({ message, locations, path }) =>
- console.log(`[GraphQL error]: Message: ${message}, Location: ${locations}, Path: ${path}`)
- )
- if (networkError) console.log(`[Network error]: ${networkError}`)
-})
-
-const httpLink = new HttpLink({
- uri:
- typeof window === 'undefined'
- ? process.env.NEXT_GRAPHQL_ENDPOINT || 'http://backend:8000/graphql/'
- : process.env.NEXT_PUBLIC_GRAPHQL_ENDPOINT || 'http://localhost:8000/graphql/',
- credentials: 'include'
-})
-
-const authLink = setContext((_, { headers }) => {
- const token = typeof window !== 'undefined' ? localStorage.getItem('token') : null
- return {
- headers: {
- ...headers,
- authorization: token ? `Bearer ${token}` : ''
- }
- }
-})
-
-function createApolloClient() {
- return new ApolloClient({
- ssrMode: typeof window === 'undefined',
- link: from([errorLink, authLink, httpLink]),
- cache: new InMemoryCache()
- })
-}
-
-export function initializeApollo(initialState: NormalizedCacheObject | null = null) {
- const _apolloClient = apolloClient ?? createApolloClient()
- // the initial state gets hydrated here
- if (initialState) {
- // Get existing cache, loaded during client side data fetching
- const existingCache = _apolloClient.extract()
-
- // Merge the initialState from getStaticProps/getServerSideProps
- // in the existing cache
- const data = merge(existingCache, initialState, {
- // combine arrays using object equality (like in sets)
- arrayMerge: (destinationArray, sourceArray) => [
- ...sourceArray,
- ...destinationArray.filter(d => sourceArray.every(s => !isEqual(d, s)))
- ]
- })
- // Restore the cache with the merged data
- _apolloClient.cache.restore(data)
- }
- // For SSR always create a new Apollo Client
- if (typeof window === 'undefined') return _apolloClient
- // Create the Apollo Client once in the client
- if (!apolloClient) apolloClient = _apolloClient
- return _apolloClient
-}
-
-export function addApolloState(client: typeof apolloClient, pageProps: AppProps['pageProps']) {
- if (pageProps?.props) {
- pageProps.props[APOLLO_STATE_PROP_NAME] = client?.cache.extract()
- }
- return pageProps
-}
-
-export function useApollo(pageProps: AppProps['pageProps']) {
- const state = pageProps[APOLLO_STATE_PROP_NAME]
- return initializeApollo(state)
-}
diff --git a/template/frontend/next.config.mjs b/template/frontend/next.config.mjs
deleted file mode 100644
index 4587f45..0000000
--- a/template/frontend/next.config.mjs
+++ /dev/null
@@ -1,13 +0,0 @@
-import path from 'node:path'
-import { fileURLToPath } from 'node:url'
-
-const __filename = fileURLToPath(import.meta.url)
-const __dirname = path.dirname(__filename)
-
-/** @type {import('next').NextConfig} */
-const nextConfig = {
- reactStrictMode: true,
- outputFileTracingRoot: path.join(__dirname, '..')
-}
-
-export default nextConfig
diff --git a/template/frontend/package.json b/template/frontend/package.json
deleted file mode 100644
index 4fab50e..0000000
--- a/template/frontend/package.json
+++ /dev/null
@@ -1,19 +0,0 @@
-{
- "name": "{{ copier__project_name }}",
- "version": "0.1.0",
- "private": true,
- "scripts": {
- "dev": "next dev",
- "build": "next build",
- "start": "next start",
- "lint-src": "eslint .",
- "lint-fix": "eslint . --fix",
- "format": "prettier --write .",
- "format-check": "prettier --check .",
- "test": "vitest --run",
- "test-watch": "vitest",
- "typecheck": "tsc --noEmit",
- "prepare": "cd .. && husky frontend/.husky",
- "codegen": "graphql-codegen"
- }
-}
diff --git a/template/frontend/pages/404.tsx b/template/frontend/pages/404.tsx
deleted file mode 100644
index 054afad..0000000
--- a/template/frontend/pages/404.tsx
+++ /dev/null
@@ -1,8 +0,0 @@
-export default function Custom404() {
- return (
- <>
- 404 - Page Not Found
- Edit pages/404.tsx
- >
- )
-}
diff --git a/template/frontend/pages/500.tsx b/template/frontend/pages/500.tsx
deleted file mode 100644
index 79519a6..0000000
--- a/template/frontend/pages/500.tsx
+++ /dev/null
@@ -1,3 +0,0 @@
-export default function Custom500() {
- return 500 - Server-side error occurred
-}
diff --git a/template/frontend/pages/_app.tsx b/template/frontend/pages/_app.tsx
deleted file mode 100644
index c63bbcc..0000000
--- a/template/frontend/pages/_app.tsx
+++ /dev/null
@@ -1,21 +0,0 @@
-import { ApolloProvider } from '@apollo/client'
-import type { AppProps } from 'next/app'
-
-import ErrorBoundary from '@/components/ErrorBoundary'
-import Layout from '@/components/Layout'
-import '@/styles/globals.css'
-
-import { useApollo } from '../lib/apolloClient'
-
-export default function App({ Component, pageProps }: AppProps) {
- const apolloClient = useApollo(pageProps)
- return (
-
-
-
-
-
-
-
- )
-}
diff --git a/template/frontend/pages/_document.tsx b/template/frontend/pages/_document.tsx
deleted file mode 100644
index ee65e53..0000000
--- a/template/frontend/pages/_document.tsx
+++ /dev/null
@@ -1,13 +0,0 @@
-import { Html, Head, Main, NextScript } from 'next/document'
-
-export default function Document() {
- return (
-
-
-
-
-
-
-
- )
-}
diff --git a/template/frontend/pages/about.tsx b/template/frontend/pages/about.tsx
deleted file mode 100644
index fd486fb..0000000
--- a/template/frontend/pages/about.tsx
+++ /dev/null
@@ -1,31 +0,0 @@
-import { useQuery } from '@apollo/client'
-
-import { addApolloState, initializeApollo } from '@/lib/apolloClient'
-
-import { GET_ME } from '.'
-
-export default function About() {
- const { loading, error, data } = useQuery(GET_ME)
-
- if (loading) return Loading...
- return (
- <>
- About Page
- This page is using Server Side Rendering to fetch User Info
- {error ? Error: {error.message}
: {data?.me.name}
}{' '}
- >
- )
-}
-export async function getServerSideProps() {
- const apolloClient = initializeApollo()
- try {
- await apolloClient.query({
- query: GET_ME
- })
- } catch (error) {
- console.log('error', error)
- }
- return addApolloState(apolloClient, {
- props: {}
- })
-}
diff --git a/template/frontend/pages/api/hello.ts b/template/frontend/pages/api/hello.ts
deleted file mode 100644
index 9bafa6e..0000000
--- a/template/frontend/pages/api/hello.ts
+++ /dev/null
@@ -1,10 +0,0 @@
-// Next.js API route support: https://nextjs.org/docs/api-routes/introduction
-import type { NextApiRequest, NextApiResponse } from 'next'
-
-type Data = {
- name: string
-}
-
-export default function handler(req: NextApiRequest, res: NextApiResponse) {
- res.status(200).json({ name: 'John Doe' })
-}
diff --git a/template/frontend/pages/index.tsx b/template/frontend/pages/index.tsx
deleted file mode 100644
index c8c0499..0000000
--- a/template/frontend/pages/index.tsx
+++ /dev/null
@@ -1,25 +0,0 @@
-import { useQuery } from '@apollo/client'
-
-import { gql } from '@/__generated__'
-
-export const GET_ME = gql(/* GraphQL */ `
- query Me {
- me {
- id
- name
- }
- }
-`)
-
-export default function Home() {
- const { loading, error, data } = useQuery(GET_ME)
-
- if (loading) return Loading...
- return (
- <>
- Home Page
- This page using Client Side Rendering to fetch User Info
- {error ? Error: {error.message}
: {data?.me.name}
}
- >
- )
-}
\ No newline at end of file
diff --git a/template/frontend/postcss.config.mjs b/template/frontend/postcss.config.mjs
deleted file mode 100644
index 92f98cd..0000000
--- a/template/frontend/postcss.config.mjs
+++ /dev/null
@@ -1,5 +0,0 @@
-const config = {
- plugins: ["@tailwindcss/postcss"],
-};
-
-export default config;
\ No newline at end of file
diff --git a/template/frontend/public/favicon.ico b/template/frontend/public/favicon.ico
deleted file mode 100644
index 718d6fe..0000000
Binary files a/template/frontend/public/favicon.ico and /dev/null differ
diff --git a/template/frontend/public/scaf-logo.png b/template/frontend/public/scaf-logo.png
deleted file mode 100644
index 521c0d9..0000000
Binary files a/template/frontend/public/scaf-logo.png and /dev/null differ
diff --git a/template/frontend/styles/globals.css b/template/frontend/styles/globals.css
deleted file mode 100644
index 3d552a6..0000000
--- a/template/frontend/styles/globals.css
+++ /dev/null
@@ -1,2 +0,0 @@
-@import "tailwindcss";
-
diff --git a/template/frontend/tsconfig.json b/template/frontend/tsconfig.json
deleted file mode 100644
index 0febbe7..0000000
--- a/template/frontend/tsconfig.json
+++ /dev/null
@@ -1,34 +0,0 @@
-{
- "compilerOptions": {
- "lib": [
- "dom",
- "dom.iterable",
- "esnext"
- ],
- "allowJs": true,
- "skipLibCheck": true,
- "strict": true,
- "noEmit": true,
- "esModuleInterop": true,
- "module": "esnext",
- "moduleResolution": "bundler",
- "resolveJsonModule": true,
- "isolatedModules": true,
- "jsx": "preserve",
- "incremental": true,
- "paths": {
- "@/*": [
- "./*"
- ]
- },
- "target": "ES2017"
- },
- "include": [
- "next-env.d.ts",
- "**/*.ts",
- "**/*.tsx"
- ],
- "exclude": [
- "node_modules"
- ]
-}
diff --git a/template/frontend/utils/test-utils.tsx b/template/frontend/utils/test-utils.tsx
deleted file mode 100644
index 2b6673f..0000000
--- a/template/frontend/utils/test-utils.tsx
+++ /dev/null
@@ -1,18 +0,0 @@
-import { MockedProvider, MockedResponse } from '@apollo/client/testing'
-import { render } from '@testing-library/react'
-import React from 'react'
-
-type Options = {
- mocks?: MockedResponse[]
-}
-const renderWithProviders = (ui: React.ReactElement, options: Options = {}) => {
- render(
-
- {ui}
-
- )
-}
-
-export { screen } from '@testing-library/react'
-export { renderWithProviders as render }
-export { userEvent } from '@testing-library/user-event'
diff --git a/template/frontend/vitest.config.ts b/template/frontend/vitest.config.ts
deleted file mode 100644
index 938d4ad..0000000
--- a/template/frontend/vitest.config.ts
+++ /dev/null
@@ -1,17 +0,0 @@
-import react from '@vitejs/plugin-react'
-import { defineConfig } from 'vitest/config'
-
-import { fileURLToPath } from 'node:url'
-
-export default defineConfig({
- plugins: [react()],
- test: {
- environment: 'jsdom',
- setupFiles: ['./vitest.setup.ts']
- },
- resolve: {
- alias: {
- '@': fileURLToPath(new URL('./', import.meta.url))
- }
- }
-})
diff --git a/template/frontend/vitest.setup.ts b/template/frontend/vitest.setup.ts
deleted file mode 100644
index 10234f4..0000000
--- a/template/frontend/vitest.setup.ts
+++ /dev/null
@@ -1,3 +0,0 @@
-///
-
-import '@testing-library/jest-dom/vitest'
diff --git a/template/k8s/_monitoring/django-logs-table.yaml b/template/k8s/_monitoring/django-logs-table.yaml
deleted file mode 100644
index 581ead0..0000000
--- a/template/k8s/_monitoring/django-logs-table.yaml
+++ /dev/null
@@ -1,190 +0,0 @@
-apiVersion: v1
-kind: ConfigMap
-metadata:
- labels:
- grafana_dashboard: "1"
- name: django-logs-table
- namespace: monitoring
-data:
- django-logs-table.json: |-
- {
- "annotations": {
- "list": [
- {
- "builtIn": 1,
- "datasource": {
- "type": "grafana",
- "uid": "-- Grafana --"
- },
- "enable": true,
- "hide": true,
- "iconColor": "rgba(0, 211, 255, 1)",
- "name": "Annotations & Alerts",
- "type": "dashboard"
- }
- ]
- },
- "editable": true,
- "fiscalYearStartMonth": 0,
- "graphTooltip": 0,
- "id": 33,
- "links": [],
- "liveNow": false,
- "panels": [
- {
- "datasource": {
- "type": "loki",
- "uid": "P8E80F9AEF21F6940"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "thresholds"
- },
- "custom": {
- "align": "auto",
- "cellOptions": {
- "type": "auto"
- },
- "inspect": false
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- }
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 0
- },
- "id": 1,
- "options": {
- "cellHeight": "sm",
- "footer": {
- "countRows": false,
- "fields": "",
- "reducer": [
- "sum"
- ],
- "show": false
- },
- "showHeader": true
- },
- "pluginVersion": "10.1.1",
- "targets": [
- {
- "datasource": {
- "type": "loki",
- "uid": "P8E80F9AEF21F6940"
- },
- "editorMode": "builder",
- "expr": "{namespace=\"default\", container=\"django\"} |= ``",
- "queryType": "range",
- "refId": "A"
- }
- ],
- "title": "Django Logs Table",
- "type": "table"
- },
- {
- "datasource": {
- "type": "loki",
- "uid": "P8E80F9AEF21F6940"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "thresholds"
- },
- "custom": {
- "align": "auto",
- "cellOptions": {
- "type": "auto"
- },
- "inspect": false
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- }
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 12,
- "y": 0
- },
- "id": 2,
- "options": {
- "cellHeight": "sm",
- "footer": {
- "countRows": false,
- "fields": "",
- "reducer": [
- "sum"
- ],
- "show": false
- },
- "showHeader": true
- },
- "pluginVersion": "10.1.1",
- "targets": [
- {
- "datasource": {
- "type": "loki",
- "uid": "P8E80F9AEF21F6940"
- },
- "editorMode": "builder",
- "expr": "{namespace=\"default\", container=\"postgres\"} |= ``",
- "queryType": "range",
- "refId": "A"
- }
- ],
- "title": "Postgres Logs Table",
- "type": "table"
- }
- ],
- "refresh": "",
- "schemaVersion": 38,
- "style": "dark",
- "tags": [],
- "templating": {
- "list": []
- },
- "time": {
- "from": "now-6h",
- "to": "now"
- },
- "timepicker": {},
- "timezone": "",
- "title": "Django",
- "uid": "1693997350",
- "version": 3,
- "weekStart": ""
- }
diff --git a/template/k8s/_monitoring/loki-stack-values.yaml b/template/k8s/_monitoring/loki-stack-values.yaml
deleted file mode 100644
index 9af069a..0000000
--- a/template/k8s/_monitoring/loki-stack-values.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-loki:
- priorityClassName: monitoring-low-priority
- env:
- - name: AWS_ACCESS_KEY_ID
- valueFrom:
- secretKeyRef:
- name: iam-loki-s3
- key: AWS_ACCESS_KEY_ID
- - name: AWS_SECRET_ACCESS_KEY
- valueFrom:
- secretKeyRef:
- name: iam-loki-s3
- key: AWS_SECRET_ACCESS_KEY
- config:
- schema_config:
- configs:
- - from: 2021-05-12
- store: boltdb-shipper
- object_store: s3
- schema: v11
- index:
- prefix: loki_index_
- period: 24h
- storage_config:
- aws:
- s3: s3://{{ copier__aws_region }}/{{ copier__project_dash }}-loki-index-bucket
- s3forcepathstyle: true
- bucketnames: {{ copier__project_dash }}-loki-index-bucket
- region: {{ copier__aws_region }}
- insecure: false
- sse_encryption: false
- boltdb_shipper:
- shared_store: s3
- cache_ttl: 24h
diff --git a/template/k8s/argocd/README.md b/template/k8s/argocd/README.md
deleted file mode 100644
index a066fab..0000000
--- a/template/k8s/argocd/README.md
+++ /dev/null
@@ -1,46 +0,0 @@
-# Kubernetes Control Plane
-
-## ArgoCD
-
-An ArgoCD application will automate your kubernetes deployments.
-
-Install [ArgoCD CLI](https://argo-cd.readthedocs.io/en/stable/cli_installation/).
-
-Log in to your ArgoCD dashboard, eg
-
- $ argocd login argocd.sixfeetup.com
-
-Find your cluster context (set your KUBECONFIG file if neccessary)
-
- $ kubectl config get-contexts -o name
-
-Add your cluster to ArgoCD. This will also output the `CLUSTER_IP` you will use in the application.
-
- $ argocd cluster add {{ copier__project_slug }}-environment
- (replace environment with the actual environment name, e.g. `sandbox` or `production`)
-
-### Creating the ArgoCD application manifests
-
-Export the cluster IP from ArgoCD to your environment
-
- $ export CLUSTER_IP=CLUSTER_IP
-
-Create a deploy key for your repository export it to your environment as `SSH_PRIVATE_KEY`.
-
- $ export SSH_PRIVATE_KEY=DEPLOY_KEY
-
-Export your repository url to your environment
-
- $ export REPO_URL=PROJECT_REPOSITORY
-
-Create the `application.yaml` manifest and `repocred.yaml` secret and seal it
-
- $ make argocd-app
-
-If you are working in the SFU environment move your application and repocreds manifest to `sixfeetup/controlplane.git/argocd/applications/{{ copier__project_slug }}/`
-
-Apply the manifests
-
- $ kubectl apply -f argocd/applications/{{ copier__project_slug }}
-
-Your ArgoCD application should be visible on the ArgoCD dashboard. Check the repository and cluster connection.
diff --git a/template/k8s/base/app.configmap.yaml b/template/k8s/base/app.configmap.yaml
deleted file mode 100644
index 685be10..0000000
--- a/template/k8s/base/app.configmap.yaml
+++ /dev/null
@@ -1,41 +0,0 @@
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: app-config
-data:
- DJANGO_DEBUG: "False"
- USE_DOCKER: "yes"
- DJANGO_ADMIN_URL: "admin/"
- DJANGO_SETTINGS_MODULE: "config.settings.local"
- DJANGO_SECRET_KEY: "CHANGEME"
- DJANGO_ALLOWED_HOSTS: "localhost,127.0.0.1,backend,.{{ copier__domain_name }}"
- DJANGO_CSRF_TRUSTED_ORIGINS: "http://localhost,https://{{ copier__domain_name }}"
- DJANGO_SECURE_SSL_REDIRECT: "False"
- PGDATA: "/var/lib/postgresql/data/pgdata"
- POSTGRES_DB: "{{ copier__project_slug }}"
- POSTGRES_USER: "{{ copier__project_slug }}"
- REDIS_URL: "redis://redis:6379/1"
- # todo: see if PYDEVD_USE_* can be removed later, I was getting this error
- # with pydevd-pycharm==243.26053.29:
- # cannot import name 'trace_dispatch' from '_pydevd_bundle.pydevd_trace_dispatch'
- # (/app/lib/python3.12/site-packages/_pydevd_bundle/pydevd_trace_dispatch.py)
- # Also probably need to add cython to local.in in that case.
- PYDEVD_USE_CYTHON: "NO"
- PYDEVD_USE_FRAME_EVAL: "NO"
- PYTHONBREAKPOINT: "" # "utils.pycharm_debugger" for pycharm
-{%- if copier__mail_service == 'Mailgun' %}
- MAILGUN_DOMAIN: "{{ copier__domain_name }}"
- MAILGUN_API_URL: "https://api.mailgun.net/v3"{%- endif %}
-{%- if copier__use_celery %}
- CELERY_BROKER_URL: "redis://redis:6379/0"
- FLOWER_BROKER_URL: "redis://redis:6379/0"
- FLOWER_ADDRESS: "0.0.0.0"
- FLOWER_PORT: "5555"{%- endif %}
- # S3 storage access
- DJANGO_AWS_REGION_NAME: "{{ copier__aws_region }}"
- DJANGO_AWS_STORAGE_BUCKET_NAME: "CHANGEME_S3_BUCKET_NAME"
-{% if copier__use_sentry %}
- SENTRY_DSN_BACKEND: ""
- ENVIRONMENT: "dev"
- VITE_SENTRY_DSN_FRONTEND: ""
- VITE_ENVIRONMENT: "dev"{% endif %}
diff --git a/template/k8s/base/celery.yaml b/template/k8s/base/celery.yaml
deleted file mode 100644
index f1509f2..0000000
--- a/template/k8s/base/celery.yaml
+++ /dev/null
@@ -1,77 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
- name: flower
- labels:
- app: celery
-spec:
- type: ClusterIP
- selector:
- app: celery
- ports:
- - port: 5555
- targetPort: flower-server
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: celery
- labels:
- app: celery
-spec:
- replicas: 1
- selector:
- matchLabels:
- app: celery
- template:
- metadata:
- labels:
- app: celery
- spec:
- priorityClassName: app-medium-priority
- containers:
- - name: celeryworker
- image: backend:latest
- command:
- - celery
- args:
- - -A
- - {{ copier__project_slug }}
- - worker
- - -E
- - -l
- - info
- envFrom:
- - configMapRef:
- name: app-config
- - secretRef:
- name: secrets-config
- - name: celerybeat
- image: backend:latest
- command:
- - celery
- args:
- - -A
- - {{ copier__project_slug }}
- - beat
- envFrom:
- - configMapRef:
- name: app-config
- - secretRef:
- name: secrets-config
- - name: flower
- ports:
- - name: flower-server
- containerPort: 5555
- image: backend:latest
- command:
- - celery
- args:
- - -A
- - {{ copier__project_slug }}
- - flower
- envFrom:
- - configMapRef:
- name: app-config
- - secretRef:
- name: secrets-config
diff --git a/template/k8s/base/django.yaml b/template/k8s/base/django.yaml
deleted file mode 100644
index f445f95..0000000
--- a/template/k8s/base/django.yaml
+++ /dev/null
@@ -1,90 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
- name: backend
- labels:
- app: backend
-spec:
- type: ClusterIP
- selector:
- app: backend
- ports:
- - port: 8000
- targetPort: http-server
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: backend
- labels:
- app: backend
-spec:
- replicas: 1
- selector:
- matchLabels:
- app: backend
- template:
- metadata:
- labels:
- app: backend
- spec:
- priorityClassName: app-medium-priority
- containers:
- - name: backend
- image: backend:latest
- command: ["python", "manage.py", "runserver", "0.0.0.0:8000"]
- ports:
- - name: http-server
- containerPort: 8000
- envFrom:
- - configMapRef:
- name: app-config
- - secretRef:
- name: secrets-config
- resources:
- limits:
- cpu: "250m"
- memory: "400Mi"
- requests:
- cpu: "200m"
- memory: "300Mi"
- livenessProbe:
- httpGet:
- path: /healthz
- port: 8000
- httpHeaders:
- - name: Host
- value: localhost
- initialDelaySeconds: 29
- periodSeconds: 29
- timeoutSeconds: 2
- failureThreshold: 3
- terminationGracePeriodSeconds: 60
- readinessProbe:
- httpGet:
- path: /readiness
- port: 8000
- httpHeaders:
- - name: Host
- value: localhost
- initialDelaySeconds: 31
- periodSeconds: 31
- timeoutSeconds: 3
- successThreshold: 1
- initContainers:
- - name: check-db-ready
- image: postgres:17
- command: [
- "sh",
- "-c",
- "until pg_isready -h $(POSTGRES_HOST);
- do echo waiting for postgres; sleep 2; done;",
- ]
- - name: backend-migration
- image: backend:latest
- command: ["python", "manage.py", "migrate"]
- envFrom:
- - configMapRef:
- name: app-config
- - secretRef:
- name: secrets-config
diff --git a/template/k8s/base/flower.yaml b/template/k8s/base/flower.yaml
deleted file mode 100644
index f4589e5..0000000
--- a/template/k8s/base/flower.yaml
+++ /dev/null
@@ -1,32 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: flower
- labels:
- app: {{ copier__project_slug }}
-spec:
- replicas: 1
- selector:
- matchLabels:
- app: {{ copier__project_slug }}
- template:
- metadata:
- labels:
- app: {{ copier__project_slug }}
- spec:
- priorityClassName: app-medium-priority
- containers:
- - name: flower
- image: {{ copier__project_slug }}_local_django:latest
- command: ["celery", "-A", "{{ copier__project_slug }}", "flower"]
- envFrom:
- - configMapRef:
- name: django-config
- initContainers:
- - name: celery-check-workers-ready
- image: {{ copier__project_slug }}_local_django:latest
- command:
- ['sh', '-c', 'until celery -A {{ copier__project_slug }} inspect ping; do echo Waiting for Celery workers to be ready; sleep 5; done']
- envFrom:
- - configMapRef:
- name: django-config
diff --git a/template/k8s/base/frontend.yaml b/template/k8s/base/frontend.yaml
deleted file mode 100644
index aaeae52..0000000
--- a/template/k8s/base/frontend.yaml
+++ /dev/null
@@ -1,54 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
- name: frontend
- labels:
- app: frontend
-spec:
- type: ClusterIP
- selector:
- app: frontend
- ports:
- - port: 3000
- targetPort: http-server
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: frontend
- labels:
- app: frontend
-spec:
- replicas: 1
- selector:
- matchLabels:
- app: frontend
- template:
- metadata:
- labels:
- app: frontend
- spec:
- priorityClassName: app-medium-priority
- containers:
- - name: frontend
- image: frontend:latest
- ports:
- - name: http-server
- containerPort: 3000
- resources:
- limits:
- cpu: "500m"
- memory: "600Mi"
- requests:
- cpu: "100m"
- memory: "300Mi"
- env:
- - name: PORT
- value: "3000"
- - name: DJANGO_ADDRESS
- value: backend:8000
- envFrom:
- - configMapRef:
- name: app-config
- - secretRef:
- name: secrets-config
diff --git a/template/k8s/base/ingress.yaml b/template/k8s/base/ingress.yaml
deleted file mode 100644
index 59d2480..0000000
--- a/template/k8s/base/ingress.yaml
+++ /dev/null
@@ -1,65 +0,0 @@
-{% if copier__create_nextjs_frontend %}
-apiVersion: traefik.io/v1alpha1
-kind: IngressRoute
-metadata:
- name: frontend-ingress
- annotations:
- cert-manager.io/issuer: letsencrypt-staging
-spec:
- entryPoints:
- - websecure
- routes:
- - kind: Rule
- match: {{ copier__project_slug }}.local
- priority: 10
- services:
- - name: frontend
- port: 3000
- tls:
- certResolver: letsencrypt
----
-{% endif %}
-apiVersion: traefik.io/v1alpha1
-kind: IngressRoute
-metadata:
- name: backend-ingress
- annotations:
- cert-manager.io/issuer: letsencrypt-staging
-spec:
- entryPoints:
- - websecure
- routes:
- - kind: Rule
- match: k8s.{{ copier__project_slug }}.local
- priority: 10
- services:
- - name: backend
- port: 8000
- tls:
- certResolver: letsencrypt
----
-apiVersion: traefik.io/v1alpha1
-kind: IngressRoute
-metadata:
- name: http-to-https-redirect
-spec:
- entryPoints:
- - web
- routes:
- - kind: Rule
- match: PathPrefix(`/`)
- priority: 1
- middlewares:
- - name: redirect-to-https
- services:
- - kind: TraefikService
- name: noop@internal
----
-apiVersion: traefik.io/v1alpha1
-kind: Middleware
-metadata:
- name: redirect-to-https
-spec:
- redirectScheme:
- scheme: https
- permanent: true
diff --git a/template/k8s/base/kustomization.yaml b/template/k8s/base/kustomization.yaml
deleted file mode 100644
index 25fd7c0..0000000
--- a/template/k8s/base/kustomization.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-apiVersion: kustomize.config.k8s.io/v1beta1
-kind: Kustomization
-
-resources:
-- ./app.configmap.yaml
-- ./django.yaml
-{% if copier__create_nextjs_frontend %}
-- ./frontend.yaml{% endif %}
-- ./redis.yaml
-{%- if copier__use_celery %}
-- ./celery.yaml{%- endif %}
-- ./pod-priority.yaml
-- ./pod-disruption-budgets.yaml
\ No newline at end of file
diff --git a/template/k8s/base/pod-disruption-budgets.yaml b/template/k8s/base/pod-disruption-budgets.yaml
deleted file mode 100644
index deb85ac..0000000
--- a/template/k8s/base/pod-disruption-budgets.yaml
+++ /dev/null
@@ -1,53 +0,0 @@
-apiVersion: policy/v1
-kind: PodDisruptionBudget
-metadata:
- name: backend-pdb
-spec:
- minAvailable: 1
- selector:
- matchLabels:
- app: backend
----
-{%- if copier__use_celery %}
-apiVersion: policy/v1
-kind: PodDisruptionBudget
-metadata:
- name: celery-pdb
-spec:
- minAvailable: 1
- selector:
- matchLabels:
- app: celery
----
-apiVersion: policy/v1
-kind: PodDisruptionBudget
-metadata:
- name: flower-pdb
-spec:
- minAvailable: 1
- selector:
- matchLabels:
- app: flower
----
-{%- endif %}
-{% if copier__create_nextjs_frontend %}
-apiVersion: policy/v1
-kind: PodDisruptionBudget
-metadata:
- name: frontend-pdb
-spec:
- minAvailable: 1
- selector:
- matchLabels:
- app: frontend
----
-{% endif %}
-apiVersion: policy/v1
-kind: PodDisruptionBudget
-metadata:
- name: redis-pdb
-spec:
- minAvailable: 1
- selector:
- matchLabels:
- app: redis
diff --git a/template/k8s/base/pod-priority.yaml b/template/k8s/base/pod-priority.yaml
deleted file mode 100644
index 61d3ea5..0000000
--- a/template/k8s/base/pod-priority.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
-apiVersion: scheduling.k8s.io/v1
-kind: PriorityClass
-metadata:
- name: postgres-high-priority
-value: 100000
-globalDefault: false
-description: "Priority class for PostgreSQL to avoid eviction"
-
----
-apiVersion: scheduling.k8s.io/v1
-kind: PriorityClass
-metadata:
- name: app-medium-priority
-value: 50000
-globalDefault: false
-description: "Priority class for the main application stack"
-
----
-apiVersion: scheduling.k8s.io/v1
-kind: PriorityClass
-metadata:
- name: monitoring-low-priority
-value: 10000
-globalDefault: false
-description: "Priority class for monitoring tools like Prometheus, Grafana, etc."
diff --git a/template/k8s/base/redis.yaml b/template/k8s/base/redis.yaml
deleted file mode 100644
index 6b55a6e..0000000
--- a/template/k8s/base/redis.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
- name: redis
- labels:
- app: redis
-spec:
- type: ClusterIP
- selector:
- app: redis
- ports:
- - port: 6379
- targetPort: redis-server
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: redis
- labels:
- app: redis
-spec:
- replicas: 1
- selector:
- matchLabels:
- app: redis
- template:
- metadata:
- labels:
- app: redis
- spec:
- priorityClassName: app-medium-priority
- containers:
- - name: redis
- image: redis:6.0.5
- ports:
- - name: redis-server
- containerPort: 6379
diff --git a/template/k8s/local/kustomization.yaml b/template/k8s/local/kustomization.yaml
deleted file mode 100644
index b3196a0..0000000
--- a/template/k8s/local/kustomization.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-apiVersion: kustomize.config.k8s.io/v1beta1
-kind: Kustomization
-
-resources:
-- ../base
-- ../mailhog
-- ./postgres.yaml
-- ./secrets.yaml
-
-patches:
-- patch: |-
- - op: add
- path: /spec/template/spec/initContainers/0/env
- value:
- - name: POSTGRES_HOST
- value: postgres
- - op: replace
- path: /spec/template/spec/containers/0/livenessProbe
- value: null
- - op: replace
- path: /spec/template/spec/containers/0/readinessProbe
- value: null
- target:
- kind: Deployment
- name: backend
-- patch: |-
- - op: add
- path: /spec/template/spec/containers/0/env
- value:
- - name: DOCKER_GATEWAY_IP # used for debugger to reach out to host machine
- value: "172.17.0.1"
- target:
- kind: Deployment
- name: backend
diff --git a/template/k8s/local/postgres.yaml b/template/k8s/local/postgres.yaml
deleted file mode 100644
index 4a632b5..0000000
--- a/template/k8s/local/postgres.yaml
+++ /dev/null
@@ -1,89 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
- name: postgres
- labels:
- app: postgres
-spec:
- ports:
- - port: 5432
- targetPort: 5432
- selector:
- app: postgres
----
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
- name: postgres
- labels:
- app: postgres
-spec:
- serviceName: postgres
- replicas: 1
- selector:
- matchLabels:
- app: postgres
- template:
- metadata:
- labels:
- app: postgres
- spec:
- priorityClassName: postgres-high-priority
- containers:
- - name: postgres
- image: postgres:17
- ports:
- - containerPort: 5432
- envFrom:
- - configMapRef:
- name: app-config
- - secretRef:
- name: secrets-config
- volumeMounts:
- - name: {{ copier__project_dash }}-postgres-volume-mount
- mountPath: /var/lib/postgresql/data
- # readinessProbe:
- # exec:
- # command:
- # - bash
- # - "-c"
- # - "psql -U$POSTGRES_USER -d$POSTGRES_DB -c 'SELECT 1'"
- # initialDelaySeconds: 15
- # timeoutSeconds: 2
- # livenessProbe:
- # exec:
- # command:
- # - bash
- # - "-c"
- # - "psql -U$POSTGRES_USER -d$POSTGRES_DB -c 'SELECT 1'"
- # initialDelaySeconds: 15
- # timeoutSeconds: 2
- volumes:
- - name: {{ copier__project_dash }}-postgres-volume-mount
- persistentVolumeClaim:
- claimName: {{ copier__project_dash }}-postgres-pvc
----
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
- annotations:
- tilt.dev/down-policy: keep
- name: {{ copier__project_dash }}-postgres-pvc
- labels:
- type: local
-spec:
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: 500Mi
----
-apiVersion: policy/v1
-kind: PodDisruptionBudget
-metadata:
- name: postgres-pdb
-spec:
- minAvailable: 1
- selector:
- matchLabels:
- app: postgres
diff --git a/template/k8s/local/secrets.yaml b/template/k8s/local/secrets.yaml
deleted file mode 100644
index 9301241..0000000
--- a/template/k8s/local/secrets.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
-apiVersion: v1
-stringData:
- POSTGRES_PASSWORD: __POSTGRES_PASSWORD__
- DATABASE_URL: postgresql://{{ copier__project_slug }}:__POSTGRES_PASSWORD__@postgres/{{ copier__project_slug }}
- DJANGO_SECRET_KEY: __DJANGO_SECRET_KEY__
-kind: Secret
-metadata:
- name: secrets-config
-type: Opaque
diff --git a/template/k8s/mailhog/kustomization.yaml b/template/k8s/mailhog/kustomization.yaml
deleted file mode 100644
index a4a1a56..0000000
--- a/template/k8s/mailhog/kustomization.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-apiVersion: kustomize.config.k8s.io/v1beta1
-kind: Kustomization
-
-resources:
- - mailhog.yaml
diff --git a/template/k8s/mailhog/mailhog.yaml b/template/k8s/mailhog/mailhog.yaml
deleted file mode 100644
index f3f3517..0000000
--- a/template/k8s/mailhog/mailhog.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
- name: mailhog
- labels:
- app: mailhog
-spec:
- type: ClusterIP
- selector:
- app: mailhog
- ports:
- - port: 8025
- targetPort: smtp-server-ui
- name: smtp-server-ui
- - port: 1025
- targetPort: smtp-server
- name: smtp-server
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: mailhog
- labels:
- app: mailhog
-spec:
- replicas: 1
- selector:
- matchLabels:
- app: mailhog
- template:
- metadata:
- labels:
- app: mailhog
- spec:
- containers:
- - name: mailhog
- image: mailhog/mailhog:v1.0.0
- ports:
- - name: smtp-server-ui
- containerPort: 8025
- - name: smtp-server
- containerPort: 1025
diff --git a/template/k8s/prod/kustomization.yaml b/template/k8s/prod/kustomization.yaml
deleted file mode 100644
index e98a1c8..0000000
--- a/template/k8s/prod/kustomization.yaml
+++ /dev/null
@@ -1,78 +0,0 @@
-apiVersion: kustomize.config.k8s.io/v1beta1
-kind: Kustomization
-namespace: {{ copier__rfc1123_subdomain }}-prod
-
-resources:
- - ../sandbox
- - secrets.yaml
-
-patches:
-- target:
- kind: SealedSecret
- name: secrets-config
- path: secrets.yaml
-- patch: |-
- - op: replace
- path: /spec/issuerRef/name
- value: letsencrypt-prod
- - op: replace
- path: /spec/dnsNames
- value:
- - api.{{ copier__domain_name }}
- - k8s.{{ copier__domain_name }}
- target:
- kind: Certificate
- name: cluster-cert
-- patch: |-
- - op: replace
- path: /metadata/annotations/cert-manager.io~1cluster-issuer
- value: letsencrypt-prod
- - op: replace
- path: /spec/routes/0/match
- value: Host(`api.{{ copier__domain_name }}`)
- target:
- kind: IngressRoute
- name: backend-ingress
-- patch: |-
- - op: replace
- path: /metadata/annotations/cert-manager.io~1cluster-issuer
- value: letsencrypt-prod
- - op: replace
- path: /spec/routes/0/match
- value: Host(`k8s.{{ copier__domain_name }}`)
- target:
- kind: IngressRoute
- name: k8s-ingress
-- patch: |-
- - op: replace
- path: /spec/backup/barmanObjectStore/destinationPath
- value: "s3://{{ copier__project_dash }}-prod-backups/"
- target:
- kind: Cluster
- name: postgres
-{% if copier__create_nextjs_frontend %}
-- patch: |-
- - op: add
- path: /data/DISTRIBUTION_ID
- value: "CHANGEME"
- target:
- kind: ConfigMap
- name: app-config{% endif %}
-
-configMapGenerator:
- - name: app-config
- behavior: merge
- literals:
- - ENVIRONMENT="production"
- - DJANGO_SETTINGS_MODULE="config.settings.production"
- - DJANGO_CSRF_TRUSTED_ORIGINS="https://{{ copier__domain_name }}"
- - AWS_S3_CUSTOM_DOMAIN="{{ copier__domain_name }}"
-
-images:
- - name: {{ copier__aws_account_id }}.dkr.ecr.{{ copier__aws_region }}.amazonaws.com/{{ copier__project_dash }}-sandbox-backend
- newName: {{ copier__aws_account_id }}.dkr.ecr.{{ copier__aws_region }}.amazonaws.com/{{ copier__project_dash }}-backend
- newTag: latest
-{% if copier__create_nextjs_frontend %}
- - name: {{ copier__aws_account_id }}.dkr.ecr.{{ copier__aws_region }}.amazonaws.com/{{ copier__project_dash }}-sandbox-frontend
- newName: {{ copier__aws_account_id }}.dkr.ecr.{{ copier__aws_region }}.amazonaws.com/{{ copier__project_dash }}-frontend
- newTag: latest{% endif %}
diff --git a/template/k8s/sandbox/certificate.yaml b/template/k8s/sandbox/certificate.yaml
deleted file mode 100644
index 2d3cc32..0000000
--- a/template/k8s/sandbox/certificate.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-apiVersion: cert-manager.io/v1
-kind: Certificate
-metadata:
- name: cluster-cert
-spec:
- secretName: cluster-cert-tls
- issuerRef:
- name: letsencrypt-prod
- kind: ClusterIssuer
- dnsNames:
- - api.sandbox.{{ copier__domain_name }}
- - k8s.sandbox.{{ copier__domain_name }}
- - nextjs.{{ copier__domain_name }}
diff --git a/template/k8s/sandbox/ingress-route.yaml b/template/k8s/sandbox/ingress-route.yaml
deleted file mode 100644
index 30179e3..0000000
--- a/template/k8s/sandbox/ingress-route.yaml
+++ /dev/null
@@ -1,84 +0,0 @@
-apiVersion: traefik.io/v1alpha1
-kind: IngressRoute
-metadata:
- name: backend-ingress
- annotations:
- cert-manager.io/cluster-issuer: letsencrypt-staging
-spec:
- entryPoints:
- - websecure
- routes:
- - kind: Rule
- match: Host(`api.sandbox.{{ copier__domain_name }}`)
- priority: 10
- services:
- - name: backend
- port: 8000
- tls:
- secretName: cluster-cert-tls
----
-{% if copier__create_nextjs_frontend %}
-apiVersion: traefik.io/v1alpha1
-kind: IngressRoute
-metadata:
- name: frontend-ingress
- annotations:
- cert-manager.io/cluster-issuer: letsencrypt-staging
-spec:
- entryPoints:
- - websecure
- routes:
- - kind: Rule
- match: Host(`nextjs.{{ copier__domain_name }}`)
- priority: 10
- services:
- - name: frontend
- port: 3000
- tls:
- secretName: cluster-cert-tls
----
-{% endif %}
-apiVersion: traefik.io/v1alpha1
-kind: IngressRoute
-metadata:
- name: k8s-ingress
- annotations:
- cert-manager.io/cluster-issuer: letsencrypt-staging
-spec:
- entryPoints:
- - websecure
- routes:
- - kind: Rule
- match: Host(`k8s.sandbox.{{ copier__domain_name }}`)
- priority: 10
- services:
- - name: backend
- port: 8000
- tls:
- secretName: cluster-cert-tls
----
-apiVersion: traefik.io/v1alpha1
-kind: IngressRoute
-metadata:
- name: http-to-https-redirect
-spec:
- entryPoints:
- - web
- routes:
- - kind: Rule
- match: PathPrefix(`/`)
- priority: 1
- middlewares:
- - name: redirect-to-https
- services:
- - kind: TraefikService
- name: noop@internal
----
-apiVersion: traefik.io/v1alpha1
-kind: Middleware
-metadata:
- name: redirect-to-https
-spec:
- redirectScheme:
- scheme: https
- permanent: true
diff --git a/template/k8s/sandbox/kustomization.yaml b/template/k8s/sandbox/kustomization.yaml
deleted file mode 100644
index 2f45f69..0000000
--- a/template/k8s/sandbox/kustomization.yaml
+++ /dev/null
@@ -1,139 +0,0 @@
-apiVersion: kustomize.config.k8s.io/v1beta1
-kind: Kustomization
-namespace: {{ copier__rfc1123_subdomain }}-sandbox
-
-resources:
- - ../base
- - certificate.yaml
- - ingress-route.yaml
- - postgres.cnpg.yaml
- - secrets.yaml
-
-patches:
-- patch: |-
- - op: replace
- path: /spec/template/spec/containers/0/command
- value: ["daphne"]
- - op: replace
- path: /spec/template/spec/containers/0/args
- value: ["--bind", "0.0.0.0", "--port", "8000", "--ping-interval", "15", "--ping-timeout", "5", "config.asgi:application"]
- - op: add
- path: /spec/template/spec/imagePullSecrets
- value:
- - name: regcred
- - op: add
- path: /spec/template/spec/initContainers/0/env
- value:
- - name: POSTGRES_HOST
- valueFrom:
- secretKeyRef:
- name: postgres-app
- key: host
- - op: replace
- path: /spec/template/spec/initContainers/0/command
- value: ["sh", "-c", "until pg_isready -h $(POSTGRES_HOST); do echo waiting for postgres; sleep 2; done;"]
- - op: add
- path: /spec/template/spec/initContainers/1/env
- value:
- - name: DATABASE_URL
- valueFrom:
- secretKeyRef:
- name: postgres-app
- key: uri
- - op: add
- path: /spec/template/spec/containers/0/env
- value:
- - name: POSTGRES_HOST
- valueFrom:
- secretKeyRef:
- name: postgres-app
- key: host
- - name: DATABASE_URL
- valueFrom:
- secretKeyRef:
- name: postgres-app
- key: uri
- target:
- kind: Deployment
- name: backend
-{%- if copier__use_celery %}
-- patch: |-
- - op: add
- path: /spec/template/spec/containers/0/env
- value:
- - name: POSTGRES_HOST
- valueFrom:
- secretKeyRef:
- name: postgres-app
- key: host
- - name: DATABASE_URL
- valueFrom:
- secretKeyRef:
- name: postgres-app
- key: uri
- - op: add
- path: /spec/template/spec/containers/1/env
- value:
- - name: POSTGRES_HOST
- valueFrom:
- secretKeyRef:
- name: postgres-app
- key: host
- - name: DATABASE_URL
- valueFrom:
- secretKeyRef:
- name: postgres-app
- key: uri
- - op: add
- path: /spec/template/spec/containers/2/env
- value:
- - name: POSTGRES_HOST
- valueFrom:
- secretKeyRef:
- name: postgres-app
- key: host
- - name: DATABASE_URL
- valueFrom:
- secretKeyRef:
- name: postgres-app
- key: uri
- target:
- kind: Deployment
- name: celery
-{%- endif %}
-{% if copier__create_nextjs_frontend %}
-- patch: |-
- - op: add
- path: /spec/template/spec/imagePullSecrets
- value:
- - name: regcred
- target:
- kind: Deployment
- name: frontend
-- patch: |-
- - op: add
- path: /data/DISTRIBUTION_ID
- value: "CHANGEME"
- target:
- kind: ConfigMap
- name: app-config{% endif %}
-
-configMapGenerator:
- - name: app-config
- behavior: merge
- literals:
- - ENVIRONMENT="sandbox"
- - DJANGO_SETTINGS_MODULE="config.settings.production"
- - DJANGO_CSRF_TRUSTED_ORIGINS="https://sandbox.{{ copier__domain_name }}"
- - CORS_ALLOWED_ORIGINS="https://nextjs.{{ copier__domain_name }},https://sandbox.{{ copier__domain_name }}"
- - CORS_ALLOW_CREDENTIALS=True
- - AWS_S3_CUSTOM_DOMAIN="sandbox.{{ copier__domain_name }}"
-
-images:
- - name: backend
- newName: {{ copier__aws_account_id }}.dkr.ecr.{{ copier__aws_region }}.amazonaws.com/{{ copier__project_dash }}-sandbox-backend
- newTag: latest
-{% if copier__create_nextjs_frontend %}
- - name: frontend
- newName: {{ copier__aws_account_id }}.dkr.ecr.{{ copier__aws_region }}.amazonaws.com/{{ copier__project_dash }}-sandbox-frontend
- newTag: latest{% endif %}
diff --git a/template/k8s/sandbox/postgres.cnpg.yaml b/template/k8s/sandbox/postgres.cnpg.yaml
deleted file mode 100644
index 9c8a5c7..0000000
--- a/template/k8s/sandbox/postgres.cnpg.yaml
+++ /dev/null
@@ -1,69 +0,0 @@
-apiVersion: postgresql.cnpg.io/v1
-kind: Cluster
-metadata:
- name: postgres
- labels:
- app: postgres
-spec:
- priorityClassName: postgres-high-priority
- instances: 1
- # TODO: Uncomment after first successful backup
- # bootstrap:
- # recovery:
- # source: clusterBackup
- primaryUpdateStrategy: unsupervised
- storage:
- size: 2Gi
-
- backup:
- barmanObjectStore:
- destinationPath: "s3://{{ copier__project_dash }}-sandbox-backups/"
- s3Credentials:
- accessKeyId:
- name: aws-creds
- key: AWS_S3_ACCESS_KEY_ID
- secretAccessKey:
- name: aws-creds
- key: AWS_S3_SECRET_ACCESS_KEY
- wal:
- compression: gzip
- data:
- compression: gzip
- jobs: 1
- tags:
- backupRetentionPolicy: "expire"
- historyTags:
- backupRetentionPolicy: "keep"
- retentionPolicy: "30d"
-
- externalClusters:
- - name: clusterBackup
- barmanObjectStore:
- destinationPath: "s3://{{ copier__project_dash }}-sandbox-backups/"
- # Copy backup from "postgres" to "postgres-restore"
- # Or set up a Kubernetes Job to copy it
- serverName: postgres-restore
- s3Credentials:
- accessKeyId:
- name: secrets-config
- key: AWS_S3_ACCESS_KEY_ID
- secretAccessKey:
- name: secrets-config
- key: AWS_S3_SECRET_ACCESS_KEY
- data:
- compression: gzip
- wal:
- compression: gzip
- maxParallel: 8
----
-apiVersion: postgresql.cnpg.io/v1
-kind: ScheduledBackup
-metadata:
- name: scheduled-backup
-spec:
- schedule: "0 0 * * *" # Runs daily at midnight
- suspend: false
- immediate: true
- backupOwnerReference: self
- cluster:
- name: postgres
diff --git a/template/k8s/templates/secrets.yaml.template b/template/k8s/templates/secrets.yaml.template
deleted file mode 100644
index 90c3127..0000000
--- a/template/k8s/templates/secrets.yaml.template
+++ /dev/null
@@ -1,15 +0,0 @@
-apiVersion: v1
-stringData:
- AWS_S3_ACCESS_KEY_ID: $AWS_S3_ACCESS_KEY_ID
- AWS_S3_SECRET_ACCESS_KEY: $AWS_S3_SECRET_ACCESS_KEY
-{%- if copier__mail_service == 'Amazon SES' %}
- AWS_SES_ACCESS_KEY_ID: $AWS_SES_ACCESS_KEY_ID
- AWS_SES_SECRET_ACCESS_KEY: $AWS_SES_SECRET_ACCESS_KEY{%- endif %}
- DJANGO_SECRET_KEY: $DJANGO_SECRET_KEY
-{%- if copier__mail_service == 'Mailgun' %}
- MAILGUN_API_KEY: op://{{ copier__project_name }}/$ENVIRONMENT secrets/MAILGUN_API_KEY"{%- endif %}
-
-kind: Secret
-metadata:
- name: secrets-config
-type: Opaque
diff --git a/template/tasks.py b/template/tasks.py
index 2e8f848..7db06de 100644
--- a/template/tasks.py
+++ b/template/tasks.py
@@ -18,21 +18,6 @@ def init_git_repo():
print(SUCCESS + "Git repository initialized." + TERMINATOR)
-def configure_git_remote():
- repo_url = "{{ copier__repo_url }}"
- if repo_url:
- print(INFO + f"repo_url: {repo_url}" + TERMINATOR)
- command = f"git remote add origin {repo_url}"
- subprocess.run(shlex.split(command), check=True)
- print(SUCCESS + f"Remote origin={repo_url} added." + TERMINATOR)
- else:
- print(
- WARNING
- + "No repo_url provided. Skipping git remote configuration."
- + TERMINATOR
- )
-
-
def party_popper():
for _ in range(4):
print("\r🎉 POP! 💥", end="", flush=True)
@@ -40,30 +25,24 @@ def party_popper():
print("\r💥 POP! 🎉", end="", flush=True)
subprocess.run(["sleep", "0.3"])
- print("\r🎊 Congrats! Your {{ copier__project_slug }} project is ready! 🎉")
+ print("\r🎊 Congrats! Your {{ copier__project_slug }} Talos cluster project is ready! 🎉")
print()
- print("To get started, run:")
- print("cd {{ copier__project_slug }}")
- print("tilt up")
+ print("To get started:")
+ print("1. cd {{ copier__project_slug }}")
+ print("2. Create S3 backend: cd terraform/bootstrap && tofu init && tofu plan -out=tfplan.out && tofu apply tfplan.out")
+ print("3. Deploy infrastructure: cd ../sandbox && tofu init && tofu plan -out=tfplan.out && tofu apply tfplan.out")
+ print("4. Bootstrap Talos: cd ../../bootstrap-cluster && export ENV=sandbox && task talos:bootstrap")
print()
def run_setup():
- subprocess.run(
- shlex.split("kind create cluster --name {{ copier__project_dash }}"), check=True
- )
- subprocess.run(shlex.split("make compile"), check=True)
-
- print("Dependencies compiled successfully.")
print("Performing initial commit.")
-
subprocess.run(shlex.split("git add ."), check=True)
subprocess.run(shlex.split("git commit -m 'Initial commit' --quiet"), check=True)
def main():
init_git_repo()
- configure_git_remote()
run_setup()
party_popper()
diff --git a/template/terraform/README.md b/template/terraform/README.md
index f79ce3d..81af4c4 100644
--- a/template/terraform/README.md
+++ b/template/terraform/README.md
@@ -48,17 +48,28 @@ Export the `AWS_PROFILE` environment variable:
```
$ export AWS_PROFILE=scaf
```
-Then, to deploy to sandbox you can run:
+## Deployment
+
+### Bootstrap Terraform State Backend
+
+First, create the S3 bucket and DynamoDB table for Terraform state:
+
+```bash
+cd bootstrap
+tofu init && tofu plan -out=tfplan.out && tofu apply tfplan.out
```
-$ task deploy-sandbox
-```
-TODO: replace with automation between the environment creation and the argocd bootstrap:
+This only needs to be run once for all environments.
+
+### Deploy an Environment
+To deploy the sandbox environment:
+```bash
+cd ../sandbox
+tofu init && tofu plan -out=tfplan.out && tofu apply tfplan.out
```
-{% if copier__create_nextjs_frontend %}
-6. After the environment is successfully deployed, note the CloudFront distribution ID that was created, and update the `DISTRIBUTION_ID` value in the corresponding kustomization.yaml file (e.g., sandbox/kustomization.yaml or production/kustomization.yaml) to reflect the correct value.
-{% endif %}
+
+For other environments, replace `sandbox` with `staging` or `production`.
diff --git a/template/terraform/modules/base/Makefile b/template/terraform/modules/base/Makefile
index 39f1f4f..739b4e1 100644
--- a/template/terraform/modules/base/Makefile
+++ b/template/terraform/modules/base/Makefile
@@ -21,8 +21,6 @@ kubeconfig:
remove-kube-state:
rm -f kubeconfig
- tofu state rm helm_release.argocd \
- kubernetes_namespace.monitoring || true
# TODO: add copier__use_talos check for talos targets
talosconfig:
@@ -55,7 +53,7 @@ upgrade-talos:
for ip in $${ip_array[@]} ; do \
echo "Upgrading Talos node at IP: $$ip" ; \
talosctl upgrade --nodes $$ip \
- --image factory.talos.dev/installer/10e276a06c1f86b182757a962258ac00655d3425e5957f617bdc82f06894e39b:v1.7.4 ; \
+ --image factory.talos.dev/installer/10e276a06c1f86b182757a962258ac00655d3425e5957f617bdc82f06894e39b:v1.12.1 ; \
done
destroy: remove-kube-state remove-talos-state
diff --git a/template/terraform/modules/base/ecr.tf b/template/terraform/modules/base/ecr.tf
deleted file mode 100644
index b5974dd..0000000
--- a/template/terraform/modules/base/ecr.tf
+++ /dev/null
@@ -1,57 +0,0 @@
-{% if copier__create_nextjs_frontend %}
-module "ecr_frontend" {
- source = "terraform-aws-modules/ecr/aws"
- version = "1.6.0"
-
- repository_name = var.frontend_ecr_repo
- repository_image_tag_mutability = "MUTABLE"
-
- repository_lifecycle_policy = jsonencode({
- rules = [
- {
- rulePriority = 1,
- description = "Keep last 5 images",
- selection = {
- tagStatus = "tagged",
- tagPrefixList = ["v"],
- countType = "imageCountMoreThan",
- countNumber = 5
- },
- action = {
- type = "expire"
- }
- }
- ]
- })
-
- tags = local.common_tags
-}
-{% endif %}
-
-module "ecr_backend" {
- source = "terraform-aws-modules/ecr/aws"
- version = "1.6.0"
-
- repository_name = var.backend_ecr_repo
- repository_image_tag_mutability = "MUTABLE"
-
- repository_lifecycle_policy = jsonencode({
- rules = [
- {
- rulePriority = 1,
- description = "Keep last 5 images",
- selection = {
- tagStatus = "tagged",
- tagPrefixList = ["v"],
- countType = "imageCountMoreThan",
- countNumber = 5
- },
- action = {
- type = "expire"
- }
- }
- ]
- })
-
- tags = local.common_tags
-}
diff --git a/template/terraform/modules/base/github-iam-role.tf b/template/terraform/modules/base/github-iam-role.tf
deleted file mode 100644
index 44f7934..0000000
--- a/template/terraform/modules/base/github-iam-role.tf
+++ /dev/null
@@ -1,43 +0,0 @@
-data "aws_iam_role" "github_oidc_role" {
- name = "{{ copier__project_slug }}-github-oidc-role"
-}
-
-# Define the IAM policy for ECR
-resource "aws_iam_policy" "ecr_push_policy" {
- name = "${var.app_name}-${var.repo_name}-ecr-push-policy"
- description = "Policy to allow pushing images to ECR"
-
- policy = jsonencode({
- Version = "2012-10-17",
- Statement = [
- {
- Effect = "Allow",
- Action = [
- "ecr:GetDownloadUrlForLayer",
- "ecr:BatchGetImage",
- "ecr:BatchCheckLayerAvailability",
- "ecr:PutImage",
- "ecr:InitiateLayerUpload",
- "ecr:UploadLayerPart",
- "ecr:CompleteLayerUpload"
- ],
- Resource = [
- {% if copier__create_nextjs_frontend %}"arn:aws:ecr:${var.aws_region}:${var.account_id}:repository/${var.frontend_ecr_repo}",{% endif %}
- "arn:aws:ecr:${var.aws_region}:${var.account_id}:repository/${var.backend_ecr_repo}",
- ]
- },
- {
- Effect = "Allow",
- Action = "ecr:GetAuthorizationToken",
- Resource = "*"
- }
- ]
- })
-}
-
-# Attach the policy to the role
-resource "aws_iam_role_policy_attachment" "ecr_push_policy_attachment" {
- role = data.aws_iam_role.github_oidc_role.name
- policy_arn = aws_iam_policy.ecr_push_policy.arn
-}
-
diff --git a/template/terraform/modules/base/outputs.tf b/template/terraform/modules/base/outputs.tf
index 3012a45..039f56f 100644
--- a/template/terraform/modules/base/outputs.tf
+++ b/template/terraform/modules/base/outputs.tf
@@ -7,8 +7,3 @@ output "control_plane_nodes_private_ips" {
description = "The private ip addresses of the control plane nodes."
value = join(",", module.control_plane_nodes.*.private_ip)
}
-
-output "backend_ecr_repo" {
- description = "The Backend ECR repository"
- value = module.ecr_backend.repository_url
-}
diff --git a/template/terraform/modules/base/route53.tf b/template/terraform/modules/base/route53.tf
index 998ea29..0ccefd9 100644
--- a/template/terraform/modules/base/route53.tf
+++ b/template/terraform/modules/base/route53.tf
@@ -16,14 +16,6 @@ locals {
zone_id = var.existing_hosted_zone == "" ? aws_route53_zone.route_zone[0].zone_id : data.aws_route53_zone.existing_zone[0].zone_id
}
-resource "aws_route53_record" "api" {
- zone_id = local.zone_id
- name = var.api_domain_name
- type = "CNAME"
- records = [module.elb_k8s_elb.elb_dns_name]
- ttl = 600
-}
-
resource "aws_route53_record" "k8s" {
zone_id = local.zone_id
name = var.cluster_domain_name
@@ -31,55 +23,3 @@ resource "aws_route53_record" "k8s" {
records = [module.elb_k8s_elb.elb_dns_name]
ttl = 600
}
-
-{% if copier__create_nextjs_frontend %}
-resource "aws_route53_record" "nextjs" {
- zone_id = local.zone_id
- name = var.nextjs_domain_name
- type = "CNAME"
- records = [module.elb_k8s_elb.elb_dns_name]
- ttl = 600
-}
-
-resource "aws_route53_record" "frontend" {
- zone_id = local.zone_id
- name = var.domain_name
- type = "A"
-
- alias {
- name = aws_cloudfront_distribution.cloudfront.domain_name
- zone_id = aws_cloudfront_distribution.cloudfront.hosted_zone_id
- evaluate_target_health = false
- }
-}
-
-resource "aws_route53_record" "frontend-v6" {
- zone_id = local.zone_id
- name = var.domain_name
- type = "AAAA"
-
- alias {
- name = aws_cloudfront_distribution.cloudfront.domain_name
- zone_id = aws_cloudfront_distribution.cloudfront.hosted_zone_id
- evaluate_target_health = false
- }
-}
-{% endif %}
-
-# record for argocd call
-resource "aws_route53_record" "argocd" {
- zone_id = local.zone_id
- name = var.argocd_domain_name
- type = "CNAME"
- records = [module.elb_k8s_elb.elb_dns_name]
- ttl = 600
-}
-
-# record for prometheus call
-resource "aws_route53_record" "prometheus" {
- zone_id = local.zone_id
- name = var.prometheus_domain_name
- type = "CNAME"
- records = [module.elb_k8s_elb.elb_dns_name]
- ttl = 600
-}
diff --git a/template/terraform/modules/base/security_groups.tf b/template/terraform/modules/base/security_groups.tf
index eb63998..9f9de6b 100644
--- a/template/terraform/modules/base/security_groups.tf
+++ b/template/terraform/modules/base/security_groups.tf
@@ -33,7 +33,6 @@ module "cluster_sg" {
cidr_blocks = var.admin_allowed_ips
description = "Kubernetes API Access"
},
-{% if copier__operating_system == "talos" %}
{
from_port = 50000
to_port = 50000
@@ -41,15 +40,6 @@ module "cluster_sg" {
cidr_blocks = var.admin_allowed_ips
description = "Talos API Access"
},
-{%- elif copier__operating_system == "k3s" %}
- {
- from_port = 22
- to_port = 22
- protocol = "tcp"
- cidr_blocks = var.admin_allowed_ips
- description = "Talos API Access"
- },
-{%- endif %}
]
egress_with_cidr_blocks = [
diff --git a/template/terraform/modules/base/variables.tf b/template/terraform/modules/base/variables.tf
index 83eab28..53f249d 100644
--- a/template/terraform/modules/base/variables.tf
+++ b/template/terraform/modules/base/variables.tf
@@ -33,36 +33,14 @@ variable "domain_name" {
default = "{{ copier__domain_name }}"
}
-variable "api_domain_name" {
- type = string
- default = "api.{{ copier__domain_name }}"
-}
-
variable "cluster_domain_name" {
type = string
default = "k8s.{{ copier__domain_name }}"
}
-{% if copier__create_nextjs_frontend %}
-variable "nextjs_domain_name" {
- type = string
- default = "nextjs.{{ copier__domain_name }}"
-}
-{% endif %}
-
-variable "argocd_domain_name" {
- type = string
- default = "argocd.{{ copier__domain_name }}"
-}
-
-
-variable "prometheus_domain_name" {
- type = string
- default = "prometheus.{{ copier__domain_name }}"
-}
variable "kubernetes_version" {
- description = "Kubernetes version to use for the cluster, if not set the k8s version shipped with the talos sdk or k3s version will be used"
+ description = "Kubernetes version to use for the cluster, if not set the k8s version shipped with the Talos SDK will be used"
type = string
default = null
}
@@ -92,37 +70,11 @@ variable "cluster_vpc_cidr" {
default = "172.16.0.0/16"
}
-{% if copier__operating_system == "talos" %}
variable "config_patch_files" {
description = "Path to talos config path files that applies to all nodes"
type = list(string)
default = []
}
-{%- endif %}
-
-variable "repo_name" {
- type = string
- default = "{{ copier__repo_name }}"
-}
-
-variable "repo_url" {
- type = string
- default = "{{ copier__repo_url }}"
-}
-
-{% if copier__create_nextjs_frontend %}
-variable "frontend_ecr_repo" {
- description = "The Frontend ECR repository name"
- type = string
- default = "{{ copier__project_dash }}-sandbox-frontend"
-}
-{% endif %}
-
-variable "backend_ecr_repo" {
- description = "The backend ECR repository name"
- type = string
- default = "{{ copier__project_dash }}-sandbox-backend"
-}
variable "admin_allowed_ips" {
description = "A list of CIDR blocks that are allowed to access the kubernetes api"
diff --git a/template/terraform/production/cluster.tf b/template/terraform/production/cluster.tf
index 771d737..50309a6 100644
--- a/template/terraform/production/cluster.tf
+++ b/template/terraform/production/cluster.tf
@@ -1,13 +1,10 @@
module "cluster" {
- source = "../modules/base"
- environment = "prod"
- cluster_name = "{{ copier__project_dash }}-prod"
- domain_name = "prod.{{ copier__domain_name }}"
- api_domain_name = "api.prod.{{ copier__domain_name }}"
- cluster_domain_name = "k8s.prod.{{ copier__domain_name }}"
- argocd_domain_name = "argocd.prod.{{ copier__domain_name }}"
- prometheus_domain_name = "prometheus.prod.{{ copier__domain_name }}"
- existing_hosted_zone = module.global_variables.existing_hosted_zone
+ source = "../modules/base"
+ environment = "prod"
+ cluster_name = "{{ copier__project_dash }}-prod"
+ domain_name = "prod.{{ copier__domain_name }}"
+ cluster_domain_name = "k8s.prod.{{ copier__domain_name }}"
+ existing_hosted_zone = module.global_variables.existing_hosted_zone
control_plane = {
# 2 vCPUs, 4 GiB RAM, $0.0376 per Hour
instance_type = "t3a.medium"
diff --git a/template/terraform/sandbox/cluster.tf b/template/terraform/sandbox/cluster.tf
index bb5de13..ae63af1 100644
--- a/template/terraform/sandbox/cluster.tf
+++ b/template/terraform/sandbox/cluster.tf
@@ -1,13 +1,10 @@
module "cluster" {
- source = "../modules/base"
- environment = "sandbox"
- cluster_name = "{{ copier__project_dash }}-sandbox"
- domain_name = "sandbox.{{ copier__domain_name }}"
- api_domain_name = "api.sandbox.{{ copier__domain_name }}"
- cluster_domain_name = "k8s.sandbox.{{ copier__domain_name }}"
- argocd_domain_name = "argocd.sandbox.{{ copier__domain_name }}"
- prometheus_domain_name = "prometheus.sandbox.{{ copier__domain_name }}"
- existing_hosted_zone = module.global_variables.existing_hosted_zone
+ source = "../modules/base"
+ environment = "sandbox"
+ cluster_name = "{{ copier__project_dash }}-sandbox"
+ domain_name = "sandbox.{{ copier__domain_name }}"
+ cluster_domain_name = "k8s.sandbox.{{ copier__domain_name }}"
+ existing_hosted_zone = module.global_variables.existing_hosted_zone
control_plane = {
# 2 vCPUs, 4 GiB RAM, $0.0376 per Hour
instance_type = "t3a.medium"
diff --git a/template/terraform/staging/cluster.tf b/template/terraform/staging/cluster.tf
index 45ab223..ae6aea0 100644
--- a/template/terraform/staging/cluster.tf
+++ b/template/terraform/staging/cluster.tf
@@ -1,13 +1,10 @@
module "cluster" {
- source = "../modules/base"
- environment = "staging"
- cluster_name = "{{ copier__project_dash }}-staging"
- domain_name = "staging.{{ copier__domain_name }}"
- api_domain_name = "api.staging.{{ copier__domain_name }}"
- cluster_domain_name = "k8s.staging.{{ copier__domain_name }}"
- argocd_domain_name = "argocd.staging.{{ copier__domain_name }}"
- prometheus_domain_name = "prometheus.staging.{{ copier__domain_name }}"
- existing_hosted_zone = module.global_variables.existing_hosted_zone
+ source = "../modules/base"
+ environment = "staging"
+ cluster_name = "{{ copier__project_dash }}-staging"
+ domain_name = "staging.{{ copier__domain_name }}"
+ cluster_domain_name = "k8s.staging.{{ copier__domain_name }}"
+ existing_hosted_zone = module.global_variables.existing_hosted_zone
control_plane = {
# 2 vCPUs, 4 GiB RAM, $0.0376 per Hour
instance_type = "t3a.medium"