From 98c3e5975d30790e2262cfd1c75aa2b332b25dd5 Mon Sep 17 00:00:00 2001 From: vinicius douglas cerutti Date: Tue, 6 Jan 2026 16:15:02 -0300 Subject: [PATCH 1/4] expand existing deploy docs --- docs/docs/get-started/deploy.mdx | 23 +- docs/docs/how-tos/nebari-bare-metal.mdx | 406 +++++++++++ docs/docs/how-tos/nebari-kubernets.mdx | 889 +++++++++++++++++++++--- docs/sidebars.js | 2 + 4 files changed, 1234 insertions(+), 86 deletions(-) create mode 100644 docs/docs/how-tos/nebari-bare-metal.mdx diff --git a/docs/docs/get-started/deploy.mdx b/docs/docs/get-started/deploy.mdx index d41b7c9d3..0764ee5d6 100644 --- a/docs/docs/get-started/deploy.mdx +++ b/docs/docs/get-started/deploy.mdx @@ -48,17 +48,26 @@ For instructions on installing and deploying Nebari Local, please visit [Deployi - + -This approach is recommended if you are already using Kubernetes and want to deploy Nebari on your existing cluster. +This approach is recommended if you have existing infrastructure and want to deploy Nebari on: -For instructions on installing and deploying Nebari on an existing Kubernetes cluster, please visit [How to install and setup Nebari on an existing Kubernetes infrastructure](/docs/how-tos/nebari-kubernets.mdx). +- **Pre-existing Kubernetes clusters** (EKS, AKS, GKE, or custom clusters) +- **Bare metal servers** using our K3s setup -:::note -As of now, we have only tested this functionality for AWS, but we are continuously working on expanding to other cloud providers. -::: +Choose this option if: +- You have existing cloud-managed Kubernetes clusters (EKS, AKS, GKE) +- You want to deploy on bare metal infrastructure +- You're migrating from traditional HPC systems +- You need full control over your infrastructure -You should choose another installation option, likely a cloud install if you are starting from scratch (you have no compute clusters already in place) and you desire to stand up a production instance of Nebari. +For detailed instructions: +- [Deploy Nebari on bare metal with K3s](/docs/how-tos/nebari-bare-metal.mdx) - Set up a K3s cluster on bare metal and deploy Nebari +- [Deploy Nebari on existing Kubernetes clusters](/docs/how-tos/nebari-kubernets.mdx) - Deploy on pre-existing EKS, AKS, GKE, or custom clusters + +:::tip +The bare metal deployment using [nebari-k3s](https://github.com/nebari-dev/nebari-k3s) provides a modern Kubernetes-based alternative to traditional HPC batch systems and replaces the deprecated nebari-slurm project. +::: diff --git a/docs/docs/how-tos/nebari-bare-metal.mdx b/docs/docs/how-tos/nebari-bare-metal.mdx new file mode 100644 index 000000000..093f089bf --- /dev/null +++ b/docs/docs/how-tos/nebari-bare-metal.mdx @@ -0,0 +1,406 @@ +--- +id: nebari-bare-metal +title: Deploy Nebari on Bare Metal with K3s +description: Set up a K3s cluster on bare metal machines and deploy Nebari +--- + +# Deploy Nebari on Bare Metal with K3s + +This guide walks you through deploying Nebari on bare metal infrastructure using [nebari-k3s](https://github.com/nebari-dev/nebari-k3s), an Ansible-based solution that sets up a production-ready K3s cluster with KubeVIP and MetalLB. 
+ +## Overview + +The `nebari-k3s` project provides Ansible playbooks to: +- Deploy a lightweight K3s Kubernetes cluster on bare metal servers +- Configure KubeVIP for high-availability control plane +- Set up MetalLB for load balancing +- Prepare the cluster for Nebari deployment + +This approach is ideal for: +- On-premises deployments +- Organizations with existing bare metal infrastructure +- HPC environments transitioning from traditional batch systems +- Cost-sensitive deployments requiring full hardware control + +:::info +This solution replaces the deprecated `nebari-slurm` project, providing a modern Kubernetes-based alternative for bare metal deployments. +::: + +## Prerequisites + +### Infrastructure Requirements + +- **Minimum 3 bare metal servers** (recommended for HA): + - Control plane nodes: 8 vCPU / 32 GB RAM minimum + - Worker nodes: 4 vCPU / 16 GB RAM minimum per node + - 200 GB disk space per node + +- **Network requirements**: + - All nodes on the same subnet + - Static IP addresses assigned to each node + - SSH access to all nodes + - IP range reserved for MetalLB load balancer + - Virtual IP address for the Kubernetes API server + +### Software Requirements + +On your local machine (where you'll run Ansible): +- Python 3.8+ +- Ansible 2.10+ +- kubectl +- SSH key access to all nodes + +On bare metal nodes: +- Ubuntu 20.04+ or compatible Linux distribution +- Passwordless sudo access for the SSH user + +## Step 1: Clone nebari-k3s Repository + +```bash +git clone https://github.com/nebari-dev/nebari-k3s.git +cd nebari-k3s +``` + +## Step 2: Configure Inventory + +Create an Ansible inventory file describing your cluster: + +```yaml +# inventory.yml +all: + vars: + ansible_user: ubuntu + ansible_ssh_private_key_file: ~/.ssh/id_rsa + + # K3s configuration + k3s_version: v1.28.5+k3s1 + apiserver_endpoint: "192.168.1.100" # Virtual IP for API server + + # KubeVIP configuration + kube_vip_tag_version: "v0.7.0" + kube_vip_interface: "eth0" # Network interface for VIP + kube_vip_lb_ip_range: "192.168.1.200-192.168.1.220" # IPs for services + + # MetalLB configuration + metal_lb_ip_range: + - "192.168.1.200-192.168.1.220" + + children: + master: + hosts: + node1: + ansible_host: 192.168.1.101 + node2: + ansible_host: 192.168.1.102 + node3: + ansible_host: 192.168.1.103 + + node: + hosts: + node4: + ansible_host: 192.168.1.104 + node5: + ansible_host: 192.168.1.105 + node6: + ansible_host: 192.168.1.106 + + k3s_cluster: + children: + master: + node: +``` + +## Step 3: Run Ansible Playbook + +Deploy the K3s cluster: + +```bash +ansible-playbook -i inventory.yml playbook.yaml +``` + +This will: +1. Install K3s on all nodes +2. Configure the control plane with high availability +3. Deploy KubeVIP for API server load balancing +4. Install and configure MetalLB for service load balancing +5. Set up proper node labels and taints + +## Step 4: Sync Kubeconfig + +After the playbook completes, sync the kubeconfig to your local machine: + +```bash +# Set environment variables +export SSH_USER="ubuntu" +export SSH_HOST="192.168.1.101" # IP of any master node +export SSH_KEY_FILE="~/.ssh/id_rsa" + +# Sync kubeconfig +make kubeconfig-sync +``` + +Verify cluster access: + +```bash +kubectl get nodes -o wide +``` + +You should see all your nodes in a `Ready` state. + +## Step 5: Label Nodes for Nebari + +Nebari requires specific node labels for scheduling workloads. 
Label your nodes according to their roles: + +```bash +# Label control plane node(s) as general nodes +kubectl label nodes node1 node2 node3 \ + node-role.kubernetes.io/general=true + +# Label worker nodes for user workloads +kubectl label nodes node4 node5 \ + node-role.kubernetes.io/user=true + +# Label worker nodes for Dask workers +kubectl label nodes node5 node6 \ + node-role.kubernetes.io/worker=true +``` + +:::tip +You can assign multiple roles to the same node if needed. For example, a node can be both `user` and `worker`. +::: + +## Step 6: Initialize Nebari Configuration + +Now initialize Nebari for deployment on your existing cluster: + +```bash +nebari init existing \ + --project my-nebari \ + --domain nebari.example.com \ + --auth-provider github +``` + +## Step 7: Configure Nebari for Bare Metal + +Edit the generated `nebari-config.yaml` to configure it for your K3s cluster: + +```yaml +project_name: my-nebari +provider: existing +domain: nebari.example.com + +certificate: + type: lets-encrypt + acme_email: admin@example.com + acme_server: https://acme-v02.api.letsencrypt.org/directory + +security: + authentication: + type: GitHub + config: + client_id: + client_secret: + oauth_callback_url: https://nebari.example.com/hub/oauth_callback + +local: + # Specify the kubectl context name from your kubeconfig + kube_context: "default" # Or the context name from your K3s cluster + + # Configure node selectors to match your labeled nodes + node_selectors: + general: + key: node-role.kubernetes.io/general + value: "true" + user: + key: node-role.kubernetes.io/user + value: "true" + worker: + key: node-role.kubernetes.io/worker + value: "true" + +# Configure default profiles +profiles: + jupyterlab: + - display_name: Small Instance + description: 2 CPU / 8 GB RAM + default: true + kubespawner_override: + cpu_limit: 2 + cpu_guarantee: 1.5 + mem_limit: 8G + mem_guarantee: 5G + + - display_name: Medium Instance + description: 4 CPU / 16 GB RAM + kubespawner_override: + cpu_limit: 4 + cpu_guarantee: 3 + mem_limit: 16G + mem_guarantee: 10G + + dask_worker: + Small Worker: + worker_cores_limit: 2 + worker_cores: 1.5 + worker_memory_limit: 8G + worker_memory: 5G + worker_threads: 2 + + Medium Worker: + worker_cores_limit: 4 + worker_cores: 3 + worker_memory_limit: 16G + worker_memory: 10G + worker_threads: 4 + +# Optional: Configure storage class +# default_storage_class: local-path # K3s default storage class +``` + +### Important Configuration Notes + +#### Kubernetes Context + +The `kube_context` field must match the context name in your kubeconfig. To find available contexts: + +```bash +kubectl config get-contexts +``` + +Use the name from the `NAME` column in the output. + +#### Node Selectors + +Node selectors tell Nebari where to schedule different types of workloads: +- **general**: Core Nebari services (JupyterHub, monitoring, etc.) +- **user**: User JupyterLab pods +- **worker**: Dask worker pods for distributed computing + +Make sure the `key` and `value` match the labels you applied to your nodes in Step 5. + +## Step 8: Deploy Nebari + +Deploy Nebari to your K3s cluster: + +```bash +nebari deploy --config nebari-config.yaml +``` + +During deployment, you'll be prompted to update your DNS records. Add an A record pointing your domain to one of the MetalLB IP addresses. 
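+
+Once the record is in place, you can confirm which external IP MetalLB assigned to Nebari's ingress and that your domain resolves to it (a quick check; replace `nebari.example.com` with your own domain):
+
+```bash
+# Find the LoadBalancer IP MetalLB handed out
+kubectl get svc -A | grep LoadBalancer
+
+# Confirm the DNS A record resolves to that IP
+dig +short nebari.example.com
+```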
+ +## Step 9: Verify Deployment + +Once deployment completes, verify all components are running: + +```bash +kubectl get pods -A +kubectl get ingress -A +``` + +Access Nebari at `https://nebari.example.com` and log in with your configured authentication provider. + +## Troubleshooting + +### Pods Not Scheduling + +If pods remain in `Pending` state: + +```bash +kubectl describe pod -n +``` + +Common issues: +- **Node selector mismatch**: Verify labels match between `nebari-config.yaml` and actual node labels +- **Insufficient resources**: Ensure nodes have enough CPU/memory available +- **Taints**: Check if nodes have taints that prevent scheduling + +### LoadBalancer Services Pending + +If services of type `LoadBalancer` remain in `Pending` state: + +```bash +kubectl get svc -A | grep LoadBalancer +``` + +Verify MetalLB is running: + +```bash +kubectl get pods -n metallb-system +``` + +Check MetalLB configuration: + +```bash +kubectl get ipaddresspool -n metallb-system +kubectl get l2advertisement -n metallb-system +``` + +### API Server Unreachable + +If you cannot connect to the cluster: + +1. Verify KubeVIP is running on control plane nodes: + ```bash + ssh ubuntu@192.168.1.101 "sudo k3s kubectl get pods -n kube-system | grep kube-vip" + ``` + +2. Check if the virtual IP is responding: + ```bash + ping 192.168.1.100 + ``` + +3. Verify the network interface is correct in your inventory configuration + +## Storage Considerations + +K3s includes a default `local-path` storage provisioner that works well for development. For production: + +- **Local storage**: K3s local-path provisioner (default) +- **Network storage**: Configure NFS, Ceph, or other storage classes +- **Cloud storage**: If running in a hybrid environment, configure cloud CSI drivers + +Example NFS storage class configuration: + +```yaml +# Add to nebari-config.yaml under theme.jupyterhub +storage_class_name: nfs-client +``` + +## Scaling Your Cluster + +### Adding Worker Nodes + +1. Add new nodes to your Ansible inventory +2. Run the playbook targeting only new nodes: + ```bash + ansible-playbook -i inventory.yml playbook.yaml --limit new-node + ``` +3. Label the new nodes for Nebari workloads + +### Upgrading K3s + +To upgrade your K3s cluster: + +1. Update `k3s_version` in your inventory +2. Run the playbook: + ```bash + ansible-playbook -i inventory.yml playbook.yaml + ``` + +:::warning +Test upgrades in a non-production environment first. Always backup your data before upgrading. 
+::: + +## Next Steps + +- [Configure environment management](/docs/how-tos/nebari-environment-management) +- [Set up monitoring](/docs/how-tos/setup-monitoring) +- [Configure backup strategies](/docs/how-tos/manual-backup) +- [Explore Dask for distributed computing](/docs/tutorials/using_dask) + +## Additional Resources + +- [nebari-k3s GitHub Repository](https://github.com/nebari-dev/nebari-k3s) +- [K3s Documentation](https://docs.k3s.io/) +- [KubeVIP Documentation](https://kube-vip.io/) +- [MetalLB Documentation](https://metallb.universe.tf/) diff --git a/docs/docs/how-tos/nebari-kubernets.mdx b/docs/docs/how-tos/nebari-kubernets.mdx index f117adf5a..a97e42015 100644 --- a/docs/docs/how-tos/nebari-kubernets.mdx +++ b/docs/docs/how-tos/nebari-kubernets.mdx @@ -1,19 +1,43 @@ --- id: nebari-kubernetes -title: How to deploy Nebari on pre-existing infrastructure -description: Deploying Nebari on an existing Kubernetes infrastructure +title: Deploy Nebari on Existing Kubernetes Clusters +description: Deploy Nebari on pre-existing EKS, AKS, GKE, or custom Kubernetes infrastructure --- -Nebari can also be deployed on top of Kubernetes clusters. In this documentation, -we will guide you through the process of deploying Nebari into a pre-existing Kubernetes cluster. +# Deploy Nebari on Existing Kubernetes Clusters -To make it easier for you to follow along, we will outline the steps for such deployment with a -simple infrastructure example. We will use tabs to represent the different provider -steps/configurations. Let's get started! +Nebari can be deployed on existing Kubernetes clusters across major cloud providers (AWS EKS, Azure AKS, Google Cloud GKE) or custom Kubernetes installations. This guide walks you through the process step by step. -### Evaluating the infrastructure +:::info +For bare metal deployments using K3s, see [Deploy Nebari on Bare Metal with K3s](/docs/how-tos/nebari-bare-metal.mdx). +::: -This is an optional stage and will only be used as part of this guided example, for setting up an initial infrastructure. +## Prerequisites + +Before starting, ensure you have: + +- An existing Kubernetes cluster (EKS, AKS, GKE, or custom) +- `kubectl` configured with access to your cluster +- Nebari CLI installed ([installation guide](/docs/get-started/installing-nebari)) +- Appropriate node groups/pools with sufficient resources +- DNS domain for your Nebari deployment + +## Overview + +The deployment process follows these general steps: + +1. Evaluate your existing infrastructure +2. Create/verify appropriate node groups +3. Configure `kubectl` context +4. Initialize Nebari configuration +5. Configure Nebari for your cluster +6. Deploy Nebari + +Let's walk through this process for each cloud provider. + +### Evaluating the Infrastructure + +Before deploying Nebari, review your existing cluster to ensure it meets the requirements. import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; @@ -22,23 +46,22 @@ import TabItem from '@theme/TabItem'; -In this example, a basic web app is already running on an EKS cluster. Here is a tutorial on [how to set -up the Guestbook web app](https://logz.io/blog/amazon-eks-cluster/), containing more details. +#### AWS EKS Requirements -The existing EKS cluster has one Virtual Private Cloud (VPC) with three subnets, each -in its Availability Zone, and no node groups. There are three nodes running on a `t3.medium` EC2 instance, but unfortunately, -Nebari's general node group requires a more powerful instance type. 
+For this example, we assume you have an existing EKS cluster. If you need to create one, follow [AWS's EKS setup guide](https://docs.aws.amazon.com/eks/latest/userguide/create-cluster.html). -Before proceeding, ensure that the subnets can -"[automatically assign public IP addresses to instances launched into it](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-ip-addressing.html#subnet-public-ip)" and -that there exists an [Identity and Access Management (IAM)](https://docs.aws.amazon.com/eks/latest/userguide/create-node-role.html) role with the following permissions: +Your existing EKS cluster should have: +- **VPC** with at least three subnets in different Availability Zones +- **Subnets** configured to automatically assign public IP addresses +- **IAM Role** with the following policies: + - `AmazonEKSWorkerNodePolicy` + - `AmazonEC2ContainerRegistryReadOnly` + - `AmazonEKS_CNI_Policy` -- `AmazonEKSWorkerNodePolicy` -- `AmazonEC2ContainerRegistryReadOnly` -- `AmazonEKS_CNI_Policy` +Additionally, for cluster autoscaling support, ensure the IAM role has the custom policy below:
-Custom CNI policy (Click to expand) +Custom CNI and Autoscaling Policy (Click to expand) ```json { @@ -70,7 +93,7 @@ that there exists an [Identity and Access Management (IAM)](https://docs.aws.ama "autoscaling:ResourceTag/k8s.io/cluster-autoscaler/enabled": [ "true" ], - "autoscaling:ResourceTag/kubernetes.io/cluster/eaeeks": [ + "autoscaling:ResourceTag/kubernetes.io/cluster/": [ "owned" ] } @@ -81,84 +104,465 @@ that there exists an [Identity and Access Management (IAM)](https://docs.aws.ama ```
+ +**Minimum Node Requirements:** +- **General nodes**: 8 vCPU / 32 GB RAM (e.g., `t3.2xlarge`) +- **User/Worker nodes**: 4 vCPU / 16 GB RAM (e.g., `t3.xlarge`) +- **Storage**: 200 GB EBS volume per node + +
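+
+To review what the cluster already provides before changing anything, the AWS CLI can list its networking settings and any existing node groups (a quick check; `my-eks-cluster` is a placeholder for your cluster name):
+
+```bash
+# Subnets, security groups, and endpoint access settings
+aws eks describe-cluster --name my-eks-cluster --query "cluster.resourcesVpcConfig"
+
+# Node groups already attached to the cluster
+aws eks list-nodegroups --cluster-name my-eks-cluster
+```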
+ + + +#### Azure AKS Requirements + +For Azure AKS deployments, you need an existing AKS cluster. If you don't have one, follow [Azure's AKS setup guide](https://learn.microsoft.com/en-us/azure/aks/learn/quick-kubernetes-deploy-portal). + +Your existing AKS cluster should have: +- **Resource Group** containing the AKS cluster +- **Virtual Network** with appropriate subnet sizing +- **Service Principal or Managed Identity** with required permissions: + - `Azure Kubernetes Service RBAC Cluster Admin` + - `Contributor` role on the resource group + +**Minimum Node Requirements:** +- **General nodes**: 8 vCPU / 32 GB RAM (e.g., `Standard_D8s_v3`) +- **User/Worker nodes**: 4 vCPU / 16 GB RAM (e.g., `Standard_D4s_v3`) +- **Storage**: 200 GB managed disk per node + +**Network Requirements:** +- Ensure your AKS cluster has a public IP or Load Balancer for ingress +- Configure Network Security Groups (NSGs) to allow HTTPS traffic (port 443) + +To check your current AKS configuration: + +```bash +# List your AKS clusters +az aks list --output table + +# Get cluster credentials +az aks get-credentials --resource-group --name + +# Verify node pools +az aks nodepool list --resource-group --cluster-name +``` + + + + + +#### Google Cloud GKE Requirements + +For GKE deployments, you need an existing GKE cluster. If you need to create one, follow [Google's GKE setup guide](https://cloud.google.com/kubernetes-engine/docs/how-to/creating-a-zonal-cluster). + +Your existing GKE cluster should have: +- **VPC Network** with appropriate subnet configuration +- **Service Account** with required permissions: + - `Kubernetes Engine Admin` + - `Service Account User` + - `Compute Admin` (for node management) + +**Minimum Node Requirements:** +- **General nodes**: 8 vCPU / 32 GB RAM (e.g., `n2-standard-8`) +- **User/Worker nodes**: 4 vCPU / 16 GB RAM (e.g., `n2-standard-4`) +- **Storage**: 200 GB persistent disk per node + +**Network Requirements:** +- Cluster should have HTTP(S) Load Balancing enabled +- Firewall rules allowing ingress on port 443 + +To check your current GKE configuration: + +```bash +# List your GKE clusters +gcloud container clusters list + +# Get cluster credentials +gcloud container clusters get-credentials --zone + +# Verify node pools +gcloud container node-pools list --cluster --zone +``` +
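+
+The `get-credentials` command also switches your current kubectl context; you can note its name now, since you will reference it later in `nebari-config.yaml`:
+
+```bash
+kubectl config current-context
+```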
-### Creating node groups +### Creating Node Groups -Skip this step if node groups already exists. +Nebari requires three types of node groups for optimal operation: +- **general**: Core Nebari services (JupyterHub, monitoring, databases) +- **user**: User JupyterLab notebook servers +- **worker**: Dask distributed computing workers + +Skip this step if appropriate node groups already exist.
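+
+A quick way to tell whether suitable groups are already in place is to list the nodes together with their labels:
+
+```bash
+kubectl get nodes --show-labels
+```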
-[Follow this guide to create new node groups](https://docs.aws.amazon.com/eks/latest/userguide/create-managed-node-group.html). -Be sure to fill in the following fields carefully: - -- "Node Group configuration" - - `Name` must be either `general`, `user` or `worker` - - `Node IAM Role` must be the IAM role described proceeding -- "Node Group compute configuration" - - `Instance type` - - The recommended minimum vCPU and memory for a `general` node is 8 vCPU / 32 GB RAM - - The recommended minimum vCPU and memory for a `user` and `worker` node is 4 vCPU / 16 GB RAM - - `Disk size` - - The recommended minimum is 200 GB for the attached EBS (block-storage) -- "Node Group scaling configuration" - - `Minimum size` and `Maximum size` of 1 for the `general` node group -- "Node Group subnet configuration" - - `subnet` include all existing EKS subnets +#### Creating EKS Node Groups + +Follow [AWS's guide to create managed node groups](https://docs.aws.amazon.com/eks/latest/userguide/create-managed-node-group.html). + +**General Node Group:** +- **Name**: `general` +- **Node IAM Role**: The IAM role with policies described above +- **Instance type**: `t3.2xlarge` or similar (8 vCPU / 32 GB RAM) +- **Disk size**: 200 GB +- **Scaling**: Min 1, Max 3, Desired 1 +- **Subnets**: Include all EKS subnets + +**User Node Group:** +- **Name**: `user` +- **Instance type**: `t3.xlarge` or similar (4 vCPU / 16 GB RAM) +- **Disk size**: 200 GB +- **Scaling**: Min 0, Max 10, Desired 1 +- **Enable autoscaling**: Yes + +**Worker Node Group:** +- **Name**: `worker` +- **Instance type**: `t3.xlarge` or similar (4 vCPU / 16 GB RAM) +- **Disk size**: 200 GB +- **Scaling**: Min 0, Max 20, Desired 1 +- **Enable autoscaling**: Yes + +Using AWS CLI: + +```bash +# Create general node group +aws eks create-nodegroup \ + --cluster-name \ + --nodegroup-name general \ + --node-role \ + --subnets \ + --scaling-config minSize=1,maxSize=3,desiredSize=1 \ + --instance-types t3.2xlarge \ + --disk-size 200 \ + --labels nodegroup=general + +# Create user node group +aws eks create-nodegroup \ + --cluster-name \ + --nodegroup-name user \ + --node-role \ + --subnets \ + --scaling-config minSize=0,maxSize=10,desiredSize=1 \ + --instance-types t3.xlarge \ + --disk-size 200 \ + --labels nodegroup=user + +# Create worker node group +aws eks create-nodegroup \ + --cluster-name \ + --nodegroup-name worker \ + --node-role \ + --subnets \ + --scaling-config minSize=0,maxSize=20,desiredSize=1 \ + --instance-types t3.xlarge \ + --disk-size 200 \ + --labels nodegroup=worker +``` + + + + + +#### Creating AKS Node Pools + +Follow [Azure's guide to add node pools](https://learn.microsoft.com/en-us/azure/aks/create-node-pools). 
+ +**General Node Pool:** +- **Name**: `general` +- **VM Size**: `Standard_D8s_v3` (8 vCPU / 32 GB RAM) +- **Disk size**: 200 GB +- **Count**: Min 1, Max 3 +- **Labels**: `nodepool=general` + +**User Node Pool:** +- **Name**: `user` +- **VM Size**: `Standard_D4s_v3` (4 vCPU / 16 GB RAM) +- **Disk size**: 200 GB +- **Count**: Min 0, Max 10 +- **Enable autoscaling**: Yes +- **Labels**: `nodepool=user` + +**Worker Node Pool:** +- **Name**: `worker` +- **VM Size**: `Standard_D4s_v3` (4 vCPU / 16 GB RAM) +- **Disk size**: 200 GB +- **Count**: Min 0, Max 20 +- **Enable autoscaling**: Yes +- **Labels**: `nodepool=worker` + +Using Azure CLI: + +```bash +RESOURCE_GROUP="" +CLUSTER_NAME="" + +# Create general node pool +az aks nodepool add \ + --resource-group $RESOURCE_GROUP \ + --cluster-name $CLUSTER_NAME \ + --name general \ + --node-count 1 \ + --min-count 1 \ + --max-count 3 \ + --enable-cluster-autoscaler \ + --node-vm-size Standard_D8s_v3 \ + --node-osdisk-size 200 \ + --labels nodepool=general + +# Create user node pool +az aks nodepool add \ + --resource-group $RESOURCE_GROUP \ + --cluster-name $CLUSTER_NAME \ + --name user \ + --node-count 1 \ + --min-count 0 \ + --max-count 10 \ + --enable-cluster-autoscaler \ + --node-vm-size Standard_D4s_v3 \ + --node-osdisk-size 200 \ + --labels nodepool=user + +# Create worker node pool +az aks nodepool add \ + --resource-group $RESOURCE_GROUP \ + --cluster-name $CLUSTER_NAME \ + --name worker \ + --node-count 1 \ + --min-count 0 \ + --max-count 20 \ + --enable-cluster-autoscaler \ + --node-vm-size Standard_D4s_v3 \ + --node-osdisk-size 200 \ + --labels nodepool=worker +``` + + + + + +#### Creating GKE Node Pools + +Follow [Google's guide to add node pools](https://cloud.google.com/kubernetes-engine/docs/how-to/node-pools). + +**General Node Pool:** +- **Name**: `general` +- **Machine type**: `n2-standard-8` (8 vCPU / 32 GB RAM) +- **Disk size**: 200 GB +- **Count**: Min 1, Max 3 +- **Labels**: `nodepool=general` + +**User Node Pool:** +- **Name**: `user` +- **Machine type**: `n2-standard-4` (4 vCPU / 16 GB RAM) +- **Disk size**: 200 GB +- **Count**: Min 0, Max 10 +- **Enable autoscaling**: Yes +- **Labels**: `nodepool=user` + +**Worker Node Pool:** +- **Name**: `worker` +- **Machine type**: `n2-standard-4` (4 vCPU / 16 GB RAM) +- **Disk size**: 200 GB +- **Count**: Min 0, Max 20 +- **Enable autoscaling**: Yes +- **Labels**: `nodepool=worker` + +Using gcloud CLI: + +```bash +CLUSTER_NAME="" +ZONE="" # e.g., us-central1-a + +# Create general node pool +gcloud container node-pools create general \ + --cluster=$CLUSTER_NAME \ + --zone=$ZONE \ + --machine-type=n2-standard-8 \ + --disk-size=200 \ + --num-nodes=1 \ + --min-nodes=1 \ + --max-nodes=3 \ + --enable-autoscaling \ + --node-labels=nodepool=general + +# Create user node pool +gcloud container node-pools create user \ + --cluster=$CLUSTER_NAME \ + --zone=$ZONE \ + --machine-type=n2-standard-4 \ + --disk-size=200 \ + --num-nodes=1 \ + --min-nodes=0 \ + --max-nodes=10 \ + --enable-autoscaling \ + --node-labels=nodepool=user + +# Create worker node pool +gcloud container node-pools create worker \ + --cluster=$CLUSTER_NAME \ + --zone=$ZONE \ + --machine-type=n2-standard-4 \ + --disk-size=200 \ + --num-nodes=1 \ + --min-nodes=0 \ + --max-nodes=20 \ + --enable-autoscaling \ + --node-labels=nodepool=worker +```
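+
+Once the pools are created, you can confirm that the label Nebari will later match on is present on each node:
+
+```bash
+kubectl get nodes -L cloud.google.com/gke-nodepool
+```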
+### Configuring kubectl Context + +Ensure you're using your cluster's kubectl context. Verify with: + +```bash +kubectl config current-context +``` + +If you need to switch contexts: + +```bash +kubectl config use-context +``` + +To list all available contexts: + +```bash +kubectl config get-contexts +``` + ### Deploying Nebari -Ensure that you are using the existing cluster's `kubectl` context. +Now you're ready to initialize and deploy Nebari on your existing cluster.
-Initialize in the usual manner: +#### Initialize Nebari Configuration -``` -python -m nebari init aws --project --domain --ci-provider github-actions --auth-provider github --auth-auto-provision +Initialize Nebari using the `existing` provider: + +```bash +nebari init existing \ + --project \ + --domain \ + --auth-provider github ``` -Then update the `nebari-config.yaml` file. The important keys to update are: +This creates a `nebari-config.yaml` file in your current directory. -- Replace `provider: aws` with `provider: existing` -- Replace `amazon_web_services` with `local` - - And update the `node_selector` and `kube_context` appropriately +#### Configure nebari-config.yaml -
-Example nebari-config.yaml (Click to expand) +Update the configuration file with your EKS-specific settings. The key sections to modify are: -``` +```yaml project_name: provider: existing domain: + certificate: - type: self-signed + type: lets-encrypt + acme_email: admin@example.com + security: authentication: type: GitHub config: - client_id: - client_secret: + client_id: + client_secret: oauth_callback_url: https:///hub/oauth_callback -... + +local: + # Set this to your EKS cluster context name + kube_context: arn:aws:eks:::cluster/ + + # Configure node selectors based on your node group labels + node_selectors: + general: + key: eks.amazonaws.com/nodegroup + value: general + user: + key: eks.amazonaws.com/nodegroup + value: user + worker: + key: eks.amazonaws.com/nodegroup + value: worker + +profiles: + jupyterlab: + - display_name: Small Instance + description: 2 CPU / 8 GB RAM + default: true + kubespawner_override: + cpu_limit: 2 + cpu_guarantee: 1.5 + mem_limit: 8G + mem_guarantee: 5G + + - display_name: Medium Instance + description: 4 CPU / 16 GB RAM + kubespawner_override: + cpu_limit: 4 + cpu_guarantee: 3 + mem_limit: 16G + mem_guarantee: 10G + + dask_worker: + Small Worker: + worker_cores_limit: 2 + worker_cores: 1.5 + worker_memory_limit: 8G + worker_memory: 5G + worker_threads: 2 + + Medium Worker: + worker_cores_limit: 4 + worker_cores: 3 + worker_memory_limit: 16G + worker_memory: 10G + worker_threads: 4 +``` + +
+Complete example nebari-config.yaml for EKS (Click to expand) + +```yaml +project_name: my-nebari +provider: existing +domain: nebari.example.com + +certificate: + type: lets-encrypt + acme_email: admin@example.com + +security: + authentication: + type: GitHub + config: + client_id: your-github-client-id + client_secret: your-github-client-secret + oauth_callback_url: https://nebari.example.com/hub/oauth_callback + ci_cd: type: github-actions branch: main + terraform_state: type: remote + namespace: dev + local: - kube_context: arn:aws:eks::xxxxxxxxxxxx:cluster/ + kube_context: arn:aws:eks:us-west-2:123456789012:cluster/my-eks-cluster node_selectors: general: key: eks.amazonaws.com/nodegroup @@ -169,25 +573,28 @@ local: worker: key: eks.amazonaws.com/nodegroup value: worker + profiles: jupyterlab: - - display_name: Small Instance - description: Stable environment with 2 cpu / 8 GB ram - default: true - kubespawner_override: - cpu_limit: 2 - cpu_guarantee: 1.5 - mem_limit: 8G - mem_guarantee: 5G - image: quansight/nebari-jupyterlab:latest - - display_name: Medium Instance - description: Stable environment with 4 cpu / 16 GB ram - kubespawner_override: - cpu_limit: 4 - cpu_guarantee: 3 - mem_limit: 16G - mem_guarantee: 10G - image: quansight/nebari-jupyterlab:latest + - display_name: Small Instance + description: 2 CPU / 8 GB RAM + default: true + kubespawner_override: + cpu_limit: 2 + cpu_guarantee: 1.5 + mem_limit: 8G + mem_guarantee: 5G + image: quansight/nebari-jupyterlab:latest + + - display_name: Medium Instance + description: 4 CPU / 16 GB RAM + kubespawner_override: + cpu_limit: 4 + cpu_guarantee: 3 + mem_limit: 16G + mem_guarantee: 10G + image: quansight/nebari-jupyterlab:latest + dask_worker: Small Worker: worker_cores_limit: 2 @@ -196,6 +603,7 @@ profiles: worker_memory: 5G worker_threads: 2 image: quansight/nebari-dask-worker:latest + Medium Worker: worker_cores_limit: 4 worker_cores: 3 @@ -203,23 +611,346 @@ profiles: worker_memory: 10G worker_threads: 4 image: quansight/nebari-dask-worker:latest + environments: -... + environment-default.yaml: + name: default + channels: + - conda-forge + dependencies: + - python=3.11 + - ipykernel + - ipywidgets ```
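+
+Before deploying, it can be worth checking that the file parses and passes Nebari's schema checks (assuming your Nebari CLI version provides the `validate` subcommand):
+
+```bash
+nebari validate --config nebari-config.yaml
+```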
-Once updated, deploy Nebari. When prompted be ready to manually update the DNS record. +#### Deploy Nebari -- `local` or "existing" deployments fail if you pass `--dns-auto-provision` or `--disable-prompt` +Deploy Nebari to your EKS cluster: +```bash +nebari deploy --config nebari-config.yaml ``` -python -m nebari deploy --config nebari-config.yaml + +When prompted, update your DNS records to point your domain to the cluster's load balancer. Nebari will provide the necessary DNS configuration details during deployment. + + + + + +#### Initialize Nebari Configuration + +Initialize Nebari using the `existing` provider: + +```bash +nebari init existing \ + --project \ + --domain \ + --auth-provider github ``` -The deployment completes successfully and all the pods appear to be running and so do the pre-existing Guestbook web -app. +#### Configure nebari-config.yaml + +Update the configuration file with your AKS-specific settings: + +```yaml +project_name: +provider: existing +domain: + +certificate: + type: lets-encrypt + acme_email: admin@example.com + +security: + authentication: + type: GitHub + config: + client_id: + client_secret: + oauth_callback_url: https:///hub/oauth_callback + +local: + # Set this to your AKS cluster context name + kube_context: # e.g., "my-aks-cluster" + + # Configure node selectors based on your node pool labels + node_selectors: + general: + key: agentpool + value: general + user: + key: agentpool + value: user + worker: + key: agentpool + value: worker + +profiles: + jupyterlab: + - display_name: Small Instance + description: 2 CPU / 8 GB RAM + default: true + kubespawner_override: + cpu_limit: 2 + cpu_guarantee: 1.5 + mem_limit: 8G + mem_guarantee: 5G + + - display_name: Medium Instance + description: 4 CPU / 16 GB RAM + kubespawner_override: + cpu_limit: 4 + cpu_guarantee: 3 + mem_limit: 16G + mem_guarantee: 10G + + dask_worker: + Small Worker: + worker_cores_limit: 2 + worker_cores: 1.5 + worker_memory_limit: 8G + worker_memory: 5G + worker_threads: 2 + + Medium Worker: + worker_cores_limit: 4 + worker_cores: 3 + worker_memory_limit: 16G + worker_memory: 10G + worker_threads: 4 +``` + +:::tip AKS Node Selectors +AKS uses `agentpool` as the label key for node pools by default. If you used custom labels when creating your node pools with `--labels`, adjust the `node_selectors` accordingly. +::: + +#### Deploy Nebari + +```bash +nebari deploy --config nebari-config.yaml +``` + +Update your DNS records when prompted. You'll need to point your domain to the Azure Load Balancer IP address created by Nebari. 
+ + + + + +#### Initialize Nebari Configuration + +Initialize Nebari using the `existing` provider: + +```bash +nebari init existing \ + --project \ + --domain \ + --auth-provider github +``` + +#### Configure nebari-config.yaml + +Update the configuration file with your GKE-specific settings: + +```yaml +project_name: +provider: existing +domain: + +certificate: + type: lets-encrypt + acme_email: admin@example.com + +security: + authentication: + type: GitHub + config: + client_id: + client_secret: + oauth_callback_url: https:///hub/oauth_callback + +local: + # Set this to your GKE cluster context name + kube_context: gke___ + + # Configure node selectors based on your node pool labels + node_selectors: + general: + key: cloud.google.com/gke-nodepool + value: general + user: + key: cloud.google.com/gke-nodepool + value: user + worker: + key: cloud.google.com/gke-nodepool + value: worker + +profiles: + jupyterlab: + - display_name: Small Instance + description: 2 CPU / 8 GB RAM + default: true + kubespawner_override: + cpu_limit: 2 + cpu_guarantee: 1.5 + mem_limit: 8G + mem_guarantee: 5G + + - display_name: Medium Instance + description: 4 CPU / 16 GB RAM + kubespawner_override: + cpu_limit: 4 + cpu_guarantee: 3 + mem_limit: 16G + mem_guarantee: 10G + + dask_worker: + Small Worker: + worker_cores_limit: 2 + worker_cores: 1.5 + worker_memory_limit: 8G + worker_memory: 5G + worker_threads: 2 + + Medium Worker: + worker_cores_limit: 4 + worker_cores: 3 + worker_memory_limit: 16G + worker_memory: 10G + worker_threads: 4 +``` + +:::tip GKE Node Selectors +GKE automatically applies the `cloud.google.com/gke-nodepool` label to nodes based on their node pool name. If you used custom labels with `--node-labels`, adjust the `node_selectors` accordingly. +::: + +#### Deploy Nebari + +```bash +nebari deploy --config nebari-config.yaml +``` + +Update your DNS records when prompted. You'll need to point your domain to the GCP Load Balancer IP address created by Nebari.
+ +## Important Configuration Notes + +### Understanding kubernetes_context + +The `kube_context` field in your `nebari-config.yaml` is criticalβ€”it tells Nebari which Kubernetes cluster to deploy to. This must exactly match a context name from your kubeconfig. + +To find your context name: + +```bash +kubectl config get-contexts +``` + +The output shows all available contexts. Use the value from the `NAME` column: + +``` +CURRENT NAME CLUSTER AUTHINFO +* arn:aws:eks:us-west-2:123456789:cluster/my-cluster arn:aws:eks:... arn:aws:eks:... + gke_my-project_us-central1_my-cluster gke_my-project_... gke_my-project_... + my-aks-cluster my-aks-cluster clusterUser_... +``` + +### Node Selectors + +Node selectors ensure Nebari components are scheduled on the appropriate nodes: + +- **general**: Core services (JupyterHub, Prometheus, etc.) - require stable, always-on nodes +- **user**: User notebook servers - benefit from autoscaling +- **worker**: Dask workers - benefit from aggressive autoscaling for compute workloads + +The node selector keys vary by provider: +- **AWS EKS**: `eks.amazonaws.com/nodegroup` +- **Azure AKS**: `agentpool` (default) or custom labels +- **GCP GKE**: `cloud.google.com/gke-nodepool` (default) or custom labels + +You can verify node labels with: + +```bash +kubectl get nodes --show-labels +``` + +## Verifying the Deployment + +After deployment completes: + +1. **Check pods are running:** + ```bash + kubectl get pods -A + ``` + +2. **Verify ingress is configured:** + ```bash + kubectl get ingress -A + ``` + +3. **Check services:** + ```bash + kubectl get svc -A + ``` + +4. **Access Nebari:** + Navigate to `https://` in your browser + +## Troubleshooting + +### Pods Stuck in Pending + +If pods remain in `Pending` state: + +```bash +kubectl describe pod -n +``` + +Common causes: +- **Node selector mismatch**: Labels in `nebari-config.yaml` don't match actual node labels +- **Insufficient resources**: Nodes don't have enough CPU/memory +- **No nodes available**: Node group/pool hasn't scaled up yet + +### Authentication Issues + +If you can't log in to Nebari: + +1. Verify OAuth application credentials in your `nebari-config.yaml` +2. Check the callback URL matches exactly: `https:///hub/oauth_callback` +3. 
Review JupyterHub logs: + ```bash + kubectl logs -n deployment/hub -f + ``` + +### LoadBalancer Service Pending + +If the LoadBalancer service stays in `Pending`: + +**AWS EKS:** +- Verify subnets are tagged correctly for load balancer provisioning +- Check AWS Load Balancer Controller is installed + +**Azure AKS:** +- Ensure the AKS cluster has permissions to create load balancers +- Check resource group has available quota + +**GCP GKE:** +- Verify HTTP(S) Load Balancing is enabled on the cluster +- Check firewall rules allow traffic on port 443 + +## Next Steps + +- [Configure custom environments](/docs/how-tos/nebari-environment-management) +- [Set up monitoring](/docs/how-tos/setup-monitoring) +- [Configure backup strategies](/docs/how-tos/manual-backup) +- [Explore Dask for distributed computing](/docs/tutorials/using_dask) + +## Additional Resources + +- [AWS EKS Documentation](https://docs.aws.amazon.com/eks/) +- [Azure AKS Documentation](https://learn.microsoft.com/en-us/azure/aks/) +- [Google GKE Documentation](https://cloud.google.com/kubernetes-engine/docs) +- [Kubernetes Documentation](https://kubernetes.io/docs/) diff --git a/docs/sidebars.js b/docs/sidebars.js index 4c82e8c85..6c2f92537 100644 --- a/docs/sidebars.js +++ b/docs/sidebars.js @@ -41,6 +41,7 @@ module.exports = { "tutorials/creating-new-environments", "tutorials/jupyter-scheduler", "tutorials/argo-workflows-walkthrough", + "tutorials/create-alerts", ], }, { @@ -52,6 +53,7 @@ module.exports = { "how-tos/nebari-aws", "how-tos/nebari-azure", "how-tos/nebari-kubernetes", + "how-tos/nebari-bare-metal", "how-tos/nebari-local", "how-tos/nebari-stages-directory", "how-tos/nebari-environment-management", From 62dbf81d9c69c9e19a256160e3734d8e377b9ce9 Mon Sep 17 00:00:00 2001 From: vinicius douglas cerutti Date: Tue, 6 Jan 2026 16:48:30 -0300 Subject: [PATCH 2/4] fix build & update k3s setup docs --- docs/docs/how-tos/nebari-bare-metal.mdx | 333 ++++++++++++++++++++++-- docs/sidebars.js | 1 - 2 files changed, 314 insertions(+), 20 deletions(-) diff --git a/docs/docs/how-tos/nebari-bare-metal.mdx b/docs/docs/how-tos/nebari-bare-metal.mdx index 093f089bf..52432ff83 100644 --- a/docs/docs/how-tos/nebari-bare-metal.mdx +++ b/docs/docs/how-tos/nebari-bare-metal.mdx @@ -54,6 +54,15 @@ On bare metal nodes: - Ubuntu 20.04+ or compatible Linux distribution - Passwordless sudo access for the SSH user +:::note Running Ansible +Ansible requires a Linux/Unix environment. If your workstation runs Windows: +- Use WSL2 (Windows Subsystem for Linux) +- Deploy from one of your Linux nodes (e.g., the first control plane node) +- Use a Linux VM or container + +The deployment examples below assume you're running from a Linux environment with direct SSH access to all cluster nodes. +::: + ## Step 1: Clone nebari-k3s Repository ```bash @@ -78,7 +87,7 @@ all: # KubeVIP configuration kube_vip_tag_version: "v0.7.0" - kube_vip_interface: "eth0" # Network interface for VIP + kube_vip_interface: "ens5" # Network interface for VIP (default: ens5) kube_vip_lb_ip_range: "192.168.1.200-192.168.1.220" # IPs for services # MetalLB configuration @@ -110,6 +119,206 @@ all: node: ``` +### Advanced Configuration with Custom Data Directory + +For production deployments, especially when using dedicated storage volumes, configure K3s to use a custom data directory. 
This is particularly important when: +- You have multiple disks (OS disk and separate data disk) +- You want to use high-performance storage for Kubernetes data +- You need to manage disk space separately for system and application data + +Create or update your `group_vars/all.yaml`: + +```yaml +--- +# K3s version to install +# Check https://github.com/k3s-io/k3s/releases for available versions +k3s_version: v1.30.2+k3s2 + +# Ansible connection user (must have passwordless sudo on all nodes) +ansible_user: ubuntu + +# Network interface used by flannel CNI for pod networking +# Run 'ip addr show' on your nodes to find the correct interface +flannel_iface: ens192 + +# ============ KubeVIP Configuration ============ +# KubeVIP provides a virtual IP for the Kubernetes API server (HA) + +# Enable ARP broadcasts for virtual IP +kube_vip_arp: true + +# Network interface where the virtual IP will be configured +# Must match the interface with connectivity to other nodes +kube_vip_interface: ens192 + +# KubeVIP container image version +kube_vip_tag_version: v0.8.2 + +# Virtual IP address for Kubernetes API server +# This IP must be: +# - In the same subnet as your nodes +# - Not currently in use by any other device +# - Accessible from all nodes +apiserver_endpoint: 192.168.1.100 + +# ============ Cluster Security ============ +# Shared secret token for K3s cluster nodes to authenticate +# IMPORTANT: Must be alphanumeric only (no special characters) +# Generate a secure random token: openssl rand -hex 20 +k3s_token: your-secure-cluster-token + +# ============ K3s Server Arguments ============ +# Additional arguments passed to K3s server nodes (control plane) +extra_server_args: >- + --tls-san {{ apiserver_endpoint }} + --disable servicelb + --disable traefik + --write-kubeconfig-mode 644 + --flannel-iface={{ flannel_iface }} + --data-dir /mnt/k3s-data + +# --tls-san: Add virtual IP to API server TLS certificate +# --disable servicelb: Disable built-in load balancer (we use MetalLB) +# --disable traefik: Disable built-in ingress (Nebari installs its own) +# --write-kubeconfig-mode 644: Make kubeconfig readable +# --flannel-iface: Network interface for pod networking +# --data-dir: Custom location for K3s data (optional, see Step 2.1) + +# ============ K3s Agent Arguments ============ +# Additional arguments passed to K3s agent nodes (workers) +extra_agent_args: >- + --flannel-iface={{ flannel_iface }} + --data-dir /mnt/k3s-data + +# ============ MetalLB Configuration ============ +# MetalLB provides LoadBalancer services on bare metal + +# MetalLB type: 'native' (recommended) or 'frr' +metal_lb_type: native + +# MetalLB mode: 'layer2' (simple ARP-based) or 'bgp' (requires BGP router) +metal_lb_mode: layer2 + +# MetalLB speaker image version +metal_lb_speaker_tag_version: v0.14.8 + +# MetalLB controller image version +metal_lb_controller_tag_version: v0.14.8 + +# IP address range for LoadBalancer services +# Can be a string or list: "192.168.1.200-192.168.1.220" or ["192.168.1.200-192.168.1.220"] +# These IPs will be assigned to Nebari's ingress and other LoadBalancer services +# Requirements: +# - Must be in the same subnet as your nodes +# - Must not overlap with DHCP ranges or other static IPs +# - Reserve enough IPs for all services (typically 5-10 is sufficient) +metal_lb_ip_range: 192.168.1.200-192.168.1.220 # Can also be a list: ["192.168.1.200-192.168.1.220"] +``` + +### Variable Reference Summary + +| Variable | Required | Default | Description | 
+|----------|----------|---------|-------------|
+| `k3s_version` | Yes | - | K3s version to install |
+| `ansible_user` | Yes | - | SSH user with sudo access |
+| `flannel_iface` | Yes | - | Network interface for pod networking |
+| `kube_vip_interface` | Yes | - | Network interface for virtual IP |
+| `kube_vip_tag_version` | No | v0.8.2 | KubeVIP image version |
+| `kube_vip_arp` | No | true | Enable ARP for virtual IP |
+| `apiserver_endpoint` | Yes | - | Virtual IP for Kubernetes API |
+| `k3s_token` | Yes | - | Cluster authentication token (alphanumeric) |
+| `extra_server_args` | No | - | Additional K3s server arguments |
+| `extra_agent_args` | No | - | Additional K3s agent arguments |
+| `metal_lb_type` | No | native | MetalLB implementation type |
+| `metal_lb_mode` | No | layer2 | MetalLB operating mode |
+| `metal_lb_ip_range` | Yes | - | IP range for LoadBalancer services |
+| `metal_lb_speaker_tag_version` | No | v0.14.8 | MetalLB speaker image version |
+| `metal_lb_controller_tag_version` | No | v0.14.8 | MetalLB controller image version |
+
+:::warning Important: Custom Data Directory
+If you specify `--data-dir /mnt/k3s-data`, you **must** ensure this directory exists and is properly mounted on **all** nodes before running the Ansible playbook. See Step 2.1 below.
+:::
+
+### Step 2.1: Prepare Storage (Required for Custom Data Directory)
+
+If you're using a custom data directory with dedicated storage volumes, prepare them on each node:
+
+#### For worker nodes with separate data disks:
+
+```bash
+# On each node, identify the data disk
+lsblk
+
+# Format the disk (example: /dev/sdb - verify your disk name!)
+sudo mkfs.ext4 /dev/sdb
+
+# Create mount point
+sudo mkdir -p /mnt/k3s-data
+
+# Add to fstab for persistence
+echo '/dev/sdb /mnt/k3s-data ext4 defaults 0 0' | sudo tee -a /etc/fstab
+
+# Mount the disk
+sudo mount -a
+
+# Verify
+df -h /mnt/k3s-data
+```
+
+#### For control plane with large storage requirements (using LVM):
+
+If your control plane node needs flexible storage management (e.g., for backups, persistent volumes):
+
+```bash
+# Check available volume groups
+sudo vgs
+
+# Create logical volume (example: 1.4TB from existing volume group)
+sudo lvcreate -L 1400G -n k3s-data ubuntu-vg
+
+# Format with XFS for better performance with large files
+sudo mkfs.xfs /dev/ubuntu-vg/k3s-data
+
+# Create mount point
+sudo mkdir -p /mnt/k3s-data
+
+# Add to fstab using UUID for reliability
+UUID=$(sudo blkid -s UUID -o value /dev/ubuntu-vg/k3s-data)
+echo "UUID=$UUID /mnt/k3s-data xfs defaults 0 2" | sudo tee -a /etc/fstab
+
+# Mount
+sudo mount -a
+
+# Verify
+df -h /mnt/k3s-data
+lsblk
+```
+
+:::tip Storage Recommendations
+- **XFS**: Better for large files and high I/O workloads (recommended for nodes with databases or large datasets)
+- **ext4**: General purpose, good default choice for most workloads
+- **Leave space for expansion**: Don't allocate 100% of available storage to allow for future growth
+- **Consistent paths**: Use the same mount point (`/mnt/k3s-data`) on all nodes
+:::
+
+### Step 2.2: Verify Network Interfaces
+
+Ensure you're using the correct network interface names in your configuration:
+
+```bash
+# On each node, list network interfaces
+ip addr show
+
+# Common interface names:
+# - ens192, ens160 (VMware)
+# - eth0, eth1 (AWS, some bare metal)
+# - eno1, eno2 (Dell, HP servers)
+```
+
+Update `flannel_iface` and `kube_vip_interface` in your `group_vars/all.yaml` to match your actual interface names.
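+
+If you are still unsure which interface a node uses to reach the rest of the network, asking the kernel for a route is a quick check; the interface name appears after `dev` in the output:
+
+```bash
+ip route get 1.1.1.1
+```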
+ ## Step 3: Run Ansible Playbook Deploy the K3s cluster: @@ -125,13 +334,29 @@ This will: 4. Install and configure MetalLB for service load balancing 5. Set up proper node labels and taints +:::warning Known Issue: Multi-Master Join +There's a known issue in nebari-k3s where additional master nodes may fail to join the cluster correctly due to the IP filtering task returning multiple IPs. If you encounter this: + +1. Check that additional master nodes are running K3s: + ```bash + ssh user@node2 "sudo systemctl status k3s" + ``` + +2. Verify they can reach the first master node: + ```bash + ssh user@node2 "curl -k https://192.168.1.101:6443/ping" + ``` + +3. If a node is running but not joined, you may need to manually re-run the join command on that node or investigate the Ansible task that filters the flannel interface IP. +::: + ## Step 4: Sync Kubeconfig After the playbook completes, sync the kubeconfig to your local machine: ```bash # Set environment variables -export SSH_USER="ubuntu" +export SSH_USER="root" # Default: root (change if using different user) export SSH_HOST="192.168.1.101" # IP of any master node export SSH_KEY_FILE="~/.ssh/id_rsa" @@ -149,26 +374,41 @@ You should see all your nodes in a `Ready` state. ## Step 5: Label Nodes for Nebari -Nebari requires specific node labels for scheduling workloads. Label your nodes according to their roles: +Nebari requires specific node labels for scheduling workloads. For optimal resource utilization and proper workload distribution, use the recommended `node-role.nebari.io/group` label: ```bash -# Label control plane node(s) as general nodes +# Label control plane/general nodes kubectl label nodes node1 node2 node3 \ - node-role.kubernetes.io/general=true + node-role.nebari.io/group=general -# Label worker nodes for user workloads -kubectl label nodes node4 node5 \ - node-role.kubernetes.io/user=true +# Label user workload nodes +kubectl label nodes node4 \ + node-role.nebari.io/group=user -# Label worker nodes for Dask workers +# Label Dask worker nodes kubectl label nodes node5 node6 \ - node-role.kubernetes.io/worker=true + node-role.nebari.io/group=worker ``` -:::tip -You can assign multiple roles to the same node if needed. For example, a node can be both `user` and `worker`. 
+:::tip Node Labeling Best Practices +- **Consistent labeling**: Using `node-role.nebari.io/group` as the label key ensures consistent behavior across all Nebari components +- **Multiple roles**: A node can have multiple roles if needed (e.g., both `user` and `worker` on the same node) +- **Control plane nodes**: Typically labeled as `general` to host core Nebari services +- **Resource optimization**: Proper labeling enables Horizontal Pod Autoscaling (HPA) to fully utilize your cluster resources + +**Alternative labeling schemes** (legacy): +```bash +# These also work but are less recommended +kubectl label nodes node1 node-role.kubernetes.io/general=true +``` ::: +Verify your labels: + +```bash +kubectl get nodes --show-labels +``` + ## Step 6: Initialize Nebari Configuration Now initialize Nebari for deployment on your existing cluster: @@ -205,18 +445,18 @@ security: local: # Specify the kubectl context name from your kubeconfig kube_context: "default" # Or the context name from your K3s cluster - + # Configure node selectors to match your labeled nodes node_selectors: general: - key: node-role.kubernetes.io/general - value: "true" + key: node-role.nebari.io/group + value: general user: - key: node-role.kubernetes.io/user - value: "true" + key: node-role.nebari.io/group + value: user worker: - key: node-role.kubernetes.io/worker - value: "true" + key: node-role.nebari.io/group + value: worker # Configure default profiles profiles: @@ -366,6 +606,61 @@ Example NFS storage class configuration: storage_class_name: nfs-client ``` +## Storage Considerations + +K3s includes a default `local-path` storage provisioner that works well for development. For production: + +- **Local storage**: K3s local-path provisioner (default) +- **Network storage**: Configure NFS, Ceph, or other storage classes +- **Cloud storage**: If running in a hybrid environment, configure cloud CSI drivers + +Example NFS storage class configuration: + +```yaml +# Add to nebari-config.yaml under theme.jupyterhub +storage_class_name: nfs-client +``` + +### Migrating Existing User Data + +If you're migrating from an existing system (e.g., Slurm cluster), you can pre-populate user data: + +1. **Copy data to the storage node** (typically a control plane node with large storage): + ```bash + # From old system to new K3s storage + rsync -avhP -e ssh /old/home/ user@k3s-node:/mnt/k3s-data/backup/home/ + ``` + +2. **Note about user IDs**: User IDs in JupyterHub pods may differ from your existing system. After Nebari deployment: + - Check the UID used by JupyterHub: `kubectl exec -it jupyter- -- id` + - Adjust file ownership if needed: + ```bash + # On the storage node + sudo chown -R : /mnt/k3s-data/backup/home/ + ``` + +3. 
**Create persistent volume for user data** (if using custom storage): + ```yaml + apiVersion: v1 + kind: PersistentVolume + metadata: + name: user-data-pv + spec: + capacity: + storage: 1000Gi + accessModes: + - ReadWriteMany + hostPath: + path: /mnt/k3s-data/users + ``` + +:::tip User Data Best Practices +- Test data migration with a single user first +- Verify file permissions match JupyterHub pod UIDs +- Consider using NFS or similar for multi-node access to user data +- Keep backups of original data during migration +::: + ## Scaling Your Cluster ### Adding Worker Nodes diff --git a/docs/sidebars.js b/docs/sidebars.js index 6c2f92537..97be7ee30 100644 --- a/docs/sidebars.js +++ b/docs/sidebars.js @@ -41,7 +41,6 @@ module.exports = { "tutorials/creating-new-environments", "tutorials/jupyter-scheduler", "tutorials/argo-workflows-walkthrough", - "tutorials/create-alerts", ], }, { From 51b9a2a5f908db0696a64553edbbbdf3e275598a Mon Sep 17 00:00:00 2001 From: vinicius douglas cerutti Date: Tue, 6 Jan 2026 21:34:39 -0300 Subject: [PATCH 3/4] add mermaid dep, update k3s docs --- docs/docs/how-tos/nebari-bare-metal.mdx | 1322 ++++++++++++++++++++++- docs/docusaurus.config.js | 6 + docs/package.json | 1 + docs/yarn.lock | 713 +++++++++++- 4 files changed, 2029 insertions(+), 13 deletions(-) diff --git a/docs/docs/how-tos/nebari-bare-metal.mdx b/docs/docs/how-tos/nebari-bare-metal.mdx index 52432ff83..60cf5ebae 100644 --- a/docs/docs/how-tos/nebari-bare-metal.mdx +++ b/docs/docs/how-tos/nebari-bare-metal.mdx @@ -1,14 +1,1324 @@ --- id: nebari-bare-metal title: Deploy Nebari on Bare Metal with K3s -description: Set up a K3s cluster on bare metal machines and deploy Nebari +description: Step-by-step guide to deploying Nebari on bare metal infrastructure --- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + # Deploy Nebari on Bare Metal with K3s -This guide walks you through deploying Nebari on bare metal infrastructure using [nebari-k3s](https://github.com/nebari-dev/nebari-k3s), an Ansible-based solution that sets up a production-ready K3s cluster with KubeVIP and MetalLB. +This how-to guide covers deploying Nebari on bare metal infrastructure using K3s (a lightweight Kubernetes distribution). Choose the approach that best fits your needs: + +
+
+

+| Approach | Best for | Time | Servers |
+| --- | --- | --- | --- |
+| πŸš€ Quick Start | Testing, development, learning | 15-30 minutes | 1 node |
+| 🏭 Production Setup | Production workloads, HA deployments | 2-3 hours | 3+ nodes |
+
+ +:::info About This Guide +This replaces the deprecated `nebari-slurm` project, providing a modern Kubernetes-based approach for bare metal deployments. For cloud deployments, see [Deploy on Existing Kubernetes](/docs/how-tos/nebari-kubernets). +::: + +--- + +## Quick Start: Single-Node + +Get Nebari running quickly on a single machine for testing, development, or small-scale use. + +### Prerequisites + +
+System Requirements (click to expand) + +- One bare metal server or VM +- Ubuntu 20.04+ (or compatible Linux distribution) +- 8 vCPU / 32 GB RAM minimum +- 200 GB disk space +- Root or sudo access + +
+ +### Steps + +1. **Install K3s**: + ```bash + curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="--disable traefik --disable servicelb" sh - + ``` + + :::info Why These Flags? + - `--disable traefik`: Nebari installs its own ingress controller + - `--disable servicelb`: MetalLB will provide LoadBalancer services instead + ::: + +2. **Verify installation**: + ```bash + sudo k3s kubectl get nodes + ``` + + You should see your node in `Ready` state. + +3. **Install MetalLB for LoadBalancer support**: + + + + + ```bash + # Apply MetalLB manifest + sudo k3s kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.14.8/config/manifests/metallb-native.yaml + + # Wait for MetalLB pods to be ready + sudo k3s kubectl wait --namespace metallb-system \ + --for=condition=ready pod \ + --selector=app=metallb \ + --timeout=90s + ``` + + + + + ```bash + # Add MetalLB Helm repository + helm repo add metallb https://metallb.github.io/metallb + helm repo update + + # Install MetalLB + helm install metallb metallb/metallb --namespace metallb-system --create-namespace + ``` + + + + +4. **Configure MetalLB IP pool**: + + ```bash + cat < + + + ```bash + # Copy kubeconfig to standard location + mkdir -p ~/.kube + sudo cat /etc/rancher/k3s/k3s.yaml > ~/.kube/k3s-config + chmod 600 ~/.kube/k3s-config + ``` + + + + + ```bash + # Copy and modify kubeconfig for remote access + sudo cat /etc/rancher/k3s/k3s.yaml > ~/.kube/k3s-config + + # Replace localhost with actual server IP + sed -i 's/127.0.0.1//g' ~/.kube/k3s-config + chmod 600 ~/.kube/k3s-config + ``` + + + + +6. **Label the node** (optional but recommended): + + ```bash + # Get node name and apply labels + NODE_NAME=$(sudo k3s kubectl get nodes -o jsonpath='{.items[0].metadata.name}') + + sudo k3s kubectl label node $NODE_NAME \ + node-role.nebari.io/group=general \ + node-role.nebari.io/group=user \ + node-role.nebari.io/group=worker + ``` + +7. **Initialize Nebari**: + + ```bash + nebari init existing \ + --project my-nebari \ + --domain nebari.example.com \ + --auth-provider github + ``` + +8. **Configure `nebari-config.yaml`**: + +
+   **Minimal configuration:**
+
+   ```yaml
+   provider: existing
+   kubeconfig_path: ~/.kube/k3s-config
+   kubernetes_context: default
+
+   local:
+     kube_context: default
+     node_selectors:
+       general:
+         key: node-role.nebari.io/group
+         value: general
+       user:
+         key: node-role.nebari.io/group
+         value: user
+       worker:
+         key: node-role.nebari.io/group
+         value: worker
+   ```
+
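+   Before moving on, it can help to confirm that the context name in the copied kubeconfig really is `default`, the value referenced above; if it differs, update `kubernetes_context` and `kube_context` to match:
+
+   ```bash
+   # List the contexts defined in the kubeconfig Nebari will use
+   kubectl config get-contexts --kubeconfig ~/.kube/k3s-config
+   ```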
+ +9. **Deploy Nebari**: + + ```bash + nebari deploy -c nebari-config.yaml + ``` + +### Next Steps + +:::tip What's Next? +βœ… Update DNS A record to point to your MetalLB IP +βœ… Access Nebari at your configured domain +βœ… For production workloads, continue to the [Production Deployment](#production-deployment) section +::: + +--- + +## Production Deployment + +Deploy a high-availability Nebari cluster on multiple bare metal servers using automated configuration management. + +**When to use this**: +- βœ… Production workloads requiring high availability +- βœ… Multiple servers for resource isolation +- βœ… Need for automated cluster management +- βœ… Growing user base requiring scalability + +### Architecture Overview + +A production deployment uses: + +
+- **K3s**: Lightweight Kubernetes
+- **KubeVIP**: Virtual IP for HA
+- **MetalLB**: LoadBalancer implementation
+- **Ansible**: Automation tool
+
+```mermaid
+%%{init: {'flowchart': {'curve': 'linear'}}}%%
+flowchart TB
+    VIP["Virtual IP<br/>192.168.1.100<br/>(KubeVIP)"]
+
+    CP1["Control Node 1<br/>8 vCPU / 32 GB"]
+    CP2["Control Node 2<br/>4 vCPU / 16 GB"]
+    CP3["Control Node 3<br/>4 vCPU / 16 GB"]
+
+    W1["User Node"]
+    W2["Worker Node 1"]
+    W3["Worker Node 2"]
+
+    LB["MetalLB<br/>192.168.1.200-220"]
+
+    VIP --> CP1
+    VIP --> CP2
+    VIP --> CP3
+
+    CP1 --> W1
+    CP1 --> W2
+    CP1 --> W3
+
+    LB -.-> W1
+    LB -.-> W2
+```
+
+ +### Prerequisites + + + + +**Minimum 3 servers** (recommended 6+ for production): + +| Node Type | vCPU | RAM | Disk | Count | Purpose | +|-----------|------|-----|------|-------|----------| +| Control Plane (Primary) | 8 | 32 GB | 500 GB | 1 | K3s control + Nebari general workloads | +| Control Plane (Secondary) | 4 | 16 GB | 200 GB | 2 | K3s control (HA only) | +| Worker | 8+ | 32+ GB | 200+ GB | 3+ | User sessions, Dask workers | + +:::important Primary Control Plane Node +One control plane node should have **significantly more resources** (8 vCPU / 32 GB RAM minimum) because it will: +- Run Kubernetes control plane components (API server, scheduler, controller manager) +- Host Nebari's **general workloads** (JupyterHub, monitoring, databases) +- Serve as the primary management node + +The other control plane nodes can be smaller (4 vCPU / 16 GB RAM) as they primarily provide high availability for the Kubernetes API. +::: + +**Network requirements**: +- All servers on same network subnet +- Static IP addresses for all servers +- One virtual IP address (for Kubernetes API) +- IP range for MetalLB (5-20 addresses) + +**Understanding MetalLB IP Ranges**: + +MetalLB requires a range of IP addresses to assign to Kubernetes LoadBalancer services (like Nebari's ingress). Your networking setup determines how you configure this: + + + + +**Scenario**: All servers on a single internal network (e.g., `192.168.1.0/24`) + +``` +Main Router (192.168.1.1) + β”‚ + β”œβ”€β”€ K3s Nodes: 192.168.1.101-106 + └── MetalLB Range: 192.168.1.200-220 +``` + +- Use IPs from the same subnet as your nodes +- Ensure IPs are outside DHCP range +- No additional routing needed +- **Example**: `metal_lb_ip_range: 192.168.1.200-192.168.1.220` + + + + +**Scenario**: Bare metal servers with multiple network interfaces, using a dedicated network for services + +``` +Main Network (eth0/ens192) Service Network (eth1/ens224) +192.168.1.0/24 10.0.100.0/24 + β”‚ β”‚ + β”œβ”€β”€ Node Management IPs β”œβ”€β”€ MetalLB Range: 10.0.100.50-70 + └── Kubernetes API VIP └── Exposed to external network/firewall +``` + +- Add a second network interface to each node +- Configure MetalLB to use the service network range +- Route this network through your datacenter's edge router/firewall +- Allows separation of cluster traffic from user-facing services +- **Example**: `metal_lb_ip_range: 10.0.100.50-10.0.100.70` + +**Why use this?** +- Security: Separate control plane from user-facing services +- Network policies: Apply different firewall rules to service IPs +- Scalability: Easier to route/load-balance across multiple clusters +- Production standard: Matches typical datacenter network design + + + + +**Scenario**: Bare metal with routed public IPs (colocation, dedicated servers) + +``` +Internet + β”‚ +Datacenter Router (routes 203.0.113.0/28) + β”‚ + β”œβ”€β”€ Node Internal: 192.168.1.101-106 + └── MetalLB Public: 203.0.113.1-14 +``` + +- Use public IPs routed to your server rack +- Coordinate with your datacenter for IP allocation +- Configure proper firewall rules +- **Example**: `metal_lb_ip_range: 203.0.113.1-203.0.113.14` + + + + +:::info Choosing the Right Approach +- **Testing/Development**: Use simple internal network (Option 1) +- **Production on-premises**: Use dedicated network interface (Option 2) +- **Colocation/Dedicated servers**: Use routed public IPs (Option 3) +::: + + + + +**Where you run Ansible**: +- Linux/Unix environment (use WSL2 on Windows) +- Python 3.8+ +- Ansible 2.10+ +- kubectl +- SSH key access to all 
servers + +**Install requirements**: +```bash +# Ubuntu/Debian +sudo apt install python3-pip kubectl +pip3 install ansible + +# macOS +brew install ansible kubectl +``` + + + + +**On all cluster servers**: +- Ubuntu 20.04+ or compatible OS +- Passwordless sudo for SSH user +- Open ports: 6443, 10250, 2379-2380 + +**Setup SSH access**: +```bash +# Generate SSH key if needed +ssh-keygen -t ed25519 -C "nebari-cluster" + +# Copy to all servers +ssh-copy-id ubuntu@192.168.1.101 +ssh-copy-id ubuntu@192.168.1.102 +# ... repeat for all nodes +``` + + + + +### Step 1: Clone nebari-k3s + +```bash +git clone https://github.com/nebari-dev/nebari-k3s.git +cd nebari-k3s +``` + +### Step 2: Create Inventory + + + + +Create `inventory.yml` with your server details: + +```yaml +all: + vars: + ansible_user: ubuntu + ansible_ssh_private_key_file: ~/.ssh/id_rsa + + children: + master: + hosts: + node1: + ansible_host: 192.168.1.101 + node2: + ansible_host: 192.168.1.102 + node3: + ansible_host: 192.168.1.103 + + node: + hosts: + node4: + ansible_host: 192.168.1.104 + node5: + ansible_host: 192.168.1.105 + node6: + ansible_host: 192.168.1.106 + + k3s_cluster: + children: + master: + node: +``` + + + + +For servers with different configurations: + +```yaml +all: + vars: + ansible_user: ubuntu + ansible_ssh_private_key_file: ~/.ssh/id_rsa + + children: + master: + hosts: + node1: + ansible_host: 192.168.1.101 + flannel_iface: ens192 + node2: + ansible_host: 192.168.1.102 + flannel_iface: ens192 + node3: + ansible_host: 192.168.1.103 + flannel_iface: ens160 # Different interface + + node: + hosts: + node4: + ansible_host: 192.168.1.104 + node_labels: + - "workload=user" + node5: + ansible_host: 192.168.1.105 + node_labels: + - "workload=dask" + + k3s_cluster: + children: + master: + node: +``` + + + + +### Step 3: Configure Variables + + + + +Create `group_vars/all.yaml` with minimal required configuration: + +```yaml +--- +# K3s version +k3s_version: v1.30.2+k3s2 + +# SSH user +ansible_user: ubuntu + +# Network interface (find with: ip addr show) +flannel_iface: ens192 + +# KubeVIP Configuration +kube_vip_interface: ens192 +apiserver_endpoint: 192.168.1.100 # Virtual IP + +# Cluster token (generate with: openssl rand -hex 20) +k3s_token: your-secure-cluster-token + +# MetalLB IP range +metal_lb_ip_range: 192.168.1.200-192.168.1.220 +``` + + + + +Create `group_vars/all.yaml` with full options: + +```yaml +--- +# K3s version +k3s_version: v1.30.2+k3s2 + +# SSH user with sudo access +ansible_user: ubuntu + +# Network interface for pod networking +flannel_iface: ens192 + +# === KubeVIP Configuration (Control Plane HA) === +kube_vip_arp: true +kube_vip_interface: ens192 +kube_vip_tag_version: v0.8.2 +apiserver_endpoint: 192.168.1.100 # Virtual IP for API server + +# === Cluster Security === +# Generate with: openssl rand -hex 20 +k3s_token: your-secure-cluster-token + +# === K3s Arguments === +extra_server_args: >- + --tls-san {{ apiserver_endpoint }} + --disable servicelb + --disable traefik + --write-kubeconfig-mode 644 + --flannel-iface={{ flannel_iface }} + --data-dir /mnt/k3s-data + +extra_agent_args: >- + --flannel-iface={{ flannel_iface }} + --data-dir /mnt/k3s-data + +# === MetalLB Configuration (LoadBalancer) === +metal_lb_type: native +metal_lb_mode: layer2 +metal_lb_speaker_tag_version: v0.14.8 +metal_lb_controller_tag_version: v0.14.8 +metal_lb_ip_range: 192.168.1.200-192.168.1.220 +``` + +:::info Custom Data Directory +If using `--data-dir /mnt/k3s-data`, ensure this directory exists and is properly 
mounted on all nodes. See [Advanced Topics](#custom-data-directory) for storage setup. +::: + + + + +
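+Before running the playbook, it is also worth confirming that the virtual IP and the MetalLB range are not already in use on your network. A minimal sketch, assuming the example addresses configured above:
+
+```bash
+# No replies means the addresses are free for KubeVIP and MetalLB to claim
+ping -c 2 -W 1 192.168.1.100 || echo "API server VIP appears unused"
+ping -c 2 -W 1 192.168.1.200 || echo "First MetalLB address appears unused"
+```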
+**πŸ’‘ Network Interface Reference**
+
+Common interface names by platform:
+
+- **VMware**: `ens192`, `ens160`
+- **AWS/Basic**: `eth0`, `eth1`
+- **Dell/HP servers**: `eno1`, `eno2`
+
+**Find your interface:**
+
+```bash
+ip addr show
+```
+
+ +### Step 4: Deploy K3s Cluster + +:::info Deployment Time +⏱️ 10-20 minutes depending on network speed and number of nodes +::: + +Run the Ansible playbook: + +```bash +ansible-playbook -i inventory.yml playbook.yaml +``` + +
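+If the run fails early, a quick connectivity check against the same inventory can confirm that SSH and passwordless sudo work on every host before you dig further (a sketch):
+
+```bash
+# Ping every host through Ansible, escalating privileges with become
+ansible all -i inventory.yml -m ping -b
+```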
+**What gets installed?**
+
+- βœ… K3s on all nodes (control plane + workers)
+- βœ… KubeVIP for control plane HA
+- βœ… MetalLB for LoadBalancer services
+- βœ… Proper node labels and configurations
+
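+After syncing the kubeconfig in the next step, you can confirm these add-ons actually came up (a quick sketch; the namespaces match those used in the troubleshooting section below):
+
+```bash
+# KubeVIP runs as pods in kube-system; MetalLB gets its own namespace
+kubectl get pods -n kube-system | grep kube-vip
+kubectl get pods -n metallb-system
+```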
+ +### Step 5: Sync Kubeconfig + +Copy kubeconfig from cluster to your local machine: + +```bash +export SSH_USER="root" +export SSH_HOST="192.168.1.101" # Any control plane node +export SSH_KEY_FILE="~/.ssh/id_rsa" + +make kubeconfig-sync +``` + +Verify cluster access: + +```bash +kubectl get nodes -o wide +``` + +### Step 6: Label Nodes + +Label nodes for Nebari workload scheduling: + +```bash +# Control plane nodes (general workloads) +kubectl label nodes node1 node2 node3 \ + node-role.nebari.io/group=general + +# User workload nodes +kubectl label nodes node4 \ + node-role.nebari.io/group=user + +# Dask worker nodes +kubectl label nodes node5 node6 \ + node-role.nebari.io/group=worker +``` + +Verify labels: + +```bash +kubectl get nodes --show-labels +``` + +### Step 7: Initialize Nebari + +```bash +nebari init existing \ + --project my-nebari \ + --domain nebari.example.com \ + --auth-provider github +``` + +### Step 8: Configure Nebari + +Edit `nebari-config.yaml`: + +```yaml +project_name: my-nebari +provider: existing +domain: nebari.example.com + +certificate: + type: lets-encrypt + acme_email: admin@example.com + +security: + authentication: + type: GitHub + config: + client_id: + client_secret: + +local: + kube_context: default + node_selectors: + general: + key: node-role.nebari.io/group + value: general + user: + key: node-role.nebari.io/group + value: user + worker: + key: node-role.nebari.io/group + value: worker + +profiles: + jupyterlab: + - display_name: Small Instance + description: 2 CPU / 8 GB RAM + default: true + kubespawner_override: + cpu_limit: 2 + cpu_guarantee: 1.5 + mem_limit: 8G + mem_guarantee: 5G + + - display_name: Medium Instance + description: 4 CPU / 16 GB RAM + kubespawner_override: + cpu_limit: 4 + cpu_guarantee: 3 + mem_limit: 16G + mem_guarantee: 10G + + dask_worker: + Small Worker: + worker_cores_limit: 2 + worker_cores: 1.5 + worker_memory_limit: 8G + worker_memory: 5G + Medium Worker: + worker_cores_limit: 4 + worker_cores: 3 + worker_memory_limit: 16G + worker_memory: 10G +``` + +### Step 9: Deploy Nebari + +```bash +nebari deploy -c nebari-config.yaml +``` + +### Step 10: Verify Deployment + + + + +```bash +# Check if all pods are running +kubectl get pods -A +``` + +All pods should be in `Running` or `Completed` state. + + + + +```bash +# 1. Check all pods +kubectl get pods -A + +# 2. Check ingress services +kubectl get ingress -A + +# 3. Verify LoadBalancer IPs assigned +kubectl get svc -A | grep LoadBalancer + +# 4. Check Nebari namespaces +kubectl get pods -n nebari +kubectl get pods -n jhub +``` + + + + +```bash +# Check pod status with details +kubectl get pods -A -o wide + +# Check events for errors +kubectl get events -A --sort-by='.lastTimestamp' + +# Check specific pod logs +kubectl logs -n + +# Describe problematic pods +kubectl describe pod -n +``` + + + + +:::tip πŸŽ‰ Final Step +Update your DNS A record to point to one of the MetalLB IP addresses, then access Nebari at your configured domain. +::: + +--- + +## Reference + +### Configuration Variables + +
+ +| Variable | Required | Default | Description | +|----------|----------|---------|-------------| +| `k3s_version` | βœ… Yes | - | K3s version (e.g., v1.30.2+k3s2) | +| `ansible_user` | βœ… Yes | - | SSH user with passwordless sudo | +| `flannel_iface` | βœ… Yes | - | Network interface for pod networking | +| `kube_vip_interface` | βœ… Yes | - | Network interface for virtual IP | +| `kube_vip_tag_version` | ❌ No | v0.8.2 | KubeVIP container version | +| `apiserver_endpoint` | βœ… Yes | - | Virtual IP for Kubernetes API | +| `k3s_token` | βœ… Yes | - | Cluster auth token (alphanumeric) | +| `metal_lb_ip_range` | βœ… Yes | - | IP range for LoadBalancer services | +| `metal_lb_type` | ❌ No | native | MetalLB type (native/frr) | +| `metal_lb_mode` | ❌ No | layer2 | MetalLB mode (layer2/bgp) | + +
+ +### Node Selector Labels + +
+| Group | Purpose | Typical nodes | Label |
+|-------|---------|---------------|-------|
+| πŸ“Š `general` | Core services (JupyterHub, monitoring) | Control plane nodes | `node-role.nebari.io/group=general` |
+| πŸ‘₯ `user` | User JupyterLab sessions | Dedicated user nodes | `node-role.nebari.io/group=user` |
+| βš™οΈ `worker` | Dask workers, batch jobs | High-resource worker nodes | `node-role.nebari.io/group=worker` |
+
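+A compact way to see how these groups are spread across the cluster is to print the label as its own column (a sketch using the label key above):
+
+```bash
+# Show each node with its Nebari group label
+kubectl get nodes -L node-role.nebari.io/group
+```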
+ +--- + +## Troubleshooting + +### Pods Not Scheduling + +:::caution Symptom +🚨 Pods remain in `Pending` state +::: + +**Quick diagnosis:** +```bash +kubectl describe pod -n +``` + +
+Common Causes & Solutions + +**1. Node labels don't match selectors** +```bash +# Check actual labels +kubectl get nodes --show-labels + +# Compare with nebari-config.yaml node_selectors +# Fix: Apply correct labels +kubectl label node node-role.nebari.io/group= +``` + +**2. Insufficient resources** +```bash +# Check node resources +kubectl describe nodes +kubectl top nodes # Requires metrics-server + +# Fix: Add more nodes or adjust resource requests +``` + +**3. Node taints** +```bash +# Check for taints +kubectl get nodes -o json | jq '.items[].spec.taints' + +# Fix: Remove unwanted taints +kubectl taint nodes - +``` + +
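+To quickly list everything that is stuck, a field selector can narrow the view to pending pods only (a sketch):
+
+```bash
+# All pods in the cluster that have not been scheduled yet
+kubectl get pods -A --field-selector=status.phase=Pending
+```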
+ +--- + +### LoadBalancer Service Pending + +:::caution Symptom +🚨 Service stuck in `Pending` with no external IP +::: + +**Quick diagnosis:** +```bash +kubectl get svc -A | grep LoadBalancer +kubectl get pods -n metallb-system +``` + +
+MetalLB Troubleshooting Steps + +**1. Verify MetalLB is running** +```bash +# Check MetalLB pods +kubectl get pods -n metallb-system + +# All pods should be Running +``` + +**2. Check MetalLB configuration** +```bash +# Verify IP pool +kubectl get ipaddresspool -n metallb-system -o yaml + +# Verify L2 advertisement +kubectl get l2advertisement -n metallb-system -o yaml +``` + +**3. Check for IP conflicts** +```bash +# Ping IPs in your range to check if already in use +ping 192.168.1.200 + +# Check MetalLB logs +kubectl logs -n metallb-system -l app=metallb --tail=50 +``` + +**4. Common fixes** +- Ensure IP range doesn't overlap with DHCP +- Verify IPs are in same subnet as nodes +- Check firewall rules allow ARP traffic + +
+ +--- + +### API Server Unreachable + +:::caution Symptom +🚨 Cannot connect to cluster with kubectl +::: + +**Quick diagnosis:** +```bash +# Test virtual IP connectivity +ping + +# Test API server port +telnet 6443 +``` + +
+KubeVIP Troubleshooting Steps + +**1. Check KubeVIP status** +```bash +# SSH to a control plane node +ssh ubuntu@ + +# Check KubeVIP pods +sudo k3s kubectl get pods -n kube-system | grep kube-vip + +# Check KubeVIP logs +sudo k3s kubectl logs -n kube-system +``` + +**2. Verify network configuration** +```bash +# Check if virtual IP is assigned +ip addr show | grep + +# Verify correct interface +ip addr show +``` + +**3. Common fixes** +- Verify `kube_vip_interface` matches actual network interface +- Ensure virtual IP is in same subnet as nodes +- Check firewall allows traffic on port 6443 +- Verify ARP is enabled (`kube_vip_arp: true`) + +
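+To find out which control plane node currently holds the virtual IP, a small loop over the masters works well (a sketch, assuming the example addresses and interface used earlier in this guide):
+
+```bash
+# Report which node has the VIP bound to its interface
+for host in 192.168.1.101 192.168.1.102 192.168.1.103; do
+  ssh ubuntu@"$host" "ip addr show ens192 | grep -q 192.168.1.100 && echo \"$host holds the VIP\""
+done
+```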
+ +--- + +## Advanced Topics + +### Custom Data Directory + +For production with dedicated storage volumes, configure K3s to use custom data directories. + +**Why**: Separate OS and application data, use high-performance storage, better disk management. + +Add to `group_vars/all.yaml`: +```yaml +extra_server_args: >- + --data-dir /mnt/k3s-data + [... other args ...] + +extra_agent_args: >- + --data-dir /mnt/k3s-data +``` + +**Prepare storage on each node**: + +```bash +# For standard disk +sudo mkfs.ext4 /dev/sdb +sudo mkdir -p /mnt/k3s-data +echo '/dev/sdb /mnt/k3s-data ext4 defaults 0 0' | sudo tee -a /etc/fstab +sudo mount -a + +# For LVM with XFS (better for large files) +sudo lvcreate -L 1400G -n k3s-data ubuntu-vg +sudo mkfs.xfs /dev/ubuntu-vg/k3s-data +UUID=$(sudo blkid -s UUID -o value /dev/ubuntu-vg/k3s-data) +echo "UUID=$UUID /mnt/k3s-data xfs defaults 0 2" | sudo tee -a /etc/fstab +sudo mount -a +``` + +### Storage Configuration + +K3s includes a `local-path` storage provisioner suitable for development. For production: + +**Options**: +- **Local storage**: Use K3s default `local-path` storage class +- **NFS**: Configure NFS server and use NFS storage class +- **Ceph/Rook**: Distributed storage for multi-node persistent volumes +- **Cloud CSI**: If hybrid cloud, use provider-specific CSI drivers + +**Example NFS configuration**: +```yaml +# In nebari-config.yaml +default_storage_class: nfs-client +``` + +### Migrating User Data + +When migrating from existing systems: + +1. **Copy data to storage node**: + ```bash + rsync -avhP -e ssh /old/home/ user@k3s-node:/mnt/k3s-data/backup/home/ + ``` + +2. **Check JupyterHub UIDs**: + ```bash + kubectl exec -it jupyter- -- id + ``` + +3. **Adjust ownership if needed**: + ```bash + sudo chown -R : /mnt/k3s-data/backup/home/ + ``` + +### Scaling the Cluster + +**Add worker nodes**: +1. Add nodes to `inventory.yml` +2. Run playbook targeting new nodes: + ```bash + ansible-playbook -i inventory.yml playbook.yaml --limit new-node + ``` +3. Label new nodes for Nebari + +**Upgrade K3s**: +1. Update `k3s_version` in `group_vars/all.yaml` +2. Run playbook: + ```bash + ansible-playbook -i inventory.yml playbook.yaml + ``` + +:::warning +Always test upgrades in non-production first. Backup data before upgrading. +::: + +--- + +## Next Steps + +
+- **πŸ‘¨β€πŸ’» Environment Management**: Configure conda environments and packages
+- **πŸ“Š Monitoring**: Set up Prometheus and Grafana
+- **πŸ’Ύ Backups**: Configure backup strategies
+- **⚑ Distributed Computing**: Explore Dask for parallel processing
+
+ +## Additional Resources + +- [nebari-k3s GitHub Repository](https://github.com/nebari-dev/nebari-k3s) +- [K3s Documentation](https://docs.k3s.io/) +- [KubeVIP Documentation](https://kube-vip.io/) +- [MetalLB Documentation](https://metallb.universe.tf/) +- [DiΓ‘taxis Documentation Framework](https://diataxis.fr/) + +## Quick Start: Single-Node Setup + +If you're just getting started or want to test Nebari on bare metal, you can deploy K3s directly on a single machine. This is perfect for development, testing, or small-scale deployments. + +### Install K3s on a Single Node + +1. **Prepare your machine** (Ubuntu 20.04+ or similar): + ```bash + # Update system packages + sudo apt update && sudo apt upgrade -y + ``` + +2. **Install K3s with the default installer**: + ```bash + curl -sfL https://get.k3s.io | sh - + ``` + + This single command downloads and installs K3s, sets it up as a systemd service, and configures everything needed to run Kubernetes. + +3. **Verify K3s is running**: + ```bash + sudo k3s kubectl get nodes + ``` + + You should see your node in a "Ready" state. + +4. **Get your kubeconfig** for Nebari deployment: + ```bash + sudo cat /etc/rancher/k3s/k3s.yaml + ``` + + Copy this kubeconfig content. You'll need to: + - Save it to a file (e.g., `~/.kube/k3s-config`) + - Replace `127.0.0.1` with your server's actual IP address if deploying from another machine + +5. **Configure Nebari to use your K3s cluster**: + + In your `nebari-config.yaml`: + ```yaml + provider: existing + + kubeconfig_path: ~/.kube/k3s-config # Path to the kubeconfig file you saved + + kubernetes_context: default # The context name from your kubeconfig + + # Optional: Configure node groups + default_node_groups: + general: + instance: general-instance + min_nodes: 1 + max_nodes: 1 + user: + instance: user-instance + min_nodes: 1 + max_nodes: 1 + worker: + instance: worker-instance + min_nodes: 1 + max_nodes: 1 + + node_selectors: + general: + node-role.nebari.io/group: general + user: + node-role.nebari.io/group: user + worker: + node-role.nebari.io/group: worker + ``` + +6. **Label your node** (optional but recommended): + ```bash + # Get your node name + sudo k3s kubectl get nodes + + # Apply labels + sudo k3s kubectl label node node-role.nebari.io/group=general + sudo k3s kubectl label node node-role.nebari.io/group=user + sudo k3s kubectl label node node-role.nebari.io/group=worker + ``` + +7. **Install MetalLB for LoadBalancer support**: + + Nebari requires LoadBalancer services for ingress. 
Install MetalLB to provide this on bare metal: + + ```bash + # Download and apply MetalLB manifest + kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.14.8/config/manifests/metallb-native.yaml + + # Wait for MetalLB to be ready + kubectl wait --namespace metallb-system \ + --for=condition=ready pod \ + --selector=app=metallb \ + --timeout=90s + ``` + + Configure MetalLB with an IP address pool (adjust IP range to match your network): + + ```bash + cat <= 2.1.2 < 3" -iconv-lite@0.6.3, iconv-lite@^0.6.3: +iconv-lite@0.6, iconv-lite@0.6.3, iconv-lite@^0.6.3: version "0.6.3" resolved "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz" integrity sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw== @@ -6088,6 +6467,16 @@ internal-slot@^1.0.4, internal-slot@^1.0.7: hasown "^2.0.0" side-channel "^1.0.4" +"internmap@1 - 2": + version "2.0.3" + resolved "https://registry.yarnpkg.com/internmap/-/internmap-2.0.3.tgz#6685f23755e43c524e251d29cbc97248e3061009" + integrity sha512-5Hh7Y1wQbvY5ooGgPbDaL5iYLAPzMTUrjMulskHLH6wnv/A+1q5rgEaiuqEjB+oxGXIVZs1FF+R/KPN3ZSQYYg== + +internmap@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/internmap/-/internmap-1.0.1.tgz#0017cc8a3b99605f0302f2b198d272e015e5df95" + integrity sha512-lDB5YccMydFBtasVtxnZ3MRBHuaoE8GKsppq+EchKL2U4nK/DmEpPHNH8MZe5HkMtpSiTSOZwfN0tzYjO/lJEw== + interpret@^1.0.0: version "1.4.0" resolved "https://registry.npmjs.org/interpret/-/interpret-1.4.0.tgz" @@ -6662,6 +7051,13 @@ jsx-ast-utils@^3.3.5: object.assign "^4.1.4" object.values "^1.1.6" +katex@^0.16.9: + version "0.16.27" + resolved "https://registry.yarnpkg.com/katex/-/katex-0.16.27.tgz#4ecf6f620e0ca1c1a5de722e85fcdcec49086a48" + integrity sha512-aeQoDkuRWSqQN6nSvVCEFvfXdqo1OQiCmmW1kc9xSdjutPv7BGO7pqY9sQRJpMOGrEdfDgF2TfRXe5eUAD2Waw== + dependencies: + commander "^8.3.0" + keyv@^4.5.3, keyv@^4.5.4: version "4.5.4" resolved "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz" @@ -6669,6 +7065,11 @@ keyv@^4.5.3, keyv@^4.5.4: dependencies: json-buffer "3.0.1" +khroma@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/khroma/-/khroma-2.1.0.tgz#45f2ce94ce231a437cf5b63c2e886e6eb42bbbb1" + integrity sha512-Ls993zuzfayK269Svk9hzpeGUKob/sIgZzyHYdjQoAdQetRKpOLj+k/QQQ/6Qi0Yz65mlROrfd+Ev+1+7dz9Kw== + kind-of@^6.0.0, kind-of@^6.0.2: version "6.0.3" resolved "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz" @@ -6679,6 +7080,11 @@ kleur@^3.0.3: resolved "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz" integrity sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w== +kleur@^4.0.3: + version "4.1.5" + resolved "https://registry.yarnpkg.com/kleur/-/kleur-4.1.5.tgz#95106101795f7050c6c650f350c683febddb1780" + integrity sha512-o+NO+8WrRiQEE4/7nwRJhN1HWpVmJm511pBHUxPLtp0BUISzlBplORYSmTclCnJvQq2tKu/sgl3xVpkc7ZWuQQ== + klona@^2.0.4: version "2.0.5" resolved "https://registry.npmjs.org/klona/-/klona-2.0.5.tgz" @@ -6711,6 +7117,11 @@ launch-editor@^2.6.0: picocolors "^1.0.0" shell-quote "^1.8.1" +layout-base@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/layout-base/-/layout-base-1.0.2.tgz#1291e296883c322a9dd4c5dd82063721b53e26e2" + integrity sha512-8h2oVEZNktL4BH2JCOI90iD1yXwL6iNW7KcCKT2QZgQJR2vbqDsldCTPRU9NifTCqHZci57XvQQ15YTu+sTYPg== + leven@^3.1.0: version "3.1.0" resolved "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz" @@ -6803,6 +7214,11 @@ locate-path@^7.1.0: dependencies: p-locate "^6.0.0" +lodash-es@^4.17.21: + version "4.17.22" 
+ resolved "https://registry.yarnpkg.com/lodash-es/-/lodash-es-4.17.22.tgz#eb7d123ec2470d69b911abe34f85cb694849b346" + integrity sha512-XEawp1t0gxSi9x01glktRZ5HDy0HXqrM0x5pXQM98EaI0NxO6jVM7omDOxsuEo5UIASAnm2bRp1Jt/e0a2XU8Q== + lodash.debounce@^4.0.8: version "4.0.8" resolved "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz" @@ -6926,6 +7342,24 @@ mdast-util-find-and-replace@^3.0.0, mdast-util-find-and-replace@^3.0.1: unist-util-is "^6.0.0" unist-util-visit-parents "^6.0.0" +mdast-util-from-markdown@^1.3.0: + version "1.3.1" + resolved "https://registry.yarnpkg.com/mdast-util-from-markdown/-/mdast-util-from-markdown-1.3.1.tgz#9421a5a247f10d31d2faed2a30df5ec89ceafcf0" + integrity sha512-4xTO/M8c82qBcnQc1tgpNtubGUW/Y1tBQ1B0i5CtSoelOLKFYlElIr3bvgREYYO5iRqbMY1YuqZng0GVOI8Qww== + dependencies: + "@types/mdast" "^3.0.0" + "@types/unist" "^2.0.0" + decode-named-character-reference "^1.0.0" + mdast-util-to-string "^3.1.0" + micromark "^3.0.0" + micromark-util-decode-numeric-character-reference "^1.0.0" + micromark-util-decode-string "^1.0.0" + micromark-util-normalize-identifier "^1.0.0" + micromark-util-symbol "^1.0.0" + micromark-util-types "^1.0.0" + unist-util-stringify-position "^3.0.0" + uvu "^0.5.0" + mdast-util-from-markdown@^2.0.0: version "2.0.1" resolved "https://registry.npmjs.org/mdast-util-from-markdown/-/mdast-util-from-markdown-2.0.1.tgz" @@ -7112,6 +7546,13 @@ mdast-util-to-markdown@^2.0.0: unist-util-visit "^5.0.0" zwitch "^2.0.0" +mdast-util-to-string@^3.1.0: + version "3.2.0" + resolved "https://registry.yarnpkg.com/mdast-util-to-string/-/mdast-util-to-string-3.2.0.tgz#66f7bb6324756741c5f47a53557f0cbf16b6f789" + integrity sha512-V4Zn/ncyN1QNSqSBxTrMOLpjr+IKdHl2v3KVLoWmDPscP4r9GcCi71gjgvUV1SFSKh92AjAG4peFuBl2/YgCJg== + dependencies: + "@types/mdast" "^3.0.0" + mdast-util-to-string@^4.0.0: version "4.0.0" resolved "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-4.0.0.tgz" @@ -7156,11 +7597,59 @@ merge2@^1.3.0, merge2@^1.4.1: resolved "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz" integrity sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg== +mermaid@^10.4.0: + version "10.9.5" + resolved "https://registry.yarnpkg.com/mermaid/-/mermaid-10.9.5.tgz#b10606abb323d34154751b5358ebe325a78c94e5" + integrity sha512-eRlKEjzak4z1rcXeCd1OAlyawhrptClQDo8OuI8n6bSVqJ9oMfd5Lrf3Q+TdJHewi/9AIOc3UmEo8Fz+kNzzuQ== + dependencies: + "@braintree/sanitize-url" "^6.0.1" + "@types/d3-scale" "^4.0.3" + "@types/d3-scale-chromatic" "^3.0.0" + cytoscape "^3.28.1" + cytoscape-cose-bilkent "^4.1.0" + d3 "^7.4.0" + d3-sankey "^0.12.3" + dagre-d3-es "7.0.13" + dayjs "^1.11.7" + dompurify "^3.2.4" + elkjs "^0.9.0" + katex "^0.16.9" + khroma "^2.0.0" + lodash-es "^4.17.21" + mdast-util-from-markdown "^1.3.0" + non-layered-tidy-tree-layout "^2.0.2" + stylis "^4.1.3" + ts-dedent "^2.2.0" + uuid "^9.0.0" + web-worker "^1.2.0" + methods@~1.1.2: version "1.1.2" resolved "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz" integrity sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w== +micromark-core-commonmark@^1.0.1: + version "1.1.0" + resolved "https://registry.yarnpkg.com/micromark-core-commonmark/-/micromark-core-commonmark-1.1.0.tgz#1386628df59946b2d39fb2edfd10f3e8e0a75bb8" + integrity sha512-BgHO1aRbolh2hcrzL2d1La37V0Aoz73ymF8rAcKnohLy93titmv62E0gP8Hrx9PKcKrqCZ1BbLGbP3bEhoXYlw== + dependencies: + decode-named-character-reference "^1.0.0" + micromark-factory-destination 
"^1.0.0" + micromark-factory-label "^1.0.0" + micromark-factory-space "^1.0.0" + micromark-factory-title "^1.0.0" + micromark-factory-whitespace "^1.0.0" + micromark-util-character "^1.0.0" + micromark-util-chunked "^1.0.0" + micromark-util-classify-character "^1.0.0" + micromark-util-html-tag-name "^1.0.0" + micromark-util-normalize-identifier "^1.0.0" + micromark-util-resolve-all "^1.0.0" + micromark-util-subtokenize "^1.0.0" + micromark-util-symbol "^1.0.0" + micromark-util-types "^1.0.1" + uvu "^0.5.0" + micromark-core-commonmark@^2.0.0: version "2.0.1" resolved "https://registry.npmjs.org/micromark-core-commonmark/-/micromark-core-commonmark-2.0.1.tgz" @@ -7351,6 +7840,15 @@ micromark-extension-mdxjs@^3.0.0: micromark-util-combine-extensions "^2.0.0" micromark-util-types "^2.0.0" +micromark-factory-destination@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/micromark-factory-destination/-/micromark-factory-destination-1.1.0.tgz#eb815957d83e6d44479b3df640f010edad667b9f" + integrity sha512-XaNDROBgx9SgSChd69pjiGKbV+nfHGDPVYFs5dOoDd7ZnMAE+Cuu91BCpsY8RT2NP9vo/B8pds2VQNCLiu0zhg== + dependencies: + micromark-util-character "^1.0.0" + micromark-util-symbol "^1.0.0" + micromark-util-types "^1.0.0" + micromark-factory-destination@^2.0.0: version "2.0.0" resolved "https://registry.npmjs.org/micromark-factory-destination/-/micromark-factory-destination-2.0.0.tgz" @@ -7360,6 +7858,16 @@ micromark-factory-destination@^2.0.0: micromark-util-symbol "^2.0.0" micromark-util-types "^2.0.0" +micromark-factory-label@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/micromark-factory-label/-/micromark-factory-label-1.1.0.tgz#cc95d5478269085cfa2a7282b3de26eb2e2dec68" + integrity sha512-OLtyez4vZo/1NjxGhcpDSbHQ+m0IIGnT8BoPamh+7jVlzLJBH98zzuCoUeMxvM6WsNeh8wx8cKvqLiPHEACn0w== + dependencies: + micromark-util-character "^1.0.0" + micromark-util-symbol "^1.0.0" + micromark-util-types "^1.0.0" + uvu "^0.5.0" + micromark-factory-label@^2.0.0: version "2.0.0" resolved "https://registry.npmjs.org/micromark-factory-label/-/micromark-factory-label-2.0.0.tgz" @@ -7400,6 +7908,16 @@ micromark-factory-space@^2.0.0: micromark-util-character "^2.0.0" micromark-util-types "^2.0.0" +micromark-factory-title@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/micromark-factory-title/-/micromark-factory-title-1.1.0.tgz#dd0fe951d7a0ac71bdc5ee13e5d1465ad7f50ea1" + integrity sha512-J7n9R3vMmgjDOCY8NPw55jiyaQnH5kBdV2/UXCtZIpnHH3P6nHUKaH7XXEYuWwx/xUJcawa8plLBEjMPU24HzQ== + dependencies: + micromark-factory-space "^1.0.0" + micromark-util-character "^1.0.0" + micromark-util-symbol "^1.0.0" + micromark-util-types "^1.0.0" + micromark-factory-title@^2.0.0: version "2.0.0" resolved "https://registry.npmjs.org/micromark-factory-title/-/micromark-factory-title-2.0.0.tgz" @@ -7410,6 +7928,16 @@ micromark-factory-title@^2.0.0: micromark-util-symbol "^2.0.0" micromark-util-types "^2.0.0" +micromark-factory-whitespace@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/micromark-factory-whitespace/-/micromark-factory-whitespace-1.1.0.tgz#798fb7489f4c8abafa7ca77eed6b5745853c9705" + integrity sha512-v2WlmiymVSp5oMg+1Q0N1Lxmt6pMhIHD457whWM7/GUlEks1hI9xj5w3zbc4uuMKXGisksZk8DzP2UyGbGqNsQ== + dependencies: + micromark-factory-space "^1.0.0" + micromark-util-character "^1.0.0" + micromark-util-symbol "^1.0.0" + micromark-util-types "^1.0.0" + micromark-factory-whitespace@^2.0.0: version "2.0.0" resolved 
"https://registry.npmjs.org/micromark-factory-whitespace/-/micromark-factory-whitespace-2.0.0.tgz" @@ -7436,6 +7964,13 @@ micromark-util-character@^2.0.0: micromark-util-symbol "^2.0.0" micromark-util-types "^2.0.0" +micromark-util-chunked@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/micromark-util-chunked/-/micromark-util-chunked-1.1.0.tgz#37a24d33333c8c69a74ba12a14651fd9ea8a368b" + integrity sha512-Ye01HXpkZPNcV6FiyoW2fGZDUw4Yc7vT0E9Sad83+bEDiCJ1uXu0S3mr8WLpsz3HaG3x2q0HM6CTuPdcZcluFQ== + dependencies: + micromark-util-symbol "^1.0.0" + micromark-util-chunked@^2.0.0: version "2.0.0" resolved "https://registry.npmjs.org/micromark-util-chunked/-/micromark-util-chunked-2.0.0.tgz" @@ -7443,6 +7978,15 @@ micromark-util-chunked@^2.0.0: dependencies: micromark-util-symbol "^2.0.0" +micromark-util-classify-character@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/micromark-util-classify-character/-/micromark-util-classify-character-1.1.0.tgz#6a7f8c8838e8a120c8e3c4f2ae97a2bff9190e9d" + integrity sha512-SL0wLxtKSnklKSUplok1WQFoGhUdWYKggKUiqhX+Swala+BtptGCu5iPRc+xvzJ4PXE/hwM3FNXsfEVgoZsWbw== + dependencies: + micromark-util-character "^1.0.0" + micromark-util-symbol "^1.0.0" + micromark-util-types "^1.0.0" + micromark-util-classify-character@^2.0.0: version "2.0.0" resolved "https://registry.npmjs.org/micromark-util-classify-character/-/micromark-util-classify-character-2.0.0.tgz" @@ -7452,6 +7996,14 @@ micromark-util-classify-character@^2.0.0: micromark-util-symbol "^2.0.0" micromark-util-types "^2.0.0" +micromark-util-combine-extensions@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/micromark-util-combine-extensions/-/micromark-util-combine-extensions-1.1.0.tgz#192e2b3d6567660a85f735e54d8ea6e3952dbe84" + integrity sha512-Q20sp4mfNf9yEqDL50WwuWZHUrCO4fEyeDCnMGmG5Pr0Cz15Uo7KBs6jq+dq0EgX4DPwwrh9m0X+zPV1ypFvUA== + dependencies: + micromark-util-chunked "^1.0.0" + micromark-util-types "^1.0.0" + micromark-util-combine-extensions@^2.0.0: version "2.0.0" resolved "https://registry.npmjs.org/micromark-util-combine-extensions/-/micromark-util-combine-extensions-2.0.0.tgz" @@ -7460,6 +8012,13 @@ micromark-util-combine-extensions@^2.0.0: micromark-util-chunked "^2.0.0" micromark-util-types "^2.0.0" +micromark-util-decode-numeric-character-reference@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/micromark-util-decode-numeric-character-reference/-/micromark-util-decode-numeric-character-reference-1.1.0.tgz#b1e6e17009b1f20bc652a521309c5f22c85eb1c6" + integrity sha512-m9V0ExGv0jB1OT21mrWcuf4QhP46pH1KkfWy9ZEezqHKAxkj4mPCy3nIH1rkbdMlChLHX531eOrymlwyZIf2iw== + dependencies: + micromark-util-symbol "^1.0.0" + micromark-util-decode-numeric-character-reference@^2.0.0: version "2.0.1" resolved "https://registry.npmjs.org/micromark-util-decode-numeric-character-reference/-/micromark-util-decode-numeric-character-reference-2.0.1.tgz" @@ -7467,6 +8026,16 @@ micromark-util-decode-numeric-character-reference@^2.0.0: dependencies: micromark-util-symbol "^2.0.0" +micromark-util-decode-string@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/micromark-util-decode-string/-/micromark-util-decode-string-1.1.0.tgz#dc12b078cba7a3ff690d0203f95b5d5537f2809c" + integrity sha512-YphLGCK8gM1tG1bd54azwyrQRjCFcmgj2S2GoJDNnh4vYtnL38JS8M4gpxzOPNyHdNEpheyWXCTnnTDY3N+NVQ== + dependencies: + decode-named-character-reference "^1.0.0" + micromark-util-character "^1.0.0" + micromark-util-decode-numeric-character-reference "^1.0.0" + 
micromark-util-symbol "^1.0.0" + micromark-util-decode-string@^2.0.0: version "2.0.0" resolved "https://registry.npmjs.org/micromark-util-decode-string/-/micromark-util-decode-string-2.0.0.tgz" @@ -7477,6 +8046,11 @@ micromark-util-decode-string@^2.0.0: micromark-util-decode-numeric-character-reference "^2.0.0" micromark-util-symbol "^2.0.0" +micromark-util-encode@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/micromark-util-encode/-/micromark-util-encode-1.1.0.tgz#92e4f565fd4ccb19e0dcae1afab9a173bbeb19a5" + integrity sha512-EuEzTWSTAj9PA5GOAs992GzNh2dGQO52UvAbtSOMvXTxv3Criqb6IOzJUBCmEqrrXSblJIJBbFFv6zPxpreiJw== + micromark-util-encode@^2.0.0: version "2.0.0" resolved "https://registry.npmjs.org/micromark-util-encode/-/micromark-util-encode-2.0.0.tgz" @@ -7496,11 +8070,23 @@ micromark-util-events-to-acorn@^2.0.0: micromark-util-types "^2.0.0" vfile-message "^4.0.0" +micromark-util-html-tag-name@^1.0.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/micromark-util-html-tag-name/-/micromark-util-html-tag-name-1.2.0.tgz#48fd7a25826f29d2f71479d3b4e83e94829b3588" + integrity sha512-VTQzcuQgFUD7yYztuQFKXT49KghjtETQ+Wv/zUjGSGBioZnkA4P1XXZPT1FHeJA6RwRXSF47yvJ1tsJdoxwO+Q== + micromark-util-html-tag-name@^2.0.0: version "2.0.0" resolved "https://registry.npmjs.org/micromark-util-html-tag-name/-/micromark-util-html-tag-name-2.0.0.tgz" integrity sha512-xNn4Pqkj2puRhKdKTm8t1YHC/BAjx6CEwRFXntTaRf/x16aqka6ouVoutm+QdkISTlT7e2zU7U4ZdlDLJd2Mcw== +micromark-util-normalize-identifier@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/micromark-util-normalize-identifier/-/micromark-util-normalize-identifier-1.1.0.tgz#7a73f824eb9f10d442b4d7f120fecb9b38ebf8b7" + integrity sha512-N+w5vhqrBihhjdpM8+5Xsxy71QWqGn7HYNUvch71iV2PM7+E3uWGox1Qp90loa1ephtCxG2ftRV/Conitc6P2Q== + dependencies: + micromark-util-symbol "^1.0.0" + micromark-util-normalize-identifier@^2.0.0: version "2.0.0" resolved "https://registry.npmjs.org/micromark-util-normalize-identifier/-/micromark-util-normalize-identifier-2.0.0.tgz" @@ -7508,6 +8094,13 @@ micromark-util-normalize-identifier@^2.0.0: dependencies: micromark-util-symbol "^2.0.0" +micromark-util-resolve-all@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/micromark-util-resolve-all/-/micromark-util-resolve-all-1.1.0.tgz#4652a591ee8c8fa06714c9b54cd6c8e693671188" + integrity sha512-b/G6BTMSg+bX+xVCshPTPyAu2tmA0E4X98NSR7eIbeC6ycCqCeE7wjfDIgzEbkzdEVJXRtOG4FbEm/uGbCRouA== + dependencies: + micromark-util-types "^1.0.0" + micromark-util-resolve-all@^2.0.0: version "2.0.0" resolved "https://registry.npmjs.org/micromark-util-resolve-all/-/micromark-util-resolve-all-2.0.0.tgz" @@ -7515,6 +8108,15 @@ micromark-util-resolve-all@^2.0.0: dependencies: micromark-util-types "^2.0.0" +micromark-util-sanitize-uri@^1.0.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/micromark-util-sanitize-uri/-/micromark-util-sanitize-uri-1.2.0.tgz#613f738e4400c6eedbc53590c67b197e30d7f90d" + integrity sha512-QO4GXv0XZfWey4pYFndLUKEAktKkG5kZTdUNaTAkzbuJxn2tNBOr+QtxR2XpWaMhbImT2dPzyLrPXLlPhph34A== + dependencies: + micromark-util-character "^1.0.0" + micromark-util-encode "^1.0.0" + micromark-util-symbol "^1.0.0" + micromark-util-sanitize-uri@^2.0.0: version "2.0.0" resolved "https://registry.npmjs.org/micromark-util-sanitize-uri/-/micromark-util-sanitize-uri-2.0.0.tgz" @@ -7524,6 +8126,16 @@ micromark-util-sanitize-uri@^2.0.0: micromark-util-encode "^2.0.0" micromark-util-symbol "^2.0.0" +micromark-util-subtokenize@^1.0.0: + version "1.1.0" + 
resolved "https://registry.yarnpkg.com/micromark-util-subtokenize/-/micromark-util-subtokenize-1.1.0.tgz#941c74f93a93eaf687b9054aeb94642b0e92edb1" + integrity sha512-kUQHyzRoxvZO2PuLzMt2P/dwVsTiivCK8icYTeR+3WgbuPqfHgPPy7nFKbeqRivBvn/3N3GBiNC+JRTMSxEC7A== + dependencies: + micromark-util-chunked "^1.0.0" + micromark-util-symbol "^1.0.0" + micromark-util-types "^1.0.0" + uvu "^0.5.0" + micromark-util-subtokenize@^2.0.0: version "2.0.1" resolved "https://registry.npmjs.org/micromark-util-subtokenize/-/micromark-util-subtokenize-2.0.1.tgz" @@ -7544,7 +8156,7 @@ micromark-util-symbol@^2.0.0: resolved "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz" integrity sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw== -micromark-util-types@^1.0.0: +micromark-util-types@^1.0.0, micromark-util-types@^1.0.1: version "1.1.0" resolved "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-1.1.0.tgz" integrity sha512-ukRBgie8TIAcacscVHSiddHjO4k/q3pnedmzMQ4iwDcK0FtFCohKOlFbaOL/mPgfnPsL3C1ZyxJa4sbWrBl3jg== @@ -7554,6 +8166,29 @@ micromark-util-types@^2.0.0: resolved "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-2.0.0.tgz" integrity sha512-oNh6S2WMHWRZrmutsRmDDfkzKtxF+bc2VxLC9dvtrDIRFln627VsFP6fLMgTryGDljgLPjkrzQSDcPrjPyDJ5w== +micromark@^3.0.0: + version "3.2.0" + resolved "https://registry.yarnpkg.com/micromark/-/micromark-3.2.0.tgz#1af9fef3f995ea1ea4ac9c7e2f19c48fd5c006e9" + integrity sha512-uD66tJj54JLYq0De10AhWycZWGQNUvDI55xPgk2sQM5kn1JYlhbCMTtEeT27+vAhW2FBQxLlOmS3pmA7/2z4aA== + dependencies: + "@types/debug" "^4.0.0" + debug "^4.0.0" + decode-named-character-reference "^1.0.0" + micromark-core-commonmark "^1.0.1" + micromark-factory-space "^1.0.0" + micromark-util-character "^1.0.0" + micromark-util-chunked "^1.0.0" + micromark-util-combine-extensions "^1.0.0" + micromark-util-decode-numeric-character-reference "^1.0.0" + micromark-util-encode "^1.0.0" + micromark-util-normalize-identifier "^1.0.0" + micromark-util-resolve-all "^1.0.0" + micromark-util-sanitize-uri "^1.0.0" + micromark-util-subtokenize "^1.0.0" + micromark-util-symbol "^1.0.0" + micromark-util-types "^1.0.1" + uvu "^0.5.0" + micromark@^4.0.0: version "4.0.0" resolved "https://registry.npmjs.org/micromark/-/micromark-4.0.0.tgz" @@ -7684,6 +8319,11 @@ mkdirp@0.3.0: resolved "https://registry.npmjs.org/mkdirp/-/mkdirp-0.3.0.tgz" integrity sha512-OHsdUcVAQ6pOtg5JYWpCBo9W/GySVuwvP9hueRMW7UqshC0tbfzLv8wjySTPm3tfUZ/21CE9E1pJagOA91Pxew== +mri@^1.1.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/mri/-/mri-1.2.0.tgz#6721480fec2a11a4889861115a48b6cbe7cc8f0b" + integrity sha512-tzzskb3bG8LvYGFF/mDTpq3jpI6Q9wc3LEmBaghu+DdCssd1FakN7Bc0hVNmEyGq1bq3RgfkCb3cmQLpNPOroA== + mrmime@^2.0.0: version "2.0.0" resolved "https://registry.npmjs.org/mrmime/-/mrmime-2.0.0.tgz" @@ -7770,6 +8410,11 @@ node-releases@^2.0.5: resolved "https://registry.npmjs.org/node-releases/-/node-releases-2.0.5.tgz" integrity sha512-U9h1NLROZTq9uE1SNffn6WuPDg8icmi3ns4rEl/oTfIle4iLjTliCzgTsbaIFMq/Xn078/lfY/BL0GWZ+psK4Q== +non-layered-tidy-tree-layout@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/non-layered-tidy-tree-layout/-/non-layered-tidy-tree-layout-2.0.2.tgz#57d35d13c356643fc296a55fb11ac15e74da7804" + integrity sha512-gkXMxRzUH+PB0ax9dUN0yYF0S25BqeAYqhgMaLUFmpXLEk7Fcu8f4emJuOAY0V8kjDICxROIKsTAKsV/v355xw== + nopt@1.0.10: version "1.0.10" resolved "https://registry.npmjs.org/nopt/-/nopt-1.0.10.tgz" @@ -9235,6 +9880,11 @@ 
rimraf@^3.0.2: dependencies: glob "^7.1.3" +robust-predicates@^3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/robust-predicates/-/robust-predicates-3.0.2.tgz#d5b28528c4824d20fc48df1928d41d9efa1ad771" + integrity sha512-IXgzBWvWQwE6PrDI05OvmXUIruQTcoMDzRsOd5CDvHCVLcLHMTSYvOK5Cm46kWqlV3yAbuSpBZdJ5oP5OUoStg== + rtl-detect@^1.0.4: version "1.0.4" resolved "https://registry.npmjs.org/rtl-detect/-/rtl-detect-1.0.4.tgz" @@ -9257,6 +9907,18 @@ run-parallel@^1.1.9: dependencies: queue-microtask "^1.2.2" +rw@1: + version "1.3.3" + resolved "https://registry.yarnpkg.com/rw/-/rw-1.3.3.tgz#3f862dfa91ab766b14885ef4d01124bfda074fb4" + integrity sha512-PdhdWy89SiZogBLaw42zdeqtRJ//zFd2PgQavcICDUgJT5oW10QCRKbJ6bg4r0/UY2M6BWd5tkxuGFRvCkgfHQ== + +sade@^1.7.3: + version "1.8.1" + resolved "https://registry.yarnpkg.com/sade/-/sade-1.8.1.tgz#0a78e81d658d394887be57d2a409bf703a3b2701" + integrity sha512-xal3CZX1Xlo/k4ApwCFrHVACi9fBqJ7V+mwhBsuf/1IOKbBy098Fex+Wa/5QMubw09pSZ/u8EY8PWgevJsXp1A== + dependencies: + mri "^1.1.0" + safe-array-concat@^1.1.2: version "1.1.2" resolved "https://registry.npmjs.org/safe-array-concat/-/safe-array-concat-1.1.2.tgz" @@ -9982,6 +10644,11 @@ stylehacks@^6.1.1: browserslist "^4.23.0" postcss-selector-parser "^6.0.16" +stylis@^4.1.3: + version "4.3.6" + resolved "https://registry.yarnpkg.com/stylis/-/stylis-4.3.6.tgz#7c7b97191cb4f195f03ecab7d52f7902ed378320" + integrity sha512-yQ3rwFWRfwNUY7H5vpU0wfdkNSnvnJinhF9830Swlaxl03zsOjCfmX0ugac+3LtK0lYSgwL/KXc8oYL3mG4YFQ== + supports-color@^5.3.0: version "5.5.0" resolved "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz" @@ -10132,6 +10799,11 @@ trough@^2.0.0: resolved "https://registry.npmjs.org/trough/-/trough-2.2.0.tgz" integrity sha512-tmMpK00BjZiUyVyvrBK7knerNgmgvcV/KLVyuma/SC+TQN167GrMRciANTz09+k3zW8L8t60jWO1GpfkZdjTaw== +ts-dedent@^2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/ts-dedent/-/ts-dedent-2.2.0.tgz#39e4bd297cd036292ae2394eb3412be63f563bb5" + integrity sha512-q5W7tVM71e2xjHZTlgfTDoPF/SmqKG5hddq9SzR49CH2hayqRKJtQ4mtRlSxKaJlR/+9rEM+mnBHf7I2/BQcpQ== + tsconfig-paths@^3.15.0: version "3.15.0" resolved "https://registry.npmjs.org/tsconfig-paths/-/tsconfig-paths-3.15.0.tgz" @@ -10361,6 +11033,13 @@ unist-util-stringify-position@^2.0.0: dependencies: "@types/unist" "^2.0.2" +unist-util-stringify-position@^3.0.0: + version "3.0.3" + resolved "https://registry.yarnpkg.com/unist-util-stringify-position/-/unist-util-stringify-position-3.0.3.tgz#03ad3348210c2d930772d64b489580c13a7db39d" + integrity sha512-k5GzIBZ/QatR8N5X2y+drfpWG8IDBzdnVj6OInRNWm1oXrzydiaAT2OQiA8DPRRZyAKb9b6I2a6PxYklZD0gKg== + dependencies: + "@types/unist" "^2.0.0" + unist-util-stringify-position@^4.0.0: version "4.0.0" resolved "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-4.0.0.tgz" @@ -10489,6 +11168,21 @@ uuid@^8.3.2: resolved "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz" integrity sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg== +uuid@^9.0.0: + version "9.0.1" + resolved "https://registry.yarnpkg.com/uuid/-/uuid-9.0.1.tgz#e188d4c8853cc722220392c424cd637f32293f30" + integrity sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA== + +uvu@^0.5.0: + version "0.5.6" + resolved "https://registry.yarnpkg.com/uvu/-/uvu-0.5.6.tgz#2754ca20bcb0bb59b64e9985e84d2e81058502df" + integrity sha512-+g8ENReyr8YsOc6fv/NVJs2vFdHBnBNdfE49rshrTzDWOlUx4Gq7KOS2GD8eqhy2j+Ejq29+SbKH8yjkAqXqoA== + 
dependencies: + dequal "^2.0.0" + diff "^5.0.0" + kleur "^4.0.3" + sade "^1.7.3" + value-equal@^1.0.1: version "1.0.1" resolved "https://registry.npmjs.org/value-equal/-/value-equal-1.0.1.tgz" @@ -10572,6 +11266,11 @@ web-namespaces@^2.0.0: resolved "https://registry.npmjs.org/web-namespaces/-/web-namespaces-2.0.1.tgz" integrity sha512-bKr1DkiNa2krS7qxNtdrtHAmzuYGFQLiQ13TsorsdT6ULTkPLKuu5+GsFpDlg6JFjUTwX2DyhMPG2be8uPrqsQ== +web-worker@^1.2.0: + version "1.5.0" + resolved "https://registry.yarnpkg.com/web-worker/-/web-worker-1.5.0.tgz#71b2b0fbcc4293e8f0aa4f6b8a3ffebff733dcc5" + integrity sha512-RiMReJrTAiA+mBjGONMnjVDP2u3p9R1vkcGz6gDIrOMT3oGuYwX2WRMYI9ipkphSuE5XKEhydbhNEJh4NY9mlw== + webpack-bundle-analyzer@^4.9.0: version "4.10.2" resolved "https://registry.npmjs.org/webpack-bundle-analyzer/-/webpack-bundle-analyzer-4.10.2.tgz" From 2db639f2220534d43b20d57be43642608468885d Mon Sep 17 00:00:00 2001 From: vinicius douglas cerutti Date: Tue, 6 Jan 2026 21:48:35 -0300 Subject: [PATCH 4/4] fix broken link --- docs/docs/how-tos/nebari-bare-metal.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/docs/docs/how-tos/nebari-bare-metal.mdx b/docs/docs/how-tos/nebari-bare-metal.mdx index 60cf5ebae..d3ed6ebf3 100644 --- a/docs/docs/how-tos/nebari-bare-metal.mdx +++ b/docs/docs/how-tos/nebari-bare-metal.mdx @@ -29,7 +29,7 @@ This how-to guide covers deploying Nebari on bare metal infrastructure using K3s :::info About This Guide -This replaces the deprecated `nebari-slurm` project, providing a modern Kubernetes-based approach for bare metal deployments. For cloud deployments, see [Deploy on Existing Kubernetes](/docs/how-tos/nebari-kubernets). +This replaces the deprecated `nebari-slurm` project, providing a modern Kubernetes-based approach for bare metal deployments. For cloud deployments, see [Deploy on Existing Kubernetes](/docs/how-tos/nebari-kubernetes). ::: --- @@ -271,25 +271,25 @@ A production deployment uses: %%{init: {'flowchart': {'curve': 'linear'}}}%% flowchart TB VIP["Virtual IP
192.168.1.100
(KubeVIP)"] - + CP1["Control Node 1
8 vCPU / 32 GB"] CP2["Control Node 2
4 vCPU / 16 GB"] CP3["Control Node 3
4 vCPU / 16 GB"] - + W1["User Node"] W2["Worker Node 1"] W3["Worker Node 2"] - + LB["MetalLB
192.168.1.200-220"] - + VIP --> CP1 VIP --> CP2 VIP --> CP3 - + CP1 --> W1 CP1 --> W2 CP1 --> W3 - + LB -.-> W1 LB -.-> W2 ```