1 change: 1 addition & 0 deletions python-pulumi/src/ptd/__init__.py
@@ -361,6 +361,7 @@ class NodeGroupConfig:
    labels: dict[str, str] = dataclasses.field(default_factory=dict)
    ami_type: str | None = None  # If None, will use cluster default
    desired_size: int | None = None  # If None, will use min_size
+   system_nodes: bool = False  # When True, nodes are labeled posit.team/node-role=system and excluded from prepull


@dataclasses.dataclass(frozen=True)
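For context, a minimal sketch of how the new flag might be set when declaring an additional node group; only the fields visible in this hunk are confirmed by the diff, and the call assumes any remaining NodeGroupConfig fields have defaults or are elided:

# Hypothetical usage sketch (not from this PR): a node group reserved for
# system workloads. Only labels, ami_type, desired_size, and system_nodes
# are confirmed by this diff; other NodeGroupConfig fields are elided.
system_ng = NodeGroupConfig(
    labels={"team": "platform"},  # user labels; the role label is merged in at build time
    ami_type=None,                # None -> cluster default AMI type
    desired_size=None,            # None -> falls back to min_size
    system_nodes=True,            # label nodes posit.team/node-role=system, skip prepull
)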
2 changes: 2 additions & 0 deletions python-pulumi/src/ptd/aws_workload.py
@@ -105,6 +105,7 @@ class KarpenterNodePool:
    weight: int = 100
    root_volume_size: str = "100Gi"
    session_taints: bool = False  # Default False, opt-in for session isolation
+   system_nodes: bool = False  # When True, nodes are labeled posit.team/node-role=system and excluded from prepull
    # Overprovisioning configuration per nodepool
    overprovisioning_replicas: int = 0  # Number of overprovisioning pods for this pool (0 = disabled)
    overprovisioning_cpu_request: str | None = None  # CPU request per overprovisioning pod
@@ -381,6 +382,7 @@ def _load_workload_cluster_config_dict(
        weight=pool_spec.get("weight", 100),
        root_volume_size=pool_spec.get("root_volume_size", "100Gi"),
        session_taints=session_taints,
+       system_nodes=pool_spec.get("system_nodes", False),
        overprovisioning_replicas=pool_spec.get("overprovisioning_replicas", 0),
        overprovisioning_cpu_request=pool_spec.get("overprovisioning_cpu_request"),
        overprovisioning_memory_request=pool_spec.get("overprovisioning_memory_request"),
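As a sketch, a pool entry in the workload cluster config might opt in like this; the keys mirror the pool_spec.get(...) calls above, while the surrounding config structure is assumed rather than shown in this PR:

# Hypothetical pool_spec entry (not from this PR): keys mirror the
# pool_spec.get(...) calls above; everything else is assumed.
pool_spec = {
    "weight": 100,
    "root_volume_size": "100Gi",
    "system_nodes": True,            # opt this pool into the system role label
    "overprovisioning_replicas": 0,  # overprovisioning stays disabled
}

# system_nodes defaults to False, so existing configs are unaffected.
assert pool_spec.get("system_nodes", False) is True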
@@ -107,6 +107,11 @@ def _build_with_vpc_config(
        # Create additional node groups if configured
        if cluster_cfg.additional_node_groups:
            for ng_name, ng_config in cluster_cfg.additional_node_groups.items():
+               # Merge system node label if system_nodes is True
+               labels = dict(ng_config.labels)
+               if ng_config.system_nodes:
+                   labels["posit.team/node-role"] = "system"
+
                self._create_node_group(
                    cluster_release=cluster_release,
                    node_group_name=ng_name,
@@ -121,7 +126,7 @@
                    desired_size=ng_config.desired_size
                    or ng_config.min_size,  # Use desired_size if specified, otherwise min_size
                    taints=ng_config.taints,
-                   labels=ng_config.labels,
+                   labels=labels,
                )

        self._define_tigera_operator(cluster_release)
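A note on the merge above: it is copy-then-set, so user-supplied labels survive and the role label wins on a key conflict, without mutating the frozen dataclass. A minimal illustration:

# Illustration of the label merge: dict(ng_config.labels) copies the mapping
# so the frozen dataclass is never mutated, and the role label overrides any
# user-supplied value for the same key.
user_labels = {"team": "platform", "posit.team/node-role": "custom"}
labels = dict(user_labels)
labels["posit.team/node-role"] = "system"
assert user_labels["posit.team/node-role"] == "custom"  # original untouched
assert labels == {"team": "platform", "posit.team/node-role": "system"}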
4 changes: 4 additions & 0 deletions python-pulumi/src/ptd/pulumi_resources/aws_workload_helm.py
@@ -1037,6 +1037,10 @@ def get_nodegroup_names():
"disruption": {"consolidationPolicy": "WhenEmptyOrUnderutilized", "consolidateAfter": "5m"},
}

# Add system node label if specified (prepull pods will avoid these nodes)
if node_pool.system_nodes:
nodepool_spec["template"]["metadata"] = {"labels": {"posit.team/node-role": "system"}}

# Add weight for NodePool priority
nodepool_spec["weight"] = node_pool.weight

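The diff only attaches the label; the prepull exclusion itself is not shown in this PR. One plausible mechanism, sketched below under that assumption, is a required node-affinity term on the prepull pods that rejects nodes carrying the role label:

# Hypothetical prepull pod affinity (not shown in this PR): skip any node
# labeled posit.team/node-role=system via a required node-affinity term.
prepull_affinity = {
    "nodeAffinity": {
        "requiredDuringSchedulingIgnoredDuringExecution": {
            "nodeSelectorTerms": [
                {
                    "matchExpressions": [
                        {
                            "key": "posit.team/node-role",
                            "operator": "NotIn",
                            "values": ["system"],
                        }
                    ]
                }
            ]
        }
    }
}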