@@ -65,15 +65,23 @@ variable "crdb_hostname_suffix" {
EOT
}

variable "cluster_name" {
variable "datastore_type" {
type = string
description = <<-EOT
Name of the kubernetes cluster that will host this DSS instance (should generally describe the DSS instance being hosted)
Type of datastore used

Example: `dss-che-1`
Supported technologies: cockroachdb, yugabyte
EOT

validation {
condition = contains(["cockroachdb", "yugabyte"], var.datastore_type)
error_message = "Supported technologies: cockroachdb, yugabyte"
}

default = "cockroachdb"
}


variable "node_count" {
type = number
description = <<-EOT
@@ -84,12 +92,21 @@ variable "node_count" {
EOT

validation {
condition = contains([1, 3], var.node_count)
error_message = "Currently, only 1 node or 3 nodes deployments are supported."
condition = (var.datastore_type == "cockroach" && contains([1, 3], var.node_count)) || (var.datastore_type == "yugabyte" && var.node_count > 0)
error_message = "Currently, only 1 node or 3 nodes deployments are supported for CockroachDB. If you use Yugabyte, you need to have at least one node."
}
}


variable "cluster_name" {
type = string
description = <<-EOT
Name of the kubernetes cluster that will host this DSS instance (should generally describe the DSS instance being hosted)

Example: `dss-che-1`
EOT
}

variable "kubernetes_version" {
type = string
description = <<-EOT
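For illustration, a minimal sketch of how the new variables above might be set in a .tfvars file (the file name and values are assumptions; the remaining required variables are omitted):

# terraform.tfvars (illustrative sketch only)
datastore_type = "yugabyte"   # or "cockroachdb" (the default)
node_count     = 3            # CockroachDB: 1 or 3; Yugabyte: at least 1
cluster_name   = "dss-che-1"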
132 changes: 131 additions & 1 deletion deploy/infrastructure/dependencies/terraform-commons-dss/helm.tf
@@ -2,11 +2,15 @@ locals {
# Tanka defines the variable below by itself. For Helm, since we are using the official CRDB Helm chart,
# the following variable has to be provided here.
helm_crdb_statefulset_name = "dss-cockroachdb"

# This pre-command is used below in yugabyte deployments to make the local IP point to the public hostname we want to use, until https://github.com/yugabyte/yugabyte-db/issues/27367 is fixed
yugabyte_precommand_prefix = "sed -E \"/\\.svc\\.cluster\\.local/ s/^([0-9]+\\.[0-9]+\\.[0-9]+\\.[0-9]+)([[:space:]]+)/\\1 $(echo \"$${HOSTNAMENO}."
yugabyte_precommand_suffix = ".${var.crdb_hostname_suffix}\" | sed 's/[\\/&]/\\\\&/g')\\2/\" /etc/hosts > /tmp/newhosts && /bin/cp /tmp/newhosts /etc/hosts && \\"
}
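To make the effect of this workaround concrete, a rough before/after sketch of the /etc/hosts entry rewritten inside a master pod with index 0 (the IP address, pod name, and a crdb_hostname_suffix of db.dss.example.com are assumptions):

# before:
#   10.0.1.23    yb-master-0.yb-masters.default.svc.cluster.local    yb-master-0
# after the sed rewrite, the public broadcast hostname also resolves to the pod's local IP:
#   10.0.1.23 0.master.db.dss.example.com    yb-master-0.yb-masters.default.svc.cluster.local    yb-master-0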

resource "local_file" "helm_chart_values" {
filename = "${local.workspace_location}/helm_values.yml"
content = yamlencode({
content = var.datastore_type == "cockroachdb" ? yamlencode({
cockroachdb = {
image = {
tag = var.crdb_image_tag
@@ -66,8 +70,134 @@ resource "local_file" "helm_chart_values" {
}
}

global = {
cloudProvider = var.kubernetes_cloud_provider_name
}
}) : yamlencode({
cockroachdb = {
enabled = false
}
yugabyte = {
enabled = true

resource = var.yugabyte_light_resources ? {
Contributor
Let's remove this and address it in another PR if needed. Since there is no autoscaling, this would only change the requested resources without changing the type of nodes provisioned.

Contributor
Though, I would expect to see a configuration flag similar to https://github.com/interuss/dss/pull/1190/files#diff-f08dfea212cbd81c9b94266f15255cfd4c4b381b146e7b42c2049c77fbb5560eR24 to indicate the number of nodes.

Contributor Author
It's needed when testing on a small cluster (e.g. by following the default values / README) because the services request a lot of resources; I would suggest keeping it to ease quick / test deployments without needing to spawn a big cluster.

Not sure how it's related to autoscaling, however?

(Will add a configuration flag for the number of nodes.)

Contributor Author
(Number of nodes support added)

master = {
requests = {
cpu = "0.1"
memory = "0.5G"
}
}
tserver = {
requests = {
cpu = "0.1"
memory = "0.5G"
}
}
} : {}
enableLoadBalancer = false

replicas = {
master = var.node_count
tserver = var.node_count
totalMasters = length(var.yugabyte_external_nodes) + var.node_count
}

master = {
extraEnv = [{
name = "HOSTNAMENO"
valueFrom = {
fieldRef = {
fieldPath = "metadata.labels['apps.kubernetes.io/pod-index']"
}
}
}]
serverBroadcastAddress : "$${HOSTNAMENO}.master.${var.crdb_hostname_suffix}"
rpcBindAddress : "$${HOSTNAMENO}.master.${var.crdb_hostname_suffix}"
advanced = {
preCommands : "${local.yugabyte_precommand_prefix}master${local.yugabyte_precommand_suffix}"
}
}

tserver = {
extraEnv = [{
name = "HOSTNAMENO"
valueFrom = {
fieldRef = {
fieldPath = "metadata.labels['apps.kubernetes.io/pod-index']"
}
}
}]
serverBroadcastAddress : "$${HOSTNAMENO}.tserver.${var.crdb_hostname_suffix}"
rpcBindAddress : "$${HOSTNAMENO}.tserver.${var.crdb_hostname_suffix}"
advanced = {
preCommands : "${local.yugabyte_precommand_prefix}tserver${local.yugabyte_precommand_suffix}"
}
}

gflags = {
master = {
placement_cloud : var.yugabyte_cloud
placement_region : var.yugabyte_region
placement_zone : var.yugabyte_zone
use_private_ip : "zone"
}
tserver = {
placement_cloud : var.yugabyte_cloud
placement_region : var.yugabyte_region
placement_zone : var.yugabyte_zone
use_private_ip : "zone"
}
}

masterAddresses = join(",", concat([
for i in range(var.node_count) : format("%s.master.${var.crdb_hostname_suffix}", i)
], var.yugabyte_external_nodes))
}

loadBalancers = {
cockroachdbNodes = []

yugabyteMasterNodes = [
for ip in var.yugabyte_internal_masters_nodes[*].ip :
{
ip = ip
subnet = var.workload_subnet
}
]

yugabyteTserverNodes = [
for ip in var.yugabyte_internal_tservers_nodes[*].ip :
{
ip = ip
subnet = var.workload_subnet
}
]

dssGateway = {
ip = var.ip_gateway
subnet = var.workload_subnet
certName = var.gateway_cert_name
sslPolicy = var.ssl_policy
}
}

dss = {
image = var.image

conf = {
pubKeys = [
"/test-certs/auth2.pem"
]
jwksEndpoint = var.authorization.jwks != null ? var.authorization.jwks.endpoint : ""
jwksKeyIds = var.authorization.jwks != null ? [var.authorization.jwks.key_id] : []
hostname = var.app_hostname
enableScd = var.enable_scd
}
}

global = {
cloudProvider = var.kubernetes_cloud_provider_name
}
})

}
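To visualise the output, an abridged sketch of the helm_values.yml rendered when datastore_type is "yugabyte" (the hostnames, node count of 3, and cloud provider are assumptions, and yamlencode does not preserve key order):

# helm_values.yml (abridged, illustrative)
cockroachdb:
  enabled: false
yugabyte:
  enabled: true
  enableLoadBalancer: false
  replicas:
    master: 3
    tserver: 3
    totalMasters: 3
  masterAddresses: "0.master.db.dss.example.com,1.master.db.dss.example.com,2.master.db.dss.example.com"
global:
  cloudProvider: google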
@@ -1,5 +1,6 @@

resource "local_file" "make_certs" {
count = var.datastore_type == "cockroachdb" ? 1 : 0
content = templatefile("${path.module}/templates/make-certs.sh.tmp", {
cluster_context = var.kubernetes_context_name
namespace = var.kubernetes_namespace
@@ -10,13 +11,25 @@ }
}

resource "local_file" "apply_certs" {
count = var.datastore_type == "cockroachdb" ? 1 : 0
content = templatefile("${path.module}/templates/apply-certs.sh.tmp", {
cluster_context = var.kubernetes_context_name
namespace = var.kubernetes_namespace
})
filename = "${local.workspace_location}/apply-certs.sh"
}

resource "local_file" "dss_certs" {
count = var.datastore_type == "yugabyte" ? 1 : 0
content = templatefile("${path.module}/templates/dss-certs.sh.tmp", {
cluster_context = var.kubernetes_context_name
namespace = var.kubernetes_namespace
crdb_hostname_suffix = var.crdb_hostname_suffix
node_count = var.node_count
})
filename = "${local.workspace_location}/dss-certs.sh"
}

resource "local_file" "get_credentials" {
content = templatefile("${path.module}/templates/get-credentials.sh.tmp", {
get_credentials_cmd = var.kubernetes_get_credentials_cmd
@@ -0,0 +1,17 @@
#!/usr/bin/env bash

# This file was automatically generated by terraform-commons-dss.
# Do not edit it directly.

set -eo pipefail

OS=$(uname)
if [[ "$OS" == "Darwin" ]]; then
# OSX uses BSD readlink
BASEDIR="$(dirname "$0")"
else
BASEDIR=$(readlink -e "$(dirname "$0")")
fi
cd "$BASEDIR/../../../deploy/operations/certificates-management/" || exit 1

./dss-certs.py --name ${cluster_context} --organization default_orga --cluster-context ${cluster_context} --nodes-public-address "<ID>.<TYPE>.${crdb_hostname_suffix}" --namespace ${namespace} --nodes-count ${node_count} "$@"
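For illustration, with the template variables filled in (the cluster context dss-che-1, namespace default, hostname suffix db.dss.example.com and node count 3 below are assumptions), the last line of the generated dss-certs.sh would look roughly like:

./dss-certs.py --name dss-che-1 --organization default_orga --cluster-context dss-che-1 --nodes-public-address "<ID>.<TYPE>.db.dss.example.com" --namespace default --nodes-count 3 "$@"

Any additional arguments passed to dss-certs.sh are forwarded to dss-certs.py via "$@".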
@@ -22,6 +22,39 @@ variable "crdb_hostname_suffix" {
EOT
}

variable "datastore_type" {
type = string
description = <<-EOT
Type of datastore used

Supported technologies: cockroachdb, yugabyte
EOT

validation {
condition = contains(["cockroachdb", "yugabyte"], var.datastore_type)
error_message = "Supported technologies: cockroachdb, yugabyte"
}

default = "cockroachdb"
}


variable "node_count" {
type = number
description = <<-EOT
Number of Kubernetes nodes, which should correspond to the desired number of datastore nodes.
For CockroachDB, only single-node or three-node deployments are supported; Yugabyte requires at least one node.

Example: `3`
EOT

validation {
condition = (var.datastore_type == "cockroach" && contains([1, 3], var.node_count)) || (var.datastore_type == "yugabyte" && var.node_count > 0)
error_message = "Currently, only 1 node or 3 nodes deployments are supported for CockroachDB. If you use Yugabyte, you need to have at least one node."
}
}


variable "image" {
type = string
description = <<-EOT
@@ -225,3 +258,62 @@ variable "kubernetes_namespace" {
}
}

variable "yugabyte_cloud" {
type = string
description = <<-EOT
Cloud of yugabyte instances, used for partitioning.

Should be set to dss unless you're doing advanced partitioning.
EOT

default = "dss"
}


variable "yugabyte_region" {
type = string
description = <<-EOT
Region of yugabyte instances, used for partitioning.

Should be different from the other USSes in a cluster.
EOT

default = "uss-1"
}


variable "yugabyte_zone" {
type = string
description = <<-EOT
Zone of yugabyte instances, used for partitioning.

Should be set to zone unless you're doing advanced partitioning.
EOT

default = "zone"
}


variable "yugabyte_light_resources" {
type = bool
description = <<-EOT
Enable light resource reservations for yugabyte instances.

Useful for a dev cluster when you don't want to overload your Kubernetes cluster.
EOT

default = false
}


variable "yugabyte_external_nodes" {
type = list(string)
description = <<-EOT
Fully-qualified domain names of existing yugabyte master nodes outside of the cluster, if you are joining an existing pool.
Example: ["0.master.db.dss.example.com", "1.master.db.dss.example.com", "2.master.db.dss.example.com"]
EOT
default = []
}
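As an illustrative sketch, a deployment joining an existing pool might set these variables roughly as follows (the region name and hostnames are assumptions):

yugabyte_region         = "uss-2"   # should differ from the other USSes sharing the pool
yugabyte_external_nodes = [
  "0.master.db.dss.partner.example.com",
  "1.master.db.dss.partner.example.com",
  "2.master.db.dss.partner.example.com",
]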



@@ -29,6 +29,22 @@ variable "crdb_internal_nodes" {
description = "List of the IP addresses and related dns for the Cockroach DB nodes"
}

variable "yugabyte_internal_masters_nodes" {
type = list(object({
dns = string
ip = string
}))
description = "List of the IP addresses and related dns for the Yugabyte DB master nodes"
}

variable "yugabyte_internal_tservers_nodes" {
type = list(object({
dns = string
ip = string
}))
description = "List of the IP addresses and related dns for the Yugabyte DB tserver nodes"
}
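For illustration, a hand-written sketch of these inputs (the addresses and DNS names are assumptions; in practice they would normally come from the infrastructure module's reserved addresses):

yugabyte_internal_masters_nodes = [
  { dns = "0.master.db.dss.example.com", ip = "10.0.10.10" },
  { dns = "1.master.db.dss.example.com", ip = "10.0.10.11" },
]

yugabyte_internal_tservers_nodes = [
  { dns = "0.tserver.db.dss.example.com", ip = "10.0.10.20" },
  { dns = "1.tserver.db.dss.example.com", ip = "10.0.10.21" },
]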

variable "ip_gateway" {
type = string
description = "IP of the gateway used by the DSS service"