Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
32 changes: 23 additions & 9 deletions lb.tf
Original file line number Diff line number Diff line change
Expand Up @@ -11,20 +11,34 @@ locals {
probe_name = "es-probe-transport-internal"
}
}

# Legacy load balancer config (for backward compatibility with var.vms)
legacy_lb_config = length(var.vms) > 0 ? {
legacy = {
name = "ccd-internal-${var.env}-lb"
resource_group_name = "ccd-elastic-search-${var.env}"
lb_private_ip_address = var.lb_private_ip_address
vms = var.vms
}
} : {}

# Merge legacy and cluster-based load balancers
all_load_balancers = merge(local.legacy_lb_config, local.cluster_load_balancers)
}

# Main environment load balancer
module "main_lb" {
source = "./modules/load-balancer"
# Dynamic load balancers for each cluster or legacy config
module "load_balancers" {
for_each = local.all_load_balancers
source = "./modules/load-balancer"

name = "ccd-internal-${var.env}-lb"
name = each.value.name
location = var.location
resource_group_name = "ccd-elastic-search-${var.env}"
resource_group_name = each.value.resource_group_name
subnet_id = data.azurerm_subnet.elastic-subnet.id
ip_address = var.lb_private_ip_address
frontend_name = "LBFE"
backend_name = "LBBE"
vms = var.vms
ip_address = each.value.lb_private_ip_address
frontend_name = "LBFE-${upper(each.key)}"
backend_name = "LBBE-${upper(each.key)}"
vms = each.value.vms
virtual_network_id = data.azurerm_virtual_network.core_infra_vnet.id
ports = local.lb_ports
tags = module.ctags.common_tags
Expand Down
2 changes: 1 addition & 1 deletion main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -82,7 +82,7 @@ data "azurerm_monitor_data_collection_rule" "linux_data_collection_rule" {
}

resource "azurerm_monitor_data_collection_rule_association" "linux_vm_dcra" {
for_each = var.vms
for_each = local.all_vms

name = "vm-${each.value.name}-${var.env}-dcra"
target_resource_id = module.elastic2[each.key].vm_id
Expand Down
78 changes: 78 additions & 0 deletions moved.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,78 @@
# Moved statements to transition from legacy vms variable to elastic_search_clusters
# These statements ensure existing resources are not destroyed and recreated
#
# Usage:
# 1. Apply these moved blocks along with your new configuration
# 2. Run: terraform plan -var-file=sandbox.tfvars
# 3. Verify that resources show as "moved" rather than "destroy/create"
# 4. Apply: terraform apply -var-file=sandbox.tfvars
# 5. After successful apply, you can optionally remove this file (moved blocks are only needed once)

# Move VM module instances from old keys to new flattened keys
# Old key format: "ccd-data-0", "ccd-data-1", etc.
# New key format: "default-0", "default-1", etc.

# The legacy var.vms map used the VM name as the for_each key ("ccd-data-N");
# the cluster-based config keys instances as "<cluster key>-<index>", so the
# existing "default" cluster instances map to "default-N". Each moved block
# rebinds one VM module instance in state without destroy/create.
moved {
from = module.elastic2["ccd-data-0"]
to = module.elastic2["default-0"]
}

moved {
from = module.elastic2["ccd-data-1"]
to = module.elastic2["default-1"]
}

moved {
from = module.elastic2["ccd-data-2"]
to = module.elastic2["default-2"]
}

moved {
from = module.elastic2["ccd-data-3"]
to = module.elastic2["default-3"]
}

# Rekey the monitor data-collection-rule associations to match: their
# for_each now iterates local.all_vms (see main.tf), which uses the same
# "<cluster key>-<index>" keys as module.elastic2 above.
moved {
from = azurerm_monitor_data_collection_rule_association.linux_vm_dcra["ccd-data-0"]
to = azurerm_monitor_data_collection_rule_association.linux_vm_dcra["default-0"]
}

moved {
from = azurerm_monitor_data_collection_rule_association.linux_vm_dcra["ccd-data-1"]
to = azurerm_monitor_data_collection_rule_association.linux_vm_dcra["default-1"]
}

moved {
from = azurerm_monitor_data_collection_rule_association.linux_vm_dcra["ccd-data-2"]
to = azurerm_monitor_data_collection_rule_association.linux_vm_dcra["default-2"]
}

moved {
from = azurerm_monitor_data_collection_rule_association.linux_vm_dcra["ccd-data-3"]
to = azurerm_monitor_data_collection_rule_association.linux_vm_dcra["default-3"]
}

# Move the load balancer from the single module.main_lb instance into the
# for_each-keyed module.load_balancers. The "to" key must match the key that
# actually exists after the change: with var.vms removed (as sandbox.tfvars
# now does), legacy_lb_config is empty and the LB is created under the
# "default" cluster key, which is what the block below targets. If var.vms is
# still defined, the LB lives under "legacy" instead -- see the note after
# the block and adjust the "to" address accordingly.

# module.main_lb had no for_each; module.load_balancers is keyed per cluster.
# NOTE(review): this targets the "default" key, which assumes var.vms is
# unset so the LB is produced by the "default" cluster entry -- confirm
# against local.all_load_balancers before applying.
moved {
from = module.main_lb
to = module.load_balancers["default"]
}

# Note: If you have kept the vms variable temporarily for backward compatibility,
# the load balancer will be under module.load_balancers["legacy"]
# In that case, use this instead:
# moved {
# from = module.main_lb
# to = module.load_balancers["legacy"]
# }
#
# After the first apply, change it to move from legacy to default:
# moved {
# from = module.load_balancers["legacy"]
# to = module.load_balancers["default"]
# }
24 changes: 12 additions & 12 deletions networking.tf
Original file line number Diff line number Diff line change
Expand Up @@ -34,23 +34,23 @@ resource "azurerm_network_security_rule" "nsg_rules" {
resource_group_name = azurerm_resource_group.this.name
network_security_group_name = azurerm_network_security_group.nsg_group.name

name = each.value.name
description = each.value.description
priority = each.value.priority
direction = each.value.direction
access = each.value.access
protocol = each.value.protocol
source_port_range = each.value.source_port_range
destination_port_range = each.value.destination_port_range
source_address_prefix = each.value.source_address_prefix
destination_port_ranges = each.value.destination_port_ranges
source_address_prefixes = each.value.source_address_prefixes
name = each.value.name
description = each.value.description
priority = each.value.priority
direction = each.value.direction
access = each.value.access
protocol = each.value.protocol
source_port_range = each.value.source_port_range
destination_port_range = each.value.destination_port_range
source_address_prefix = each.value.source_address_prefix
destination_port_ranges = each.value.destination_port_ranges
source_address_prefixes = each.value.source_address_prefixes

# Only set one of destination_address_prefix or destination_application_security_group_ids
destination_address_prefix = each.value.destination_application_security_group_ids == "id" ? null : each.value.destination_address_prefix
destination_application_security_group_ids = each.value.destination_application_security_group_ids == "id" ? [azurerm_application_security_group.this.id] : null

source_application_security_group_ids = each.value.source_application_security_group_ids == "id" ? [azurerm_application_security_group.this.id] : null
source_application_security_group_ids = each.value.source_application_security_group_ids == "id" ? [azurerm_application_security_group.this.id] : null
}

resource "azurerm_network_interface_security_group_association" "association" {
Expand Down
169 changes: 98 additions & 71 deletions sandbox.tfvars
Original file line number Diff line number Diff line change
@@ -1,75 +1,73 @@
vms = {
ccd-data-0 = {
name = "ccd-data-0"
ip = "10.100.157.10"
managed_disks = {
disk1 = {
name = "ccd-data-0-datadisk1"
resource_group_name = "ccd-elastic-search-sandbox"
disk_lun = "0"
}
disk2 = {
name = "ccd-data-0-datadisk2"
resource_group_name = "ccd-elastic-search-sandbox"
disk_lun = "1"
}
}
# LEGACY: Commented out in favor of elastic_search_clusters
# vms = {
# ccd-data-0 = {
# name = "ccd-data-0"
# ip = "10.100.157.10"
# managed_disks = {
# disk1 = {
# name = "ccd-data-0-datadisk1"
# resource_group_name = "ccd-elastic-search-sandbox"
# disk_lun = "0"
# }
# disk2 = {
# name = "ccd-data-0-datadisk2"
# resource_group_name = "ccd-elastic-search-sandbox"
# disk_lun = "1"
# }
# }
# }
# ccd-data-1 = {
# name = "ccd-data-1"
# ip = "10.100.157.11"
# managed_disks = {
# disk1 = {
# name = "ccd-data-1-datadisk1"
# resource_group_name = "ccd-elastic-search-sandbox"
# disk_lun = "0"
# }
# disk2 = {
# name = "ccd-data-1-datadisk2"
# resource_group_name = "ccd-elastic-search-sandbox"
# disk_lun = "1"
# }
# }
# }
# ccd-data-2 = {
# name = "ccd-data-2"
# ip = "10.100.157.12"
# managed_disks = {
# disk1 = {
# name = "ccd-data-2-datadisk1"
# resource_group_name = "ccd-elastic-search-sandbox"
# disk_lun = "0"
# }
# disk2 = {
# name = "ccd-data-2-datadisk2"
# resource_group_name = "ccd-elastic-search-sandbox"
# disk_lun = "1"
# }
# }
# }
# ccd-data-3 = {
# name = "ccd-data-3"
# ip = "10.100.157.13"
# managed_disks = {
# disk1 = {
# name = "ccd-data-3-datadisk1"
# resource_group_name = "ccd-elastic-search-sandbox"
# disk_lun = "0"
# }
# disk2 = {
# name = "ccd-data-3-datadisk2"
# resource_group_name = "ccd-elastic-search-sandbox"
# disk_lun = "1"
# }
# }
# }
# }

}
ccd-data-1 = {
name = "ccd-data-1"
ip = "10.100.157.11"
managed_disks = {
disk1 = {
name = "ccd-data-1-datadisk1"
resource_group_name = "ccd-elastic-search-sandbox"
disk_lun = "0"
}
disk2 = {
name = "ccd-data-1-datadisk2"
resource_group_name = "ccd-elastic-search-sandbox"
disk_lun = "1"
}
}

}
ccd-data-2 = {
name = "ccd-data-2"
ip = "10.100.157.12"
managed_disks = {
disk1 = {
name = "ccd-data-2-datadisk1"
resource_group_name = "ccd-elastic-search-sandbox"
disk_lun = "0"
}
disk2 = {
name = "ccd-data-2-datadisk2"
resource_group_name = "ccd-elastic-search-sandbox"
disk_lun = "1"
}
}

}
ccd-data-3 = {
name = "ccd-data-3"
ip = "10.100.157.13"
managed_disks = {
disk1 = {
name = "ccd-data-3-datadisk1"
resource_group_name = "ccd-elastic-search-sandbox"
disk_lun = "0"
}
disk2 = {
name = "ccd-data-3-datadisk2"
resource_group_name = "ccd-elastic-search-sandbox"
disk_lun = "1"
}
}

}
}

lb_private_ip_address = "10.100.157.254"
# LEGACY: No longer needed when using elastic_search_clusters with lb_private_ip_address per cluster
# lb_private_ip_address = "10.100.157.254"

soc_vault_name = "soc-sbox"

Expand Down Expand Up @@ -211,3 +209,32 @@ nsg_security_rules = {
destination_address_prefix = "*"
}
}

# Cluster-based replacement for the legacy vms map above. The "default"
# cluster reproduces the four existing VMs (ccd-data-0..3) with the same
# static IPs and two data disks each, plus the LB frontend IP previously
# supplied via the top-level lb_private_ip_address variable.
elastic_search_clusters = {
default = {
instance_count = 4
# assumes name_template is format()-expanded with the instance index
# elsewhere, yielding "ccd-data-0".."ccd-data-3" -- TODO confirm in locals
name_template = "ccd-data-%d"
data_disks = 2
# Static private IPs keyed by instance index; must stay identical to the
# legacy per-VM "ip" values so the NICs are not recreated.
private_ip_allocation = {
0 = "10.100.157.10"
1 = "10.100.157.11"
2 = "10.100.157.12"
3 = "10.100.157.13"
}
# Same frontend IP the legacy top-level lb_private_ip_address used.
lb_private_ip_address = "10.100.157.254"
storage_account_type = "StandardSSD_LRS"
}
# Example: Add additional cluster for upgrade testing
# upgrade = {
# instance_count = 4
# name_template = "ccd-data-upgrade-%d"
# data_disks = 2
# private_ip_allocation = {
# 0 = "10.100.157.20"
# 1 = "10.100.157.21"
# 2 = "10.100.157.22"
# 3 = "10.100.157.23"
# }
# lb_private_ip_address = "10.100.157.253"
# }
}
29 changes: 24 additions & 5 deletions variables.tf
Original file line number Diff line number Diff line change
Expand Up @@ -122,8 +122,28 @@ variable "vms" {
disk_lun = string
}))
}))
default = {
}
default = {}
description = "DEPRECATED: Use elastic_search_clusters instead. VM configuration for backward compatibility."
}

# Cluster-oriented replacement for the per-VM "vms" map: one entry per
# Elasticsearch cluster, expanded into instance_count VM instances elsewhere.
variable "elastic_search_clusters" {
type = map(object({
instance_count = number # number of VMs created for this cluster
name_template = string # VM name pattern, e.g. "ccd-data-%d" -- presumably format()-expanded with the index; TODO confirm
data_disks = number # managed data disks attached to each VM
private_ip_allocation = optional(map(string), {}) # static IPs keyed by instance index; empty map defaults to dynamic allocation
resource_group_name = optional(string) # override; module default used when null
vm_publisher_name = optional(string) # image reference overrides; defaults applied elsewhere when null
vm_offer = optional(string)
vm_sku = optional(string)
vm_version = optional(string)
vm_size = optional(string)
availability_set_name = optional(string)
lb_private_ip_address = optional(string) # frontend IP for this cluster's load balancer
storage_account_type = optional(string, "StandardSSD_LRS") # data disk SKU
}))
default = {}
description = "Configuration for Elasticsearch clusters. Each cluster will create multiple VMs based on instance_count."
}

variable "vms_demo_int" {
Expand All @@ -138,9 +158,8 @@ variable "vms_demo_int" {
attachment_create_option = optional(string, "Empty")
}))
}))
default = {
}
description = "VM configuration for demo-int env"
default = {}
description = "DEPRECATED: Use elastic_search_clusters instead. VM configuration for demo-int env for backward compatibility."
}

variable "enable_demo_int" {
Expand Down
Loading