diff --git a/modules/perforce/README.md b/modules/perforce/README.md index bcd7bae0..9a600eee 100644 --- a/modules/perforce/README.md +++ b/modules/perforce/README.md @@ -7,11 +7,60 @@ For a video walkthrough demonstrating how to use this module, see this YouTube V ## Features - Dynamic creation and configuration of [P4 Server (formerly Helix Core)](https://www.perforce.com/products/helix-core) +- **P4 Server Replica Support** - Deploy multiple P4 server replicas for high availability, load distribution, and geographic distribution - Dynamic creation and configuration of [P4 Code Review (formerly Helix Swarm)](https://www.perforce.com/products/helix-swarm) - Dynamic creation and configuration of [P4Auth (formerly Helix Authentication Service)](https://help.perforce.com/helix-core/integrations-plugins/helix-auth-svc/current/Content/HAS/overview-of-has.html) +## P4 Server Replica Support + +This module supports deploying P4 server replicas for: + +- **High Availability**: Multi-AZ deployment with standby replicas +- **Load Distribution**: Read-only replicas for CI/CD systems and build agents +- **Geographic Distribution**: Edge replicas for global development teams +- **Disaster Recovery**: Cross-region standby replicas + +### Replica Types + +- **Standby**: Full replica that can be promoted to primary during failover +- **Read-only**: Optimized for read operations, perfect for CI/CD systems +- **Forwarding**: Local commits forwarded to primary, ideal for small remote teams +- **Edge**: Full P4 server for major regional offices + +### Example Configuration + +```hcl +module "perforce" { + source = "./modules/perforce" + + p4_server_config = { + fully_qualified_domain_name = "perforce.yourdomain.com" + instance_subnet_id = aws_subnet.primary.id + } + + p4_server_replicas_config = { + "standby-replica" = { + replica_type = "standby" + subdomain = "standby" + vpc_id = aws_vpc.main.id + instance_subnet_id = aws_subnet.standby.id + } + "ci-replica" = { + replica_type = 
"readonly" + subdomain = "ci" + vpc_id = aws_vpc.main.id + instance_subnet_id = aws_subnet.ci.id + } + } +} +``` + +For complete examples, see: +- [Single-Region Replicas](./examples/replica-single-region/) - Multi-AZ high availability +- [Cross-Region Replicas](./examples/replica-cross-region/) - Geographic distribution + ## Architecture ### Full example using AWS Route53 Public Hosted Zone diff --git a/modules/perforce/REPLICA_IMPLEMENTATION.md b/modules/perforce/REPLICA_IMPLEMENTATION.md new file mode 100644 index 00000000..9c21cdc7 --- /dev/null +++ b/modules/perforce/REPLICA_IMPLEMENTATION.md @@ -0,0 +1,110 @@ +# P4 Server Replica Implementation Summary + +## Overview + +This implementation adds comprehensive P4 server replica support to the Perforce Terraform module, enabling high availability, load distribution, and geographic distribution of Perforce servers. + +## Key Features Implemented + +### 1. Enhanced Variable Structure +- Added `p4_server_replicas_config` variable with full inheritance from primary server configuration +- Supports all P4 server configuration options with replica-specific overrides +- Automatic FQDN generation with customizable subdomains + +### 2. Replica Types Supported +- **Standby**: Full replica for disaster recovery and failover +- **Read-only**: Optimized for CI/CD systems and build agents +- **Forwarding**: Local commits forwarded to primary (small remote teams) +- **Edge**: Full P4 server for major regional offices + +### 3. Infrastructure Components +- **S3 Bucket**: Stores replica configuration scripts +- **SSM Associations**: Automated configuration of primary and replica servers +- **Route53 DNS**: Automatic DNS record creation for replicas +- **Security Groups**: Proper networking for P4 replication traffic + +### 4. Configuration Scripts +- `configure_primary_for_replicas.sh`: Sets up primary server for replication +- `configure_replica.sh`: Configures replica servers based on type + +### 5. 
Examples Provided +- **Single-Region**: Multi-AZ deployment for high availability +- **Cross-Region**: Geographic distribution (simplified to multi-AZ for initial implementation) + +## Files Modified/Created + +### Core Module Files +- `variables.tf` - Added `p4_server_replicas_config` variable +- `main.tf` - Added replica module instantiation with inheritance logic +- `locals.tf` - Added replica domain mapping +- `outputs.tf` - Added replica outputs +- `s3.tf` - NEW: S3 bucket and script management +- `ssm.tf` - NEW: SSM associations for replica configuration +- `route53.tf` - Added replica DNS records + +### Example Configurations +- `examples/replica-single-region/` - Complete single-region replica example +- `examples/replica-cross-region/` - Multi-AZ replica example +- Both include comprehensive VPC, security, and DNS configurations + +### Documentation +- Updated main `README.md` with replica documentation +- Created detailed READMEs for both examples +- Added configuration examples and usage instructions + +### Testing +- `tests/03_p4_server_replicas.tftest.hcl` - Basic replica validation tests + +## Usage Example + +```hcl +module "perforce" { + source = "./modules/perforce" + + p4_server_config = { + fully_qualified_domain_name = "perforce.yourdomain.com" + instance_subnet_id = aws_subnet.primary.id + } + + p4_server_replicas_config = { + "standby-replica" = { + replica_type = "standby" + subdomain = "standby" + vpc_id = aws_vpc.main.id + instance_subnet_id = aws_subnet.standby.id + } + "ci-replica" = { + replica_type = "readonly" + subdomain = "ci" + vpc_id = aws_vpc.main.id + instance_subnet_id = aws_subnet.ci.id + instance_type = "c6i.xlarge" # Override for CI workloads + } + } +} +``` + +## Benefits + +1. **High Availability**: Survive single AZ failures with automatic failover +2. **Performance**: Distribute read load across multiple replicas +3. **CI/CD Optimization**: Dedicated replicas for build systems +4. 
**Global Teams**: Support for distributed development teams +5. **Disaster Recovery**: Cross-region standby replicas +6. **Zero Downtime**: Maintenance without service interruption + +## Implementation Notes + +- Replicas inherit all configuration from primary server by default +- Any field can be overridden per replica for customization +- Automatic script execution configures replication after deployment +- DNS records are automatically created for all replicas +- Security groups allow proper P4 replication traffic flow + +## Future Enhancements + +- True cross-region support with provider aliases +- Health check integration for automatic failover +- Monitoring and alerting for replication lag +- Backup and restore automation for replicas +- Performance optimization recommendations \ No newline at end of file diff --git a/modules/perforce/examples/replica-cross-region/README.md b/modules/perforce/examples/replica-cross-region/README.md new file mode 100644 index 00000000..6c142550 --- /dev/null +++ b/modules/perforce/examples/replica-cross-region/README.md @@ -0,0 +1,80 @@ +# Perforce Multi-AZ Replica Example + +This example demonstrates how to deploy a Perforce server with replicas across multiple availability zones for high availability and load distribution. + +## Architecture + +- **Primary P4 Server**: `perforce.yourdomain.com` (AZ 1a) +- **Standby Replica**: `standby.perforce.yourdomain.com` (AZ 1b) - Standby replica for HA +- **Read-only Replica**: `ci.perforce.yourdomain.com` (AZ 1c) - Read-only replica for CI/CD + +## Benefits + +- **High Availability**: Survive single AZ failures within region +- **Load Distribution**: Spread read operations across replicas +- **CI/CD Optimization**: Dedicated replica for build systems +- **Zero Downtime Maintenance**: Promote standby during primary maintenance + +## Prerequisites + +1. AWS credentials configured for multiple regions +2. Route53 hosted zone for your domain +3. 
Sufficient service limits in the target region + +## Usage + +1. Set your Route53 hosted zone: + ```bash + export TF_VAR_route53_public_hosted_zone_name="yourdomain.com" + ``` + +2. Deploy the infrastructure: + ```bash + terraform init + terraform plan + terraform apply + ``` + +3. Access your Perforce servers: + - Primary: `perforce.yourdomain.com:1666` + - Standby: `standby.perforce.yourdomain.com:1666` + - CI/Build: `ci.perforce.yourdomain.com:1666` + +## Multi-AZ Configuration + +### Replica Types +- **Standby**: Full replica that can be promoted to primary during failover +- **Read-only**: Optimized for read operations, perfect for CI/CD systems + +### Health Checks +- Route53 health checks monitor replica availability +- Health check status can be used to drive DNS failover (failover routing policies are not configured in this example) + +## Failover Process + +### Promote Standby Replica +1. Stop primary server +2. SSH to standby replica instance +3. Run: `p4d -r /p4/1 -p 1666 -d -J off` +4. Update DNS to point primary FQDN to standby IP + +## Network Requirements + +### Security Groups +- P4 replication traffic within VPC (port 1666) +- SSH access (port 22) from your IP +- HTTP/HTTPS for web services + +## Monitoring + +- CloudWatch metrics for all instances +- Route53 health checks for replica availability +- Replication lag monitoring for replicas + +## Cleanup + +```bash +terraform destroy +``` + +**Note**: Multi-AZ resources will be destroyed in dependency order. 
\ No newline at end of file diff --git a/modules/perforce/examples/replica-cross-region/dns.tf b/modules/perforce/examples/replica-cross-region/dns.tf new file mode 100644 index 00000000..9b8e667c --- /dev/null +++ b/modules/perforce/examples/replica-cross-region/dns.tf @@ -0,0 +1,119 @@ +########################################## +# Fetch Shared NLB DNS Name and Zone ID +########################################## +data "aws_lb" "shared_services_nlb" { + arn = module.terraform-aws-perforce.shared_network_load_balancer_arn + + depends_on = [module.terraform-aws-perforce] +} + +########################################## +# Fetch Route53 Public Hosted Zone for FQDN +########################################## +data "aws_route53_zone" "root" { + name = var.route53_public_hosted_zone_name + private_zone = false +} + +########################################## +# Perforce External (Public) DNS +########################################## +# Route all external web service traffic (e.g. auth.perforce.example.com, review.perforce.example.com) to the Public NLB +resource "aws_route53_record" "external_perforce_web_services" { + zone_id = data.aws_route53_zone.root.id + name = "*.${local.p4_server_fully_qualified_domain_name}" + type = "A" + alias { + name = data.aws_lb.shared_services_nlb.dns_name + zone_id = data.aws_lb.shared_services_nlb.zone_id + evaluate_target_health = true + } +} + +# Route external web service traffic to the public EIP of the P4 Server +resource "aws_route53_record" "external_perforce_p4_server" { + #checkov:skip=CKV2_AWS_23: Attached to EIP public IP + zone_id = data.aws_route53_zone.root.id + name = "perforce.${data.aws_route53_zone.root.name}" + type = "A" + ttl = 300 + records = [module.terraform-aws-perforce.p4_server_eip_public_ip] +} + +# Route external replica traffic to replica EIPs (cross-region) +resource "aws_route53_record" "external_perforce_replicas" { + for_each = module.terraform-aws-perforce.p4_server_replicas + + 
#checkov:skip=CKV2_AWS_23: Attached to EIP public IP + zone_id = data.aws_route53_zone.root.id + name = each.value.fqdn + type = "A" + ttl = 300 + records = [each.value.public_ip] +} + +# Health checks for cross-region replicas +resource "aws_route53_health_check" "replica_health_checks" { + for_each = module.terraform-aws-perforce.p4_server_replicas + + fqdn = each.value.fqdn + port = 1666 + type = "TCP" + failure_threshold = "3" + request_interval = "30" + # NOTE(review): removed cloudwatch_logs_region (not a valid argument for + # this resource) and cloudwatch_alarm_region / insufficient_data_health_status, + # which only apply to CLOUDWATCH_METRIC health checks, not TCP checks. + + tags = { + Name = "${local.project_prefix}-${each.key}-health-check" + } +} + +########################################## +# P4 Code Review Certificate Management +########################################## +resource "aws_acm_certificate" "perforce" { + domain_name = "*.${local.perforce_subdomain}.${var.route53_public_hosted_zone_name}" + + validation_method = "DNS" + + #checkov:skip=CKV2_AWS_71: Wildcard is necessary for this domain + + tags = { + environment = "dev" + } + + lifecycle { + create_before_destroy = true + } +} + +resource "aws_route53_record" "perforce_cert" { + for_each = { + for dvo in aws_acm_certificate.perforce.domain_validation_options : dvo.domain_name => { + name = dvo.resource_record_name + record = dvo.resource_record_value + type = dvo.resource_record_type + } + } + + allow_overwrite = true + name = each.value.name + records = [each.value.record] + ttl = 60 + type = each.value.type + zone_id = data.aws_route53_zone.root.id +} + +resource "aws_acm_certificate_validation" "perforce" { + certificate_arn = aws_acm_certificate.perforce.arn + validation_record_fqdns = [for record in aws_route53_record.perforce_cert : record.fqdn] + + lifecycle { + create_before_destroy = true + } + timeouts { + create = "15m" + } +} \ No newline at end of file diff --git a/modules/perforce/examples/replica-cross-region/locals.tf 
b/modules/perforce/examples/replica-cross-region/locals.tf new file mode 100644 index 00000000..189c164a --- /dev/null +++ b/modules/perforce/examples/replica-cross-region/locals.tf @@ -0,0 +1,34 @@ +# NOTE(review): removed duplicate data "aws_availability_zones" "available" block; it is already declared in vpc.tf. + +locals { + project_prefix = "cgd" + azs = slice(data.aws_availability_zones.available.names, 0, 2) + + # Subdomains + perforce_subdomain = "perforce" + p4_auth_subdomain = "auth" + p4_code_review_subdomain = "review" + + # P4 Server Domain + p4_server_fully_qualified_domain_name = "${local.perforce_subdomain}.${var.route53_public_hosted_zone_name}" + + # P4Auth Domain + p4_auth_fully_qualified_domain_name = "${local.p4_auth_subdomain}.${local.perforce_subdomain}.${var.route53_public_hosted_zone_name}" + + # P4 Code Review + p4_code_review_fully_qualified_domain_name = "${local.p4_code_review_subdomain}.${local.perforce_subdomain}.${var.route53_public_hosted_zone_name}" + + + # Amazon Certificate Manager (ACM) + certificate_arn = aws_acm_certificate.perforce.arn + + + # VPC Configuration + vpc_cidr_block = "10.0.0.0/16" + public_subnet_cidrs = ["10.0.1.0/24", "10.0.2.0/24"] + private_subnet_cidrs = ["10.0.3.0/24", "10.0.4.0/24"] + + tags = { + environment = "dev" + } +} diff --git a/modules/perforce/examples/replica-cross-region/main.tf b/modules/perforce/examples/replica-cross-region/main.tf new file mode 100644 index 00000000..86b106ca --- /dev/null +++ b/modules/perforce/examples/replica-cross-region/main.tf @@ -0,0 +1,135 @@ +module "terraform-aws-perforce" { + source = "../../" + + providers = { + aws = aws + awscc = awscc + aws.us_west_2 = aws.us_west_2 + awscc.us_west_2 = awscc.us_west_2 + } + + # - Shared - + project_prefix = local.project_prefix + vpc_id = aws_vpc.perforce_vpc.id + + create_route53_private_hosted_zone = true + route53_private_hosted_zone_name = "${local.perforce_subdomain}.${var.route53_public_hosted_zone_name}" + certificate_arn = local.certificate_arn + existing_security_groups = 
[aws_security_group.allow_my_ip.id] + shared_alb_subnets = aws_subnet.private_subnets[*].id + shared_nlb_subnets = aws_subnet.public_subnets[*].id + + # - P4 Server Configuration - + p4_server_config = { + # General + name = "p4-server" + fully_qualified_domain_name = local.p4_server_fully_qualified_domain_name + + # Compute + lookup_existing_ami = false + enable_auto_ami_creation = true + p4_server_type = "p4d_commit" + + # Storage + depot_volume_size = 128 + metadata_volume_size = 32 + logs_volume_size = 32 + + # Networking & Security + instance_subnet_id = aws_subnet.public_subnets[0].id + existing_security_groups = [aws_security_group.allow_my_ip.id] + } + + # - P4 Server Replicas Configuration - + p4_server_replicas_config = { + standby-1b : { + # Replica-specific + replica_type = "standby" + subdomain = "standby" + + # General + name = "p4-server-standby" + project_prefix = local.project_prefix + environment = "dev" + fully_qualified_domain_name = "standby.${local.p4_server_fully_qualified_domain_name}" + + # Compute + lookup_existing_ami = false + instance_type = "c6i.large" + instance_architecture = "x86_64" + p4_server_type = "p4d_replica" + unicode = false + selinux = false + case_sensitive = true + plaintext = false + + # Storage + storage_type = "EBS" + depot_volume_size = 128 + metadata_volume_size = 32 + logs_volume_size = 32 + + # Networking & Security + instance_subnet_id = aws_subnet.public_subnets[1].id + create_default_sg = true + existing_security_groups = [aws_security_group.allow_my_ip.id] + internal = false + }, + + readonly-1c : { + # Replica-specific + replica_type = "readonly" + subdomain = "ci" + + # General + name = "p4-server-ci" + project_prefix = local.project_prefix + environment = "dev" + fully_qualified_domain_name = "ci.${local.p4_server_fully_qualified_domain_name}" + + # Compute + lookup_existing_ami = false + instance_type = "c6i.large" + instance_architecture = "x86_64" + p4_server_type = "p4d_replica" + unicode = false + 
selinux = false + case_sensitive = true + plaintext = false + + # Storage + storage_type = "EBS" + depot_volume_size = 128 + metadata_volume_size = 32 + logs_volume_size = 32 + + # Networking & Security + instance_subnet_id = aws_subnet.public_subnets[2].id + create_default_sg = true + existing_security_groups = [aws_security_group.allow_my_ip.id] + internal = false + } + } + + # - P4Auth Configuration - + p4_auth_config = { + # General + name = "p4-auth" + fully_qualified_domain_name = local.p4_auth_fully_qualified_domain_name + existing_security_groups = [aws_security_group.allow_my_ip.id] + debug = true + deregistration_delay = 0 + service_subnets = aws_subnet.private_subnets[*].id + } + + # - P4 Code Review Configuration - + p4_code_review_config = { + name = "p4-code-review" + fully_qualified_domain_name = local.p4_code_review_fully_qualified_domain_name + existing_security_groups = [aws_security_group.allow_my_ip.id] + debug = true + deregistration_delay = 0 + service_subnets = aws_subnet.private_subnets[*].id + enable_sso = true + } +} diff --git a/modules/perforce/examples/replica-cross-region/outputs.tf b/modules/perforce/examples/replica-cross-region/outputs.tf new file mode 100644 index 00000000..d79ea5c8 --- /dev/null +++ b/modules/perforce/examples/replica-cross-region/outputs.tf @@ -0,0 +1,14 @@ +output "p4_server_connection_string" { + value = "ssl:${local.perforce_subdomain}.${var.route53_public_hosted_zone_name}:1666" + description = "The connection string for the P4 Server. Set your P4PORT environment variable to this value." +} + +output "p4_code_review_url" { + value = "https://${local.p4_code_review_subdomain}.${local.perforce_subdomain}.${var.route53_public_hosted_zone_name}" + description = "The URL for the P4 Code Review service." 
+} + +output "p4_auth_admin_url" { + value = "https://${local.p4_auth_subdomain}.${local.perforce_subdomain}.${var.route53_public_hosted_zone_name}/admin" + description = "The URL for the P4Auth service admin page." +} diff --git a/modules/perforce/examples/replica-cross-region/providers.tf b/modules/perforce/examples/replica-cross-region/providers.tf new file mode 100644 index 00000000..93044083 --- /dev/null +++ b/modules/perforce/examples/replica-cross-region/providers.tf @@ -0,0 +1,60 @@ +terraform { + required_version = ">= 1.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 5.0" + } + awscc = { + source = "hashicorp/awscc" + version = ">= 0.70.0" + } + netapp-ontap = { + source = "NetApp/netapp-ontap" + version = ">= 0.1.0" + } + } +} + +# Primary region (us-east-1) +provider "aws" { + region = "us-east-1" +} + +provider "awscc" { + region = "us-east-1" +} + +# Replica regions +provider "aws" { + alias = "us_west_2" + region = "us-west-2" +} + +provider "awscc" { + alias = "us_west_2" + region = "us-west-2" +} + +provider "aws" { + alias = "eu_west_1" + region = "eu-west-1" +} + +provider "awscc" { + alias = "eu_west_1" + region = "eu-west-1" +} + +# placeholder since provider is "required" by the module +provider "netapp-ontap" { + connection_profiles = [ + { + name = "null" + hostname = "null" + username = "null" + password = "null" + } + ] +} \ No newline at end of file diff --git a/modules/perforce/examples/replica-cross-region/security.tf b/modules/perforce/examples/replica-cross-region/security.tf new file mode 100644 index 00000000..6aa04001 --- /dev/null +++ b/modules/perforce/examples/replica-cross-region/security.tf @@ -0,0 +1,64 @@ +data "http" "my_ip" { + url = "https://api.ipify.org" +} + +resource "aws_security_group" "allow_my_ip" { + name_prefix = "${local.project_prefix}-allow-my-ip-" + vpc_id = aws_vpc.perforce_vpc.id + + ingress { + description = "SSH" + from_port = 22 + to_port = 22 + protocol = "tcp" + 
cidr_blocks = ["${chomp(data.http.my_ip.response_body)}/32"] + } + + ingress { + description = "Perforce" + from_port = 1666 + to_port = 1666 + protocol = "tcp" + cidr_blocks = ["${chomp(data.http.my_ip.response_body)}/32"] + } + + ingress { + description = "HTTP" + from_port = 80 + to_port = 80 + protocol = "tcp" + cidr_blocks = ["${chomp(data.http.my_ip.response_body)}/32"] + } + + ingress { + description = "HTTPS" + from_port = 443 + to_port = 443 + protocol = "tcp" + cidr_blocks = ["${chomp(data.http.my_ip.response_body)}/32"] + } + + # P4 replication traffic within VPC + ingress { + description = "P4 Replication within VPC" + from_port = 1666 + to_port = 1666 + protocol = "tcp" + cidr_blocks = [aws_vpc.perforce_vpc.cidr_block] + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + tags = { + Name = "${local.project_prefix}-allow-my-ip" + } + + lifecycle { + create_before_destroy = true + } +} \ No newline at end of file diff --git a/modules/perforce/examples/replica-cross-region/variables.tf b/modules/perforce/examples/replica-cross-region/variables.tf new file mode 100644 index 00000000..d111166c --- /dev/null +++ b/modules/perforce/examples/replica-cross-region/variables.tf @@ -0,0 +1,4 @@ +variable "route53_public_hosted_zone_name" { + description = "The name of your existing Route53 Public Hosted Zone. This is required to create the ACM certificate and Route53 records." 
+ type = string +} diff --git a/modules/perforce/examples/replica-cross-region/vpc.tf b/modules/perforce/examples/replica-cross-region/vpc.tf new file mode 100644 index 00000000..253c77b0 --- /dev/null +++ b/modules/perforce/examples/replica-cross-region/vpc.tf @@ -0,0 +1,86 @@ +########################################## +# Primary Region VPC (us-east-1) +########################################## +resource "aws_vpc" "perforce_vpc" { + cidr_block = "10.0.0.0/16" + enable_dns_hostnames = true + enable_dns_support = true + + tags = { + Name = "${local.project_prefix}-perforce-vpc" + } +} + +resource "aws_internet_gateway" "perforce_igw" { + vpc_id = aws_vpc.perforce_vpc.id + + tags = { + Name = "${local.project_prefix}-perforce-igw" + } +} + +# Public subnets +resource "aws_subnet" "public_subnets" { + count = 3 + vpc_id = aws_vpc.perforce_vpc.id + cidr_block = "10.0.${count.index + 1}.0/24" + availability_zone = data.aws_availability_zones.available.names[count.index] + map_public_ip_on_launch = true + + tags = { + Name = "${local.project_prefix}-public-subnet-${count.index + 1}" + } +} + +# Private subnets +resource "aws_subnet" "private_subnets" { + count = 3 + vpc_id = aws_vpc.perforce_vpc.id + cidr_block = "10.0.${count.index + 10}.0/24" + availability_zone = data.aws_availability_zones.available.names[count.index] + + tags = { + Name = "${local.project_prefix}-private-subnet-${count.index + 1}" + } +} + +# Route tables +resource "aws_route_table" "public_rt" { + vpc_id = aws_vpc.perforce_vpc.id + + route { + cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.perforce_igw.id + } + + tags = { + Name = "${local.project_prefix}-public-rt" + } +} + +resource "aws_route_table" "private_rt" { + vpc_id = aws_vpc.perforce_vpc.id + + tags = { + Name = "${local.project_prefix}-private-rt" + } +} + +resource "aws_route_table_association" "public_rta" { + count = length(aws_subnet.public_subnets) + subnet_id = aws_subnet.public_subnets[count.index].id + 
route_table_id = aws_route_table.public_rt.id +} + +resource "aws_route_table_association" "private_rta" { + count = length(aws_subnet.private_subnets) + subnet_id = aws_subnet.private_subnets[count.index].id + route_table_id = aws_route_table.private_rt.id +} + +########################################## +# Data Sources +########################################## +data "aws_availability_zones" "available" { + state = "available" +} \ No newline at end of file diff --git a/modules/perforce/examples/replica-single-region/README.md b/modules/perforce/examples/replica-single-region/README.md new file mode 100644 index 00000000..60358ebe --- /dev/null +++ b/modules/perforce/examples/replica-single-region/README.md @@ -0,0 +1,55 @@ +# Perforce Single-Region Replica Example + +This example demonstrates how to deploy a Perforce server with replicas in the same AWS region for high availability and load distribution. + +## Architecture + +- **Primary P4 Server**: `perforce.yourdomain.com` (AZ 1a) +- **Standby Replica**: `standby.perforce.yourdomain.com` (AZ 1b) - Can be promoted to primary +- **Read-only Replica**: `ci.perforce.yourdomain.com` (AZ 1c) - For CI/CD systems + +## Benefits + +- **High Availability**: Survive single AZ failures +- **Load Distribution**: Spread read operations across replicas +- **CI/CD Optimization**: Dedicated replica for build systems +- **Zero Downtime Maintenance**: Promote standby during primary maintenance + +## Usage + +1. Set your Route53 hosted zone: + ```bash + export TF_VAR_route53_public_hosted_zone_name="yourdomain.com" + ``` + +2. Deploy the infrastructure: + ```bash + terraform init + terraform plan + terraform apply + ``` + +3. 
Access your Perforce servers: + - Primary: `perforce.yourdomain.com:1666` + - Standby: `standby.perforce.yourdomain.com:1666` + - CI/Build: `ci.perforce.yourdomain.com:1666` + +## Replica Types + +- **Standby**: Full replica that can be promoted to primary during failover +- **Read-only**: Optimized for read operations, perfect for CI/CD systems + +## Failover Process + +To promote the standby replica to primary: + +1. Stop the primary server +2. SSH to the standby replica instance +3. Run: `p4d -r /p4/1 -p 1666 -d -J off` +4. Update DNS to point primary FQDN to standby IP + +## Cleanup + +```bash +terraform destroy +``` \ No newline at end of file diff --git a/modules/perforce/examples/replica-single-region/dns.tf b/modules/perforce/examples/replica-single-region/dns.tf new file mode 100644 index 00000000..81ff6976 --- /dev/null +++ b/modules/perforce/examples/replica-single-region/dns.tf @@ -0,0 +1,101 @@ +########################################## +# Fetch Shared NLB DNS Name and Zone ID +########################################## +data "aws_lb" "shared_services_nlb" { + arn = module.terraform-aws-perforce.shared_network_load_balancer_arn + + depends_on = [module.terraform-aws-perforce] +} + +########################################## +# Fetch Route53 Public Hosted Zone for FQDN +########################################## +data "aws_route53_zone" "root" { + name = var.route53_public_hosted_zone_name + private_zone = false +} + +########################################## +# Perforce External (Public) DNS +########################################## +# Route all external web service traffic (e.g. 
auth.perforce.example.com, review.perforce.example.com) to the Public NLB +resource "aws_route53_record" "external_perforce_web_services" { + zone_id = data.aws_route53_zone.root.id + name = "*.${local.p4_server_fully_qualified_domain_name}" + type = "A" + alias { + name = data.aws_lb.shared_services_nlb.dns_name + zone_id = data.aws_lb.shared_services_nlb.zone_id + evaluate_target_health = true + } +} + +# Route external web service traffic to the public EIP of the P4 Server +resource "aws_route53_record" "external_perforce_p4_server" { + #checkov:skip=CKV2_AWS_23: Attached to EIP public IP + zone_id = data.aws_route53_zone.root.id + name = "perforce.${data.aws_route53_zone.root.name}" + type = "A" + ttl = 300 + records = [module.terraform-aws-perforce.p4_server_eip_public_ip] +} + +# Route external replica traffic to replica EIPs +resource "aws_route53_record" "external_perforce_replicas" { + for_each = module.terraform-aws-perforce.p4_server_replicas + + #checkov:skip=CKV2_AWS_23: Attached to EIP public IP + zone_id = data.aws_route53_zone.root.id + name = each.value.fqdn + type = "A" + ttl = 300 + records = [each.value.public_ip] +} + +########################################## +# P4 Code Review Certificate Management +########################################## +resource "aws_acm_certificate" "perforce" { + domain_name = "*.${local.perforce_subdomain}.${var.route53_public_hosted_zone_name}" + + validation_method = "DNS" + + #checkov:skip=CKV2_AWS_71: Wildcard is necessary for this domain + + tags = { + environment = "dev" + } + + lifecycle { + create_before_destroy = true + } +} + +resource "aws_route53_record" "perforce_cert" { + for_each = { + for dvo in aws_acm_certificate.perforce.domain_validation_options : dvo.domain_name => { + name = dvo.resource_record_name + record = dvo.resource_record_value + type = dvo.resource_record_type + } + } + + allow_overwrite = true + name = each.value.name + records = [each.value.record] + ttl = 60 + type = each.value.type 
+ zone_id = data.aws_route53_zone.root.id +} + +resource "aws_acm_certificate_validation" "perforce" { + certificate_arn = aws_acm_certificate.perforce.arn + validation_record_fqdns = [for record in aws_route53_record.perforce_cert : record.fqdn] + + lifecycle { + create_before_destroy = true + } + timeouts { + create = "15m" + } +} \ No newline at end of file diff --git a/modules/perforce/examples/replica-single-region/locals.tf b/modules/perforce/examples/replica-single-region/locals.tf new file mode 100644 index 00000000..1f413dbd --- /dev/null +++ b/modules/perforce/examples/replica-single-region/locals.tf @@ -0,0 +1,34 @@ +data "aws_availability_zones" "available" {} + +locals { + project_prefix = "cgd" + azs = slice(data.aws_availability_zones.available.names, 0, 3) + + # Subdomains + perforce_subdomain = "perforce" + p4_auth_subdomain = "auth" + p4_code_review_subdomain = "review" + + # P4 Server Domain + p4_server_fully_qualified_domain_name = "${local.perforce_subdomain}.${var.route53_public_hosted_zone_name}" + + # P4Auth Domain + p4_auth_fully_qualified_domain_name = "${local.p4_auth_subdomain}.${local.perforce_subdomain}.${var.route53_public_hosted_zone_name}" + + # P4 Code Review + p4_code_review_fully_qualified_domain_name = "${local.p4_code_review_subdomain}.${local.perforce_subdomain}.${var.route53_public_hosted_zone_name}" + + + # Amazon Certificate Manager (ACM) + certificate_arn = aws_acm_certificate.perforce.arn + + + # VPC Configuration + vpc_cidr_block = "10.0.0.0/16" + public_subnet_cidrs = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"] + private_subnet_cidrs = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"] + + tags = { + environment = "dev" + } +} diff --git a/modules/perforce/examples/replica-single-region/main.tf b/modules/perforce/examples/replica-single-region/main.tf new file mode 100644 index 00000000..11ec814c --- /dev/null +++ b/modules/perforce/examples/replica-single-region/main.tf @@ -0,0 +1,142 @@ +module "terraform-aws-perforce" { 
+ source = "../../" + + # - Shared - + project_prefix = local.project_prefix + vpc_id = aws_vpc.perforce_vpc.id + + create_route53_private_hosted_zone = true + route53_private_hosted_zone_name = "${local.perforce_subdomain}.${var.route53_public_hosted_zone_name}" + certificate_arn = local.certificate_arn + existing_security_groups = [aws_security_group.allow_my_ip.id] + shared_alb_subnets = aws_subnet.private_subnets[*].id + shared_nlb_subnets = aws_subnet.public_subnets[*].id + + # - P4 Server Configuration - + p4_server_config = { + # General + name = "p4-server" + fully_qualified_domain_name = local.p4_server_fully_qualified_domain_name + + # Compute + lookup_existing_ami = false + enable_auto_ami_creation = true + p4_server_type = "p4d_commit" + + # Storage + depot_volume_size = 128 + metadata_volume_size = 32 + logs_volume_size = 32 + + # Networking & Security + instance_subnet_id = aws_subnet.public_subnets[0].id + existing_security_groups = [aws_security_group.allow_my_ip.id] + } + + # - P4 Server Replicas Configuration - + p4_server_replicas_config = { + standby-1b : { + # Replica-specific + replica_type = "standby" + subdomain = "standby" + + # General + name = "p4-server-standby" + project_prefix = local.project_prefix + environment = "dev" + fully_qualified_domain_name = "standby.${local.p4_server_fully_qualified_domain_name}" + + # Compute + lookup_existing_ami = false + instance_type = "c6i.large" + instance_architecture = "x86_64" + p4_server_type = "p4d_replica" + unicode = false + selinux = false + case_sensitive = true + plaintext = false + + # Storage + storage_type = "EBS" + depot_volume_size = 128 + metadata_volume_size = 32 + logs_volume_size = 32 + + # Networking & Security + vpc_id = aws_vpc.perforce_vpc.id + instance_subnet_id = aws_subnet.public_subnets[1].id + create_default_sg = true + existing_security_groups = [aws_security_group.allow_my_ip.id] + internal = false + + }, + readonly-1c : { + # Replica-specific + replica_type = "readonly" 
+ subdomain = "ci" + + # General + name = "p4-server-ci" + project_prefix = local.project_prefix + environment = "dev" + fully_qualified_domain_name = "ci.${local.p4_server_fully_qualified_domain_name}" + + # Compute + lookup_existing_ami = false + instance_type = "c6i.large" + instance_architecture = "x86_64" + p4_server_type = "p4d_replica" + unicode = false + selinux = false + case_sensitive = true + plaintext = false + + # Storage + storage_type = "EBS" + depot_volume_size = 128 + metadata_volume_size = 32 + logs_volume_size = 32 + + # Networking & Security + vpc_id = aws_vpc.perforce_vpc.id + instance_subnet_id = aws_subnet.public_subnets[2].id + create_default_sg = true + existing_security_groups = [aws_security_group.allow_my_ip.id] + internal = false + } + } + + # - P4Auth Configuration - + p4_auth_config = { + # General + name = "p4-auth" + fully_qualified_domain_name = local.p4_auth_fully_qualified_domain_name + existing_security_groups = [aws_security_group.allow_my_ip.id] + debug = true + deregistration_delay = 0 + service_subnets = aws_subnet.private_subnets[*].id + } + + # - P4 Code Review Configuration - + p4_code_review_config = { + name = "p4-code-review" + fully_qualified_domain_name = local.p4_code_review_fully_qualified_domain_name + existing_security_groups = [aws_security_group.allow_my_ip.id] + debug = true + deregistration_delay = 0 + service_subnets = aws_subnet.private_subnets[*].id + enable_sso = true + } +} + +# placeholder since provider is "required" by the module +provider "netapp-ontap" { + connection_profiles = [ + { + name = "null" + hostname = "null" + username = "null" + password = "null" + } + ] +} \ No newline at end of file diff --git a/modules/perforce/examples/replica-single-region/outputs.tf b/modules/perforce/examples/replica-single-region/outputs.tf new file mode 100644 index 00000000..b1a3f20f --- /dev/null +++ b/modules/perforce/examples/replica-single-region/outputs.tf @@ -0,0 +1,19 @@ +output 
"p4_server_connection_string" { + value = "ssl:${local.perforce_subdomain}.${var.route53_public_hosted_zone_name}:1666" + description = "The connection string for the P4 Server. Set your P4PORT environment variable to this value." +} + +output "p4_code_review_url" { + value = "https://${local.p4_code_review_subdomain}.${local.perforce_subdomain}.${var.route53_public_hosted_zone_name}" + description = "The URL for the P4 Code Review service." +} + +output "p4_auth_admin_url" { + value = "https://${local.p4_auth_subdomain}.${local.perforce_subdomain}.${var.route53_public_hosted_zone_name}/admin" + description = "The URL for the P4Auth service admin page." +} + +output "p4_server_replicas_connection_strings" { + value = module.terraform-aws-perforce.p4_server_replicas_connection_strings + description = "Connection strings for each replica server." +} diff --git a/modules/perforce/examples/replica-single-region/security.tf b/modules/perforce/examples/replica-single-region/security.tf new file mode 100644 index 00000000..0f97ab67 --- /dev/null +++ b/modules/perforce/examples/replica-single-region/security.tf @@ -0,0 +1,47 @@ +resource "aws_security_group" "allow_my_ip" { + name = "allow_my_ip" + description = "Allow inbound traffic from my IP" + vpc_id = aws_vpc.perforce_vpc.id + + tags = { + Name = "allow_my_ip" + } +} + +data "http" "my_ip" { + url = "https://api.ipify.org" +} + +resource "aws_vpc_security_group_ingress_rule" "allow_https" { + security_group_id = aws_security_group.allow_my_ip.id + description = "Allow HTTPS traffic from my public IP." + from_port = 443 + to_port = 443 + ip_protocol = "tcp" + cidr_ipv4 = "${chomp(data.http.my_ip.response_body)}/32" +} +resource "aws_vpc_security_group_ingress_rule" "allow_http" { + security_group_id = aws_security_group.allow_my_ip.id + description = "Allow HTTP traffic from my public IP." 
+ from_port = 80 + to_port = 80 + ip_protocol = "tcp" + cidr_ipv4 = "${chomp(data.http.my_ip.response_body)}/32" +} + +resource "aws_vpc_security_group_ingress_rule" "allow_icmp" { + security_group_id = aws_security_group.allow_my_ip.id + description = "Allow ICMP traffic from my public IP." + from_port = -1 + to_port = -1 + ip_protocol = "icmp" + cidr_ipv4 = "${chomp(data.http.my_ip.response_body)}/32" +} +resource "aws_vpc_security_group_ingress_rule" "allow_perforce" { + security_group_id = aws_security_group.allow_my_ip.id + description = "Allow Perforce traffic from my public IP." + from_port = 1666 + to_port = 1666 + ip_protocol = "tcp" + cidr_ipv4 = "${chomp(data.http.my_ip.response_body)}/32" +} diff --git a/modules/perforce/examples/replica-single-region/variables.tf b/modules/perforce/examples/replica-single-region/variables.tf new file mode 100644 index 00000000..d111166c --- /dev/null +++ b/modules/perforce/examples/replica-single-region/variables.tf @@ -0,0 +1,4 @@ +variable "route53_public_hosted_zone_name" { + description = "The name of your existing Route53 Public Hosted Zone. This is required to create the ACM certificate and Route53 records." 
+ type = string +} diff --git a/modules/perforce/examples/replica-single-region/versions.tf b/modules/perforce/examples/replica-single-region/versions.tf new file mode 100644 index 00000000..0aed93c1 --- /dev/null +++ b/modules/perforce/examples/replica-single-region/versions.tf @@ -0,0 +1,26 @@ +terraform { + required_version = ">= 1.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.6.0" + } + awscc = { + source = "hashicorp/awscc" + version = "1.50.0" + } + random = { + source = "hashicorp/random" + version = "3.7.2" + } + http = { + source = "hashicorp/http" + version = "3.5.0" + } + netapp-ontap = { + source = "NetApp/netapp-ontap" + version = "2.3.0" + } + } +} diff --git a/modules/perforce/examples/replica-single-region/vpc.tf b/modules/perforce/examples/replica-single-region/vpc.tf new file mode 100644 index 00000000..b66b6d9b --- /dev/null +++ b/modules/perforce/examples/replica-single-region/vpc.tf @@ -0,0 +1,135 @@ +########################################## +# VPC +########################################## +resource "aws_vpc" "perforce_vpc" { + cidr_block = local.vpc_cidr_block + enable_dns_hostnames = true + #checkov:skip=CKV2_AWS_11: VPC flow logging disabled by design + + tags = merge(local.tags, + { + Name = "${local.project_prefix}-perforce-vpc" + } + ) +} + +# Set default SG to restrict all traffic +resource "aws_default_security_group" "default" { + vpc_id = aws_vpc.perforce_vpc.id + + tags = merge(local.tags, + { + Name = "${local.project_prefix}-perforce-vpc-default-security-group" + } + ) + +} + +########################################## +# Subnets +########################################## +resource "aws_subnet" "public_subnets" { + count = length(local.public_subnet_cidrs) + vpc_id = aws_vpc.perforce_vpc.id + cidr_block = element(local.public_subnet_cidrs, count.index) + availability_zone = element(local.azs, count.index) + + tags = merge(local.tags, + { + Name = 
"${local.project_prefix}-pub-subnet-${count.index + 1}" + } + ) +} + +resource "aws_subnet" "private_subnets" { + count = length(local.private_subnet_cidrs) + vpc_id = aws_vpc.perforce_vpc.id + cidr_block = element(local.private_subnet_cidrs, count.index) + availability_zone = element(local.azs, count.index) + + tags = merge(local.tags, + { + Name = "${local.project_prefix}-pvt-subnet-${count.index + 1}" + } + ) +} + +########################################## +# Internet Gateway +########################################## +resource "aws_internet_gateway" "igw" { + vpc_id = aws_vpc.perforce_vpc.id + tags = merge(local.tags, + { + Name = "${local.project_prefix}-perforce-igw" + } + ) +} + +########################################## +# Route Tables & NAT Gateway +########################################## +resource "aws_route_table" "public_rt" { + vpc_id = aws_vpc.perforce_vpc.id + + # public route to the internet + route { + cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.igw.id + } + + tags = merge(local.tags, + { + Name = "${local.project_prefix}-perforce-public-rt" + } + ) +} + +resource "aws_route_table_association" "public_rt_asso" { + count = length(aws_subnet.public_subnets) + route_table_id = aws_route_table.public_rt.id + subnet_id = aws_subnet.public_subnets[count.index].id +} + +resource "aws_eip" "nat_gateway_eip" { + depends_on = [aws_internet_gateway.igw] + #checkov:skip=CKV2_AWS_19:EIP associated with NAT Gateway through association ID + tags = merge(local.tags, + { + Name = "${local.project_prefix}-perforce-nat-eip" + } + ) +} + +resource "aws_route_table" "private_rt" { + vpc_id = aws_vpc.perforce_vpc.id + + tags = merge(local.tags, + { + Name = "${local.project_prefix}-perforce-private-rt" + } + ) +} + +# route to the internet through NAT gateway +resource "aws_route" "private_rt_nat_gateway" { + route_table_id = aws_route_table.private_rt.id + destination_cidr_block = "0.0.0.0/0" + nat_gateway_id = aws_nat_gateway.nat_gateway.id +} + 
+resource "aws_route_table_association" "private_rt_asso" { + count = length(aws_subnet.private_subnets) + route_table_id = aws_route_table.private_rt.id + subnet_id = aws_subnet.private_subnets[count.index].id +} + +resource "aws_nat_gateway" "nat_gateway" { + allocation_id = aws_eip.nat_gateway_eip.id + subnet_id = aws_subnet.public_subnets[0].id + tags = merge(local.tags, + { + Name = "${local.project_prefix}-perforce-nat" + } + ) +} diff --git a/modules/perforce/locals.tf b/modules/perforce/locals.tf index b513a204..45cd78c7 100644 --- a/modules/perforce/locals.tf +++ b/modules/perforce/locals.tf @@ -7,4 +7,13 @@ locals { p4_port = var.p4_server_config != null ? ( "%{if !var.p4_server_config.plaintext}ssl:%{endif}${var.p4_server_config.fully_qualified_domain_name}:1666" ) : null + + # Replica domain mapping + replica_domains = { + for name, config in var.p4_server_replicas_config : name => ( + config.subdomain != null ? + "${config.subdomain}.${var.p4_server_config.fully_qualified_domain_name}" : + "${name}.${var.p4_server_config.fully_qualified_domain_name}" + ) + } } diff --git a/modules/perforce/main.tf b/modules/perforce/main.tf index 27b4b859..b82e2732 100644 --- a/modules/perforce/main.tf +++ b/modules/perforce/main.tf @@ -59,10 +59,68 @@ module "p4_server" { super_user_username_secret_arn = var.p4_server_config.super_user_username_secret_arn create_default_role = var.p4_server_config.create_default_role custom_role = var.p4_server_config.custom_role + replica_scripts_bucket_arn = var.p4_server_config != null ? aws_s3_bucket.p4_server_config_scripts[0].arn : null depends_on = [module.p4_auth] } +################################################# +# P4 Server Replicas +################################################# +module "p4_server_replicas" { + for_each = var.p4_server_replicas_config + source = "./modules/p4-server" + + # General - inherit from primary or use replica-specific + name = each.value.name != null ? 
each.value.name : "${var.p4_server_config.name}-${each.key}" + project_prefix = each.value.project_prefix != null ? each.value.project_prefix : var.p4_server_config.project_prefix + environment = each.value.environment != null ? each.value.environment : var.p4_server_config.environment + fully_qualified_domain_name = each.value.fully_qualified_domain_name != null ? each.value.fully_qualified_domain_name : local.replica_domains[each.key] + p4_server_type = "p4d_replica" + + # Compute - inherit from primary or use replica-specific + # Note: lookup_existing_ami and ami_prefix are not passed to submodule but kept for inheritance + instance_type = each.value.instance_type != null ? each.value.instance_type : var.p4_server_config.instance_type + instance_architecture = each.value.instance_architecture != null ? each.value.instance_architecture : var.p4_server_config.instance_architecture + unicode = each.value.unicode != null ? each.value.unicode : var.p4_server_config.unicode + selinux = each.value.selinux != null ? each.value.selinux : var.p4_server_config.selinux + case_sensitive = each.value.case_sensitive != null ? each.value.case_sensitive : var.p4_server_config.case_sensitive + plaintext = each.value.plaintext != null ? each.value.plaintext : var.p4_server_config.plaintext + + # Storage - inherit from primary or use replica-specific + storage_type = each.value.storage_type != null ? each.value.storage_type : var.p4_server_config.storage_type + depot_volume_size = each.value.depot_volume_size != null ? each.value.depot_volume_size : var.p4_server_config.depot_volume_size + metadata_volume_size = each.value.metadata_volume_size != null ? each.value.metadata_volume_size : var.p4_server_config.metadata_volume_size + logs_volume_size = each.value.logs_volume_size != null ? 
each.value.logs_volume_size : var.p4_server_config.logs_volume_size + + # Networking & Security - replica-specific + vpc_id = each.value.vpc_id + instance_subnet_id = each.value.instance_subnet_id + instance_private_ip = each.value.instance_private_ip + create_default_sg = each.value.create_default_sg != null ? each.value.create_default_sg : var.p4_server_config.create_default_sg + existing_security_groups = each.value.existing_security_groups != null ? each.value.existing_security_groups : var.p4_server_config.existing_security_groups + internal = each.value.internal != null ? each.value.internal : var.p4_server_config.internal + + # Credentials - inherit from primary configuration (not computed outputs to avoid cycles) + super_user_password_secret_arn = each.value.super_user_password_secret_arn != null ? each.value.super_user_password_secret_arn : var.p4_server_config.super_user_password_secret_arn + super_user_username_secret_arn = each.value.super_user_username_secret_arn != null ? each.value.super_user_username_secret_arn : var.p4_server_config.super_user_username_secret_arn + create_default_role = each.value.create_default_role != null ? each.value.create_default_role : var.p4_server_config.create_default_role + custom_role = each.value.custom_role != null ? each.value.custom_role : var.p4_server_config.custom_role + + # FSxN - inherit from primary or use replica-specific + fsxn_password = each.value.fsxn_password != null ? each.value.fsxn_password : var.p4_server_config.fsxn_password + fsxn_filesystem_security_group_id = each.value.fsxn_filesystem_security_group_id != null ? each.value.fsxn_filesystem_security_group_id : var.p4_server_config.fsxn_filesystem_security_group_id + protocol = each.value.protocol != null ? each.value.protocol : var.p4_server_config.protocol + fsxn_region = each.value.fsxn_region != null ? each.value.fsxn_region : var.p4_server_config.fsxn_region + fsxn_management_ip = each.value.fsxn_management_ip != null ? 
each.value.fsxn_management_ip : var.p4_server_config.fsxn_management_ip + fsxn_svm_name = each.value.fsxn_svm_name != null ? each.value.fsxn_svm_name : var.p4_server_config.fsxn_svm_name + amazon_fsxn_svm_id = each.value.amazon_fsxn_svm_id != null ? each.value.amazon_fsxn_svm_id : var.p4_server_config.amazon_fsxn_svm_id + # Note: fsxn_aws_profile is not passed to submodule but kept for inheritance + replica_scripts_bucket_arn = var.p4_server_config != null ? aws_s3_bucket.p4_server_config_scripts[0].arn : null + + depends_on = [module.p4_server] +} + ################################################# # P4Auth (formerly Perforce Helix Auth Service) ################################################# diff --git a/modules/perforce/modules/p4-auth/main.tf b/modules/perforce/modules/p4-auth/main.tf index 4fad4b17..bc13597b 100644 --- a/modules/perforce/modules/p4-auth/main.tf +++ b/modules/perforce/modules/p4-auth/main.tf @@ -116,7 +116,7 @@ resource "aws_ecs_task_definition" "task_definition" { logDriver = "awslogs" options = { awslogs-group = aws_cloudwatch_log_group.log_group.name - awslogs-region = data.aws_region.current.name + awslogs-region = data.aws_region.current.id awslogs-stream-prefix = "${local.name_prefix}-service" } } @@ -166,7 +166,7 @@ resource "aws_ecs_task_definition" "task_definition" { logDriver = "awslogs" options = { awslogs-group = aws_cloudwatch_log_group.log_group.name - awslogs-region = data.aws_region.current.name + awslogs-region = data.aws_region.current.id awslogs-stream-prefix = "${local.name_prefix}-service-config" } } diff --git a/modules/perforce/modules/p4-code-review/main.tf b/modules/perforce/modules/p4-code-review/main.tf index 2b8a5804..f9b793a3 100644 --- a/modules/perforce/modules/p4-code-review/main.tf +++ b/modules/perforce/modules/p4-code-review/main.tf @@ -75,7 +75,7 @@ resource "aws_ecs_task_definition" "task_definition" { logDriver = "awslogs" options = { awslogs-group = aws_cloudwatch_log_group.log_group.name - 
awslogs-region = data.aws_region.current.name + awslogs-region = data.aws_region.current.id awslogs-stream-prefix = "${local.name_prefix}-service" } } @@ -148,7 +148,7 @@ resource "aws_ecs_task_definition" "task_definition" { logDriver = "awslogs" options = { awslogs-group = aws_cloudwatch_log_group.log_group.name - awslogs-region = data.aws_region.current.name + awslogs-region = data.aws_region.current.id awslogs-stream-prefix = "${local.name_prefix}-service-config" } } diff --git a/modules/perforce/modules/p4-server/iam.tf b/modules/perforce/modules/p4-server/iam.tf index eed1212e..5b5b13b3 100644 --- a/modules/perforce/modules/p4-server/iam.tf +++ b/modules/perforce/modules/p4-server/iam.tf @@ -34,6 +34,22 @@ data "aws_iam_policy_document" "default_policy" { var.storage_type == "FSxN" && var.protocol == "ISCSI" ? var.fsxn_password : null ]) } + + # S3 permissions for replica configuration scripts + dynamic "statement" { + for_each = var.replica_scripts_bucket_arn != null ? [1] : [] + content { + effect = "Allow" + actions = [ + "s3:GetObject", + "s3:ListBucket" + ] + resources = [ + var.replica_scripts_bucket_arn, + "${var.replica_scripts_bucket_arn}/*" + ] + } + } } resource "aws_iam_policy" "default_policy" { diff --git a/modules/perforce/modules/p4-server/variables.tf b/modules/perforce/modules/p4-server/variables.tf index 378f59d1..d7501ee0 100644 --- a/modules/perforce/modules/p4-server/variables.tf +++ b/modules/perforce/modules/p4-server/variables.tf @@ -262,6 +262,12 @@ variable "custom_role" { default = null } +variable "replica_scripts_bucket_arn" { + type = string + description = "ARN of the S3 bucket containing replica configuration scripts" + default = null +} + variable "tags" { type = map(any) description = "Tags to apply to resources." 
diff --git a/modules/perforce/outputs.tf b/modules/perforce/outputs.tf index 39a44825..ebe32c7f 100644 --- a/modules/perforce/outputs.tf +++ b/modules/perforce/outputs.tf @@ -112,3 +112,40 @@ output "p4_server_lambda_link_name" { module.p4_server[0].lambda_link_name : null) description = "The name of the Lambda link for the P4 Server instance to use with FSxN." } + +# P4 Server Replicas +output "p4_server_replicas" { + value = { + for name, replica in module.p4_server_replicas : name => { + instance_id = replica.instance_id + private_ip = replica.private_ip + public_ip = replica.eip_public_ip + fqdn = local.replica_domains[name] + security_group_id = replica.security_group_id + super_user_password_secret_arn = replica.super_user_password_secret_arn + super_user_username_secret_arn = replica.super_user_username_secret_arn + } + } + description = "Map of P4 server replica information including instance IDs, IPs, FQDNs, and credentials." +} + +output "p4_server_replicas_instance_ids" { + value = { + for name, replica in module.p4_server_replicas : name => replica.instance_id + } + description = "Map of replica names to instance IDs for easy reference." +} + +output "p4_server_replicas_fqdns" { + value = { + for name, config in var.p4_server_replicas_config : name => local.replica_domains[name] + } + description = "Map of replica names to their fully qualified domain names." +} + +output "p4_server_replicas_connection_strings" { + value = { + for name, replica in module.p4_server_replicas : name => "ssl:${local.replica_domains[name]}:1666" + } + description = "Map of replica names to their P4 connection strings." 
+} diff --git a/modules/perforce/phase2-todo.md b/modules/perforce/phase2-todo.md new file mode 100644 index 00000000..cbda094b --- /dev/null +++ b/modules/perforce/phase2-todo.md @@ -0,0 +1,194 @@ +# Phase 2: Cross-Region Replica Support - Implementation Plan + +## Overview +Phase 2 extends Phase 1's same-region replica support to enable true cross-region P4 server replicas for global development teams and disaster recovery. + +## Current State (Phase 1) +✅ Same-region replicas working (multi-AZ within single region) +✅ Basic replica infrastructure (S3, SSM, Route53, IAM) +✅ Replica inheritance and validation +❌ Cross-region functionality is placeholder only + +## Phase 2 Requirements + +### 1. Cross-Region Detection & Provider Handling +**Goal**: Automatically detect when replicas are in different regions and use appropriate providers + +**Implementation**: +- Add `region` field to `p4_server_replicas_config` variable +- Create `data.aws_region.current` to get primary region +- Add logic to detect cross-region replicas: `each.value.region != data.aws_region.current.name` +- Implement provider selection logic for cross-region resources + +**Files to modify**: +- `modules/perforce/variables.tf` - Add region field +- `modules/perforce/main.tf` - Add region detection logic +- `modules/perforce/providers.tf` - Define required providers + +### 2. Cross-Region IAM Policies +**Goal**: Enable replicas to access primary region resources (S3, Secrets Manager, SSM) + +**Implementation**: +- Modify IAM policies to include cross-region resource ARNs +- Add permissions for cross-region S3 access +- Add permissions for cross-region Secrets Manager access +- Add permissions for cross-region SSM execution + +**Files to modify**: +- `modules/perforce/modules/p4-server/iam.tf` - Update IAM policies +- Add cross-region resource ARN patterns + +### 3. 
Provider-Aware Resource Creation +**Goal**: Create resources in correct regions using provider aliases + +**Implementation**: +- Modify replica module calls to use region-specific providers +- Update S3 bucket access for cross-region scenarios +- Handle AWSCC vs AWS provider differences across regions +- Implement provider selection logic + +**Files to modify**: +- `modules/perforce/main.tf` - Provider-aware module calls +- `modules/perforce/s3.tf` - Cross-region S3 access +- `modules/perforce/ssm.tf` - Cross-region SSM execution + +### 4. Cross-Region DNS Management +**Goal**: Manage DNS records across regions with health checks + +**Implementation**: +- Create Route53 health checks for cross-region replicas +- Implement failover routing policies +- Add latency-based routing for global teams +- Handle cross-region certificate management + +**Files to modify**: +- `modules/perforce/route53.tf` - Cross-region DNS logic +- Add health check resources +- Add routing policy configuration + +### 5. Network Connectivity Validation +**Goal**: Validate that cross-region networking is properly configured + +**Implementation**: +- Add validation for VPC peering/Transit Gateway connectivity +- Validate security group rules for cross-region traffic +- Add network connectivity checks +- Provide clear error messages for missing networking + +**Files to modify**: +- `modules/perforce/variables.tf` - Add networking validations +- `modules/perforce/networking.tf` - New file for network checks + +### 6. 
Enhanced Examples +**Goal**: Provide working cross-region examples with proper networking + +**Implementation**: +- Update `replica-cross-region` example with real cross-region setup +- Add VPC peering configuration +- Add cross-region security group rules +- Add Transit Gateway example (optional) + +**Files to modify**: +- `modules/perforce/examples/replica-cross-region/` - Complete rewrite +- Add networking infrastructure +- Add proper provider configuration + +## Implementation Tasks + +### Task 1: Core Cross-Region Logic (Week 1) +- [ ] Add `region` field to replica configuration +- [ ] Implement region detection logic +- [ ] Add provider selection mechanism +- [ ] Update variable validations + +### Task 2: Cross-Region IAM & Security (Week 2) +- [ ] Update IAM policies for cross-region access +- [ ] Add cross-region S3 permissions +- [ ] Add cross-region Secrets Manager permissions +- [ ] Test cross-region resource access + +### Task 3: Provider-Aware Resources (Week 3) +- [ ] Implement provider-specific resource creation +- [ ] Update S3 bucket access patterns +- [ ] Update SSM execution for cross-region +- [ ] Handle AWSCC provider differences + +### Task 4: DNS & Health Checks (Week 4) +- [ ] Implement Route53 health checks +- [ ] Add failover routing policies +- [ ] Add latency-based routing +- [ ] Test DNS failover scenarios + +### Task 5: Network Validation (Week 5) +- [ ] Add VPC connectivity validation +- [ ] Add security group validation +- [ ] Implement network connectivity tests +- [ ] Add clear error messaging + +### Task 6: Cross-Region Example (Week 6) +- [ ] Rewrite cross-region example +- [ ] Add VPC peering setup +- [ ] Add cross-region security groups +- [ ] Add comprehensive testing +- [ ] Update documentation + +## Technical Challenges + +### Provider Alias Complexity +- Terraform modules with provider aliases are complex +- Need to handle dynamic provider selection +- AWSCC provider differences across regions + +### IAM Cross-Region 
Permissions +- Resource ARNs must include all regions +- Secrets Manager cross-region access patterns +- S3 cross-region bucket policies + +### Network Dependencies +- VPC peering must exist before replica creation +- Security groups must allow cross-region traffic +- DNS resolution across regions + +### State Management +- Cross-region resources in single state file +- Provider configuration complexity +- Dependency ordering across regions + +## Success Criteria + +### Functional Requirements +- [ ] Replicas deploy successfully in different regions +- [ ] Cross-region replication works (P4 sync from remote replica) +- [ ] Failover works (promote replica in different region) +- [ ] DNS routing works (latency-based, health check-based) + +### Performance Requirements +- [ ] Cross-region latency < 200ms for most operations +- [ ] Failover time < 15 minutes +- [ ] Health check detection < 5 minutes + +### Operational Requirements +- [ ] Clear error messages for networking issues +- [ ] Comprehensive validation of prerequisites +- [ ] Working examples with full networking setup +- [ ] Documentation for cross-region setup + +## Dependencies on Phase 1 +- Phase 1 replica infrastructure (S3, SSM, Route53) +- Phase 1 validation logic +- Phase 1 inheritance patterns +- Phase 1 IAM foundation + +## Risk Mitigation +- Start with simple 2-region setup +- Extensive testing of provider alias patterns +- Validate networking prerequisites early +- Provide fallback to same-region if cross-region fails +- Clear documentation of networking requirements + +## Future Enhancements (Phase 3) +- Multi-region mesh topology +- Automatic network setup (Transit Gateway) +- Advanced routing policies +- Cross-region backup/restore +- Global load balancing \ No newline at end of file diff --git a/modules/perforce/route53.tf b/modules/perforce/route53.tf index d5171815..938de33f 100644 --- a/modules/perforce/route53.tf +++ b/modules/perforce/route53.tf @@ -35,3 +35,16 @@ resource 
"aws_route53_record" "internal_p4_server" { #checkov:skip=CKV2_AWS_23: Route53 A record is necessary for this example deployment } + +# Route replica traffic to replica instances +resource "aws_route53_record" "internal_p4_replicas" { + for_each = var.p4_server_replicas_config + + zone_id = aws_route53_zone.perforce_private_hosted_zone[0].zone_id + name = local.replica_domains[each.key] + type = "A" + records = [module.p4_server_replicas[each.key].private_ip] + ttl = 300 + + #checkov:skip=CKV2_AWS_23: Route53 A record is necessary for replica deployment +} diff --git a/modules/perforce/s3.tf b/modules/perforce/s3.tf new file mode 100644 index 00000000..fcba80d6 --- /dev/null +++ b/modules/perforce/s3.tf @@ -0,0 +1,78 @@ +################################################# +# S3 Bucket for P4 Replica Configuration Scripts +################################################# +resource "random_id" "bucket_suffix" { + byte_length = 8 +} + +resource "aws_s3_bucket" "p4_server_config_scripts" { + count = var.p4_server_config != null ? 1 : 0 + bucket = "${var.project_prefix}-p4-server-scripts-${random_id.bucket_suffix.hex}" + + tags = merge(var.tags, { + Name = "${var.project_prefix}-p4-server-scripts" + }) +} + +resource "aws_s3_bucket_versioning" "p4_server_config_scripts" { + count = var.p4_server_config != null ? 1 : 0 + bucket = aws_s3_bucket.p4_server_config_scripts[0].id + versioning_configuration { + status = "Enabled" + } +} + +resource "aws_s3_bucket_server_side_encryption_configuration" "p4_server_config_scripts" { + count = var.p4_server_config != null ? 1 : 0 + bucket = aws_s3_bucket.p4_server_config_scripts[0].id + + rule { + apply_server_side_encryption_by_default { + sse_algorithm = "AES256" + } + } +} + +resource "aws_s3_bucket_public_access_block" "p4_server_config_scripts" { + count = var.p4_server_config != null ? 
1 : 0 + bucket = aws_s3_bucket.p4_server_config_scripts[0].id + + block_public_acls = true + block_public_policy = true + ignore_public_acls = true + restrict_public_buckets = true +} + +################################################# +# Upload Configuration Scripts to S3 +################################################# +resource "aws_s3_object" "configure_primary_script" { + count = length(var.p4_server_replicas_config) > 0 ? 1 : 0 + bucket = aws_s3_bucket.p4_server_config_scripts[0].id + key = "configure_primary_for_replicas.sh" + source = "${path.module}/scripts/configure_primary_for_replicas.sh" + etag = filemd5("${path.module}/scripts/configure_primary_for_replicas.sh") + + tags = var.tags +} + +resource "aws_s3_object" "configure_replica_script" { + count = length(var.p4_server_replicas_config) > 0 ? 1 : 0 + bucket = aws_s3_bucket.p4_server_config_scripts[0].id + key = "configure_replica.sh" + source = "${path.module}/scripts/configure_replica.sh" + etag = filemd5("${path.module}/scripts/configure_replica.sh") + + tags = var.tags +} + +# TODO: Remove test script after SSM functionality is verified +resource "aws_s3_object" "test_ssm_script" { + count = length(var.p4_server_replicas_config) > 0 ? 1 : 0 + bucket = aws_s3_bucket.p4_server_config_scripts[0].id + key = "test_ssm_execution.sh" + source = "${path.module}/scripts/test_ssm_execution.sh" + etag = filemd5("${path.module}/scripts/test_ssm_execution.sh") + + tags = var.tags +} \ No newline at end of file diff --git a/modules/perforce/scripts/configure_primary_for_replicas.sh b/modules/perforce/scripts/configure_primary_for_replicas.sh new file mode 100644 index 00000000..37844d96 --- /dev/null +++ b/modules/perforce/scripts/configure_primary_for_replicas.sh @@ -0,0 +1,32 @@ +#!/bin/bash +REPLICA_LIST=$1 + +echo "Configuring primary server for replica support" +echo "Replica list: $REPLICA_LIST" + +# Create replication service user +echo "Creating replication service user..." 
+p4 user -f -i <&1) + P4_EXIT_CODE=$? + + if [ $P4_EXIT_CODE -eq 0 ]; then + echo "SUCCESS: Connected to primary server on attempt $i" + break + fi + + echo "ATTEMPT $i FAILED: $P4_OUTPUT" + if [ $i -eq 10 ]; then + echo "ERROR: Cannot connect to primary server $PRIMARY_FQDN:1666 after 10 attempts" + echo "Common causes:" + echo "- Primary server not running (check EC2 instance status)" + echo "- Network connectivity issues (check security groups/NACLs)" + echo "- DNS resolution failure (check Route53 records)" + echo "- Perforce service not started on primary" + exit 1 # Connection failure + fi + sleep 30 +done + +# Configure replica based on type +echo "Configuring replica type: $REPLICA_TYPE" +case $REPLICA_TYPE in + "standby") + echo "Setting up standby replica..." + if ! p4 configure set P4TARGET=$PRIMARY_FQDN:1666; then + echo "ERROR: Failed to set replication target" + exit 2 + fi + p4 configure set server.id=standby-replica + p4 configure set rpl.journalcopy.enable=1 + p4 configure set rpl.journalcopy.location=/p4/1/logs/journal + ;; + "forwarding") + echo "Setting up forwarding replica..." + if ! p4 configure set P4TARGET=$PRIMARY_FQDN:1666; then + echo "ERROR: Failed to set replication target" + exit 2 + fi + p4 configure set server.id=forwarding-replica + p4 configure set rpl.forward.enable=1 + p4 configure set rpl.pull.enable=1 + ;; + "readonly") + echo "Setting up readonly replica..." + if ! p4 configure set P4TARGET=$PRIMARY_FQDN:1666; then + echo "ERROR: Failed to set replication target" + exit 2 + fi + p4 configure set server.id=readonly-replica + p4 configure set rpl.pull.enable=1 + ;; + "edge") + echo "Setting up edge replica..." + if ! 
p4 configure set P4TARGET=$PRIMARY_FQDN:1666; then + echo "ERROR: Failed to set replication target" + exit 2 + fi + p4 configure set server.id=edge-replica + p4 configure set rpl.pull.enable=1 + p4 configure set rpl.pull.reload=1 + ;; + *) + echo "ERROR: Unknown replica type: $REPLICA_TYPE" + echo "Supported types: standby, forwarding, readonly, edge" + exit 3 # Invalid replica type + ;; +esac + +# Start replication +echo "Starting replication process..." +if ! p4d -r /p4/1 -p 1666 -d; then + echo "ERROR: Failed to start Perforce daemon" + exit 4 # Daemon start failure +fi + +# Verify replication is working +echo "Verifying replication..." +sleep 10 +if p4 pull -l | grep -q "up-to-date"; then + echo "SUCCESS: Replica configured and replication active" +else + echo "WARNING: Replication may not be fully synchronized yet" +fi + +echo "SUCCESS: Replica $REPLICA_TYPE configured successfully" \ No newline at end of file diff --git a/modules/perforce/scripts/test_ssm_execution.sh b/modules/perforce/scripts/test_ssm_execution.sh new file mode 100644 index 00000000..11a34356 --- /dev/null +++ b/modules/perforce/scripts/test_ssm_execution.sh @@ -0,0 +1,18 @@ +#!/bin/bash +# TODO: Remove this test script after SSM functionality is verified +# Simple test script to verify SSM execution + +TIMESTAMP=$(date '+%Y-%m-%d %H:%M:%S') +TEST_FILE="/var/log/ssm_test_execution.log" + +echo "[$TIMESTAMP] SSM script executed successfully" >> $TEST_FILE +echo "[$TIMESTAMP] Current user: $(whoami)" >> $TEST_FILE +echo "[$TIMESTAMP] Working directory: $(pwd)" >> $TEST_FILE +echo "[$TIMESTAMP] Available disk space:" >> $TEST_FILE +df -h >> $TEST_FILE +echo "[$TIMESTAMP] Test completed" >> $TEST_FILE + +# Also create a simple marker file +touch /var/log/ssm_executed_$(date +%s) + +echo "SUCCESS: Test script completed. 
Check /var/log/ssm_test_execution.log" \ No newline at end of file diff --git a/modules/perforce/ssm.tf b/modules/perforce/ssm.tf new file mode 100644 index 00000000..f51a3dc2 --- /dev/null +++ b/modules/perforce/ssm.tf @@ -0,0 +1,90 @@ +################################################# +# SSM Associations for P4 Replica Configuration +################################################# + +# Configure primary server for replication +resource "aws_ssm_association" "configure_primary" { + count = length(var.p4_server_replicas_config) > 0 ? 1 : 0 + + name = "AWS-RunShellScript" + + targets { + key = "InstanceIds" + values = [module.p4_server[0].instance_id] + } + + parameters = { + commands = join("\n", [ + "aws s3 cp s3://${aws_s3_bucket.p4_server_config_scripts[0].id}/configure_primary_for_replicas.sh /tmp/", + "chmod +x /tmp/configure_primary_for_replicas.sh", + "/tmp/configure_primary_for_replicas.sh" + ]) + } + + depends_on = [ + module.p4_server, + aws_s3_object.configure_primary_script + ] + + tags = var.tags +} + +# TODO: Remove test SSM execution after functionality is verified +# Test SSM execution on primary server +resource "aws_ssm_association" "test_ssm_primary" { + count = length(var.p4_server_replicas_config) > 0 ? 
1 : 0 + + name = "AWS-RunShellScript" + + targets { + key = "InstanceIds" + values = [module.p4_server[0].instance_id] + } + + parameters = { + commands = join("\n", [ + "aws s3 cp s3://${aws_s3_bucket.p4_server_config_scripts[0].id}/test_ssm_execution.sh /tmp/", + "chmod +x /tmp/test_ssm_execution.sh", + "/tmp/test_ssm_execution.sh" + ]) + } + + depends_on = [ + module.p4_server, + aws_s3_object.test_ssm_script + ] + + tags = var.tags +} + +# Configure replica servers +resource "aws_ssm_association" "configure_replicas" { + for_each = var.p4_server_replicas_config + + name = "AWS-RunShellScript" + + targets { + key = "InstanceIds" + values = [module.p4_server_replicas[each.key].instance_id] + } + + parameters = { + commands = join("\n", [ + # TODO: Remove test execution lines after SSM functionality is verified + "aws s3 cp s3://${aws_s3_bucket.p4_server_config_scripts[0].id}/test_ssm_execution.sh /tmp/", + "chmod +x /tmp/test_ssm_execution.sh", + "/tmp/test_ssm_execution.sh", + "aws s3 cp s3://${aws_s3_bucket.p4_server_config_scripts[0].id}/configure_replica.sh /tmp/", + "chmod +x /tmp/configure_replica.sh", + "/tmp/configure_replica.sh ${var.p4_server_config.fully_qualified_domain_name} ${each.value.replica_type}" + ]) + } + + depends_on = [ + aws_ssm_association.configure_primary, + aws_s3_object.configure_replica_script, + aws_s3_object.test_ssm_script + ] + + tags = var.tags +} \ No newline at end of file diff --git a/modules/perforce/tests/03_p4_server_replicas.tftest.hcl b/modules/perforce/tests/03_p4_server_replicas.tftest.hcl new file mode 100644 index 00000000..cabf464a --- /dev/null +++ b/modules/perforce/tests/03_p4_server_replicas.tftest.hcl @@ -0,0 +1,32 @@ +run "replica_validation" { + command = plan + + variables { + route53_public_hosted_zone_name = "example.com" + } + + module { + source = "./examples/replica-single-region" + } + + # Verify replica configuration is properly parsed + assert { + condition = 
length(keys(var.p4_server_replicas_config)) == 2 + error_message = "Should have exactly 2 replicas configured" + } + + # Verify replica types are valid + assert { + condition = alltrue([ + for k, v in var.p4_server_replicas_config : + contains(["standby", "readonly"], v.replica_type) + ]) + error_message = "All replica types should be valid" + } + + # Verify replica domains are generated correctly + assert { + condition = length(local.replica_domains) == 2 + error_message = "Should generate domains for all replicas" + } +} \ No newline at end of file diff --git a/modules/perforce/variables.tf b/modules/perforce/variables.tf index 91ff7796..82bbe1fb 100644 --- a/modules/perforce/variables.tf +++ b/modules/perforce/variables.tf @@ -428,6 +428,113 @@ variable "p4_auth_config" { } +######################################## +# P4 Server Replicas +######################################## +variable "p4_server_replicas_config" { + description = "Map of P4 server replica configurations" + type = map(object({ + # Replica-specific fields + replica_type = optional(string, "readonly") + subdomain = optional(string, null) + + # General (inherits from primary if not specified) + name = optional(string, null) + project_prefix = optional(string, null) + environment = optional(string, null) + fully_qualified_domain_name = optional(string, null) # Auto-generated if not provided + + # Compute (inherits from primary if not specified) + lookup_existing_ami = optional(bool, null) + ami_prefix = optional(string, null) + instance_type = optional(string, null) + instance_architecture = optional(string, null) + unicode = optional(bool, null) + selinux = optional(bool, null) + case_sensitive = optional(bool, null) + plaintext = optional(bool, null) + + # Storage (inherits from primary if not specified) + storage_type = optional(string, null) + depot_volume_size = optional(number, null) + metadata_volume_size = optional(number, null) + logs_volume_size = optional(number, null) + + # Networking 
& Security (replica-specific) + vpc_id = string + instance_subnet_id = string + instance_private_ip = optional(string, null) + create_default_sg = optional(bool, null) + existing_security_groups = optional(list(string), null) + internal = optional(bool, null) + + # Credentials (inherits from primary if not specified) + super_user_password_secret_arn = optional(string, null) + super_user_username_secret_arn = optional(string, null) + create_default_role = optional(bool, null) + custom_role = optional(string, null) + + # FSxN (inherits from primary if not specified) + fsxn_password = optional(string, null) + fsxn_filesystem_security_group_id = optional(string, null) + protocol = optional(string, null) + fsxn_region = optional(string, null) + fsxn_management_ip = optional(string, null) + fsxn_svm_name = optional(string, null) + amazon_fsxn_svm_id = optional(string, null) + fsxn_aws_profile = optional(string, null) + })) + default = {} + + validation { + condition = alltrue([ + for k, v in var.p4_server_replicas_config : + contains(["standby", "forwarding", "readonly", "edge"], v.replica_type) + ]) + error_message = "Replica type must be one of: standby, forwarding, readonly, edge" + } + + validation { + condition = var.p4_server_config == null ? true : alltrue([ + for k, v in var.p4_server_replicas_config : + v.storage_type == null || v.storage_type == var.p4_server_config.storage_type + ]) + error_message = "Replica storage_type must match primary server storage_type for replication compatibility" + } + + validation { + condition = var.p4_server_config == null ? true : alltrue([ + for k, v in var.p4_server_replicas_config : + v.depot_volume_size == null || v.depot_volume_size >= var.p4_server_config.depot_volume_size + ]) + error_message = "Replica depot_volume_size must be >= primary server depot_volume_size for replication" + } + + validation { + condition = var.p4_server_config == null ? 
true : alltrue([ + for k, v in var.p4_server_replicas_config : + v.metadata_volume_size == null || v.metadata_volume_size >= var.p4_server_config.metadata_volume_size + ]) + error_message = "Replica metadata_volume_size must be >= primary server metadata_volume_size for replication" + } + + validation { + condition = var.p4_server_config == null ? true : alltrue([ + for k, v in var.p4_server_replicas_config : + v.unicode == null || v.unicode == var.p4_server_config.unicode + ]) + error_message = "Replica unicode setting must match primary server for P4 compatibility" + } + + validation { + condition = var.p4_server_config == null ? true : alltrue([ + for k, v in var.p4_server_replicas_config : + v.case_sensitive == null || v.case_sensitive == var.p4_server_config.case_sensitive + ]) + error_message = "Replica case_sensitive setting must match primary server for P4 compatibility" + } +} + ######################################## # P4 Code Review ########################################