| `[]` | no |
+| [node-group-name](#input\_node-group-name) | n/a | `string` | `"codepipes-cdn-node-group"` | no |
+| [role-eks-demo-node](#input\_role-eks-demo-node) | n/a | `string` | `"codepipes-cdn-eks-demo-node"` | no |
+| [vpc-eks-tag-name](#input\_vpc-eks-tag-name) | n/a | `string` | `"codepipes-cdn-eks-demo-tag-name"` | no |
+| [wait\_for\_cluster\_cmd](#input\_wait\_for\_cluster\_cmd) | Custom local-exec command to execute for determining if the eks cluster is healthy. Cluster endpoint will be available as an environment variable called ENDPOINT | `string` | `" apk add curl; for i in `seq 1 60`; do curl -k $ENDPOINT/healthz >/dev/null && exit 0 || true; sleep 5; done; echo TIMEOUT && exit 1"` | no |
+| [wait\_for\_cluster\_interpreter](#input\_wait\_for\_cluster\_interpreter) | Custom local-exec command line interpreter for the command for determining if the eks cluster is healthy. | `list(string)` | <pre>[<br>  "/bin/sh",<br>  "-c"<br>]</pre> | no |
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| [certificate\_arn](#output\_certificate\_arn) | The ARN of the certificate that is being validated. |
+| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for your Kubernetes API server. |
+| [cluster\_name](#output\_cluster\_name) | Name of the cluster |
+| [cluster\_region](#output\_cluster\_region) | Cluster Region |
+| [config\_map\_aws\_auth](#output\_config\_map\_aws\_auth) | Generated AWS Auth Config Map |
+| [eks\_arn](#output\_eks\_arn) | ARN of the cluster role. |
+| [kubeconfig](#output\_kubeconfig) | kubeconfig file |
+| [node\_arn](#output\_node\_arn) | ARN of the node role. |
+| [rds\_instance\_address](#output\_rds\_instance\_address) | The hostname of the RDS instance. |
+| [rds\_instance\_endpoint](#output\_rds\_instance\_endpoint) | The connection endpoint in address:port format. |
+| [rds\_instance\_id](#output\_rds\_instance\_id) | The RDS instance id. |
+| [redis\_endpoint](#output\_redis\_endpoint) | Elasticache redis connection endpoint in address:port format. |
+| [redis\_hostname](#output\_redis\_hostname) | Elasticache redis address |
+| [redis\_port](#output\_redis\_port) | Elasticache redis port |
+| [redis\_security\_group\_id](#output\_redis\_security\_group\_id) | ID of the elasticache security group. |
+| [security\_group\_id](#output\_security\_group\_id) | ID of the db security group. |
+| [subnet\_group\_id](#output\_subnet\_group\_id) | The db subnet group name. |
+
\ No newline at end of file
diff --git a/tfs/aws-eks-vpc/aws-auth.tf b/tfs/aws-eks-vpc/aws-auth.tf
new file mode 100644
index 00000000..46209722
--- /dev/null
+++ b/tfs/aws-eks-vpc/aws-auth.tf
@@ -0,0 +1,55 @@
+locals {
+ # IAM role ARNs to grant cluster access via the aws-auth ConfigMap.
+ # NOTE(review): this list includes the CLUSTER (control-plane) role as well as
+ # the node role; mapping the cluster role into aws-auth as a "worker" with
+ # system:masters is unusual — confirm this is intentional.
+ workers_role_arns = [aws_iam_role.cluster.arn, aws_iam_role.node.arn]
+
+ # Add worker nodes role ARNs (could be from many un-managed worker groups) to the ConfigMap
+ # Note that we don't need to do this for managed Node Groups since EKS adds their roles to the ConfigMap automatically
+ # Each entry maps a role to Kubernetes username "<role_arn>-user" in the
+ # system:bootstrappers and system:masters groups.
+ # NOTE(review): un-managed worker-node mappings conventionally use
+ # username "system:node:{{EC2PrivateDNSName}}" and group "system:nodes" so
+ # kubelets can register — verify nodes join correctly with this mapping.
+ map_worker_roles = [
+ for role_arn in local.workers_role_arns : {
+ rolearn : role_arn
+ username : "${role_arn}-user"
+ groups : [
+ "system:bootstrappers",
+ "system:masters"
+ ]
+ }
+ ]
+}
+
+
+# Kubernetes provider pointed at the EKS cluster created in eks-cluster.tf.
+# Authenticates with the cluster CA plus a short-lived token from
+# data.aws_eks_cluster_auth (declared elsewhere in this module).
+provider "kubernetes" {
+ host = local.cluster_endpoint
+ cluster_ca_certificate = base64decode(aws_eks_cluster.demo.certificate_authority.0.data)
+ token = data.aws_eks_cluster_auth.cluster_auth.token
+}
+
+locals {
+ # API server endpoint of the EKS cluster; used by the kubernetes provider above.
+ cluster_endpoint = aws_eks_cluster.demo.endpoint
+}
+
+# Creates the kube-system/aws-auth ConfigMap that grants the IAM roles in
+# local.map_worker_roles access to the cluster.
+# NOTE(review): this file also provisions a managed node group (eks-worker-nodes.tf),
+# and EKS itself maintains aws-auth entries for managed node groups — confirm
+# Terraform owning this ConfigMap does not conflict with EKS's own updates.
+resource "kubernetes_config_map" "aws_auth" {
+ depends_on = [aws_eks_cluster.demo]
+
+ metadata {
+ name = "aws-auth"
+ namespace = "kube-system"
+ }
+
+ data = {
+ # concat() with a single list argument is a no-op here; effectively
+ # yamlencode(distinct(local.map_worker_roles)).
+ mapRoles = yamlencode(distinct(concat(local.map_worker_roles)))
+ }
+}
+
+# Blocks until the EKS API server responds, by running a user-configurable
+# local-exec command (default polls $ENDPOINT/healthz — see variables).
+# The cluster endpoint is injected as the ENDPOINT environment variable.
+resource "null_resource" "wait_for_cluster" {
+ depends_on = [
+ aws_eks_cluster.demo,
+ aws_security_group.demo-cluster
+ ]
+
+ provisioner "local-exec" {
+ command = var.wait_for_cluster_cmd
+ interpreter = var.wait_for_cluster_interpreter
+ environment = {
+ ENDPOINT = aws_eks_cluster.demo.endpoint
+ }
+ }
+}
diff --git a/tfs/aws-eks-vpc/certificates.tf b/tfs/aws-eks-vpc/certificates.tf
new file mode 100644
index 00000000..1a079615
--- /dev/null
+++ b/tfs/aws-eks-vpc/certificates.tf
@@ -0,0 +1,36 @@
+# Wildcard ACM certificate for *.<domain_name>, validated via DNS.
+# Only created when var.certificate_enabled is true (count gate shared with the
+# zone lookup and validation resources below).
+# NOTE(review): the aws provider docs recommend create_before_destroy = true on
+# ACM certificates to avoid downtime on replacement — consider adding it.
+resource "aws_acm_certificate" "main" {
+ count = var.certificate_enabled ? 1 : 0
+ domain_name = "*.${var.domain_name}"
+ validation_method = "DNS"
+}
+
+# Looks up the existing public hosted zone for var.domain_name so DNS
+# validation records can be written into it. Gated on certificate_enabled.
+data "aws_route53_zone" "main" {
+ count = var.certificate_enabled ? 1 : 0
+ name = var.domain_name
+ private_zone = false
+}
+
+# One DNS validation record per domain_validation_option of the certificate,
+# keyed by validated domain name.
+resource "aws_route53_record" "main" {
+ for_each = {
+ # flatten() over aws_acm_certificate.main handles both count=0 and count=1:
+ # when the certificate is disabled this map is empty, so no records are
+ # created and data.aws_route53_zone.main[0] below is never evaluated.
+ for dvo in flatten([
+ for cert in aws_acm_certificate.main: cert.domain_validation_options
+ ]): dvo.domain_name => {
+ name = dvo.resource_record_name
+ record = dvo.resource_record_value
+ type = dvo.resource_record_type
+ }
+ }
+
+ allow_overwrite = true
+ name = each.value.name
+ records = [each.value.record]
+ ttl = 60
+ type = each.value.type
+ zone_id = data.aws_route53_zone.main[0].zone_id
+}
+
+# Waits for ACM to observe the DNS validation records above, completing
+# certificate issuance. Gated on certificate_enabled like the certificate.
+resource "aws_acm_certificate_validation" "main" {
+ count = var.certificate_enabled ? 1 : 0
+ certificate_arn = aws_acm_certificate.main[count.index].arn
+ validation_record_fqdns = [for record in aws_route53_record.main : record.fqdn]
+}
\ No newline at end of file
diff --git a/tfs/aws-eks-vpc/db.tf b/tfs/aws-eks-vpc/db.tf
new file mode 100644
index 00000000..893afda3
--- /dev/null
+++ b/tfs/aws-eks-vpc/db.tf
@@ -0,0 +1,38 @@
+# Subnet group for the RDS instance, spanning the dedicated DB subnets
+# (aws_subnet.dbsubnet, defined elsewhere in this module). The random_string
+# suffix keeps the name unique across parallel deployments.
+resource "aws_db_subnet_group" "database" {
+ depends_on =[aws_subnet.dbsubnet]
+ name = "aws_db_subnet_group-demo-${random_string.role.id}"
+ subnet_ids = aws_subnet.dbsubnet[*].id
+ tags = {
+ Name = "DB subnet group"
+ }
+}
+
+# Security group for the database: Postgres (5432) in, everything out.
+resource "aws_security_group" "dbsg" {
+ depends_on =[aws_subnet.dbsubnet]
+ name = "db-${random_string.role.id}"
+ description = "security group for db"
+ vpc_id = aws_vpc.demo.id
+
+
+ # Allowing traffic only for Postgres and that too from same VPC only.
+ # NOTE(review): the CIDR comes from var.cluster_ipv4_cidr — presumably the
+ # VPC CIDR; confirm it matches aws_vpc.demo's cidr_block.
+ ingress {
+ description = "POSTGRES"
+ from_port = 5432
+ to_port = 5432
+ protocol = "tcp"
+ cidr_blocks = [var.cluster_ipv4_cidr]
+ }
+
+
+ # Allowing all outbound traffic
+ egress {
+ from_port = 0
+ to_port = 0
+ protocol = "-1"
+ cidr_blocks = ["0.0.0.0/0"]
+ }
+
+ tags = {
+ Name = "db-sg"
+ }
+}
diff --git a/tfs/aws-eks-vpc/eks-cluster.tf b/tfs/aws-eks-vpc/eks-cluster.tf
new file mode 100644
index 00000000..4af95a39
--- /dev/null
+++ b/tfs/aws-eks-vpc/eks-cluster.tf
@@ -0,0 +1,61 @@
+#
+# EKS Cluster Resources
+# * IAM Role to allow EKS service to manage other AWS services
+# * EC2 Security Group to allow networking traffic with EKS cluster
+# * EKS Cluster
+
+# resource "aws_cloudwatch_log_group" "demo" {
+# # The log group name format is /aws/eks//cluster
+# # Reference: https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html
+# name = "/aws/eks/${aws_eks_cluster.demo.name}/cluster"
+# retention_in_days = 7
+# # ... potentially other configuration ...
+# }
+
+# Security group attached to the EKS control plane ENIs (see vpc_config of
+# aws_eks_cluster.demo). Only egress is defined here; the workstation HTTPS
+# ingress rule is added separately below.
+resource "aws_security_group" "demo-cluster" {
+ depends_on = [aws_vpc.demo]
+ name = "eks-demo-cluster-sg-${random_string.cluster.id}"
+ description = "Cluster communication with worker nodes"
+ vpc_id = aws_vpc.demo.id
+
+ egress {
+ from_port = 0
+ to_port = 0
+ protocol = "-1"
+ cidr_blocks = ["0.0.0.0/0"]
+ }
+
+ tags = {
+ Name = "terraform-eks-demo"
+ }
+}
+
+# Opens the cluster API server (443) to the operator's workstation.
+# local.workstation-external-cidr is defined elsewhere in this module —
+# presumably the caller's public IP as a /32; confirm it is not a broad range.
+resource "aws_security_group_rule" "demo-cluster-ingress-workstation-https" {
+ depends_on = [aws_security_group.demo-cluster]
+ cidr_blocks = [local.workstation-external-cidr]
+ description = "Allow workstation to communicate with the cluster API Server"
+ from_port = 443
+ protocol = "tcp"
+ security_group_id = aws_security_group.demo-cluster.id
+ to_port = 443
+ type = "ingress"
+}
+
+# The EKS control plane. All five control-plane log types are enabled.
+# NOTE(review): the aws_cloudwatch_log_group resource above is commented out,
+# so EKS will auto-create the /aws/eks/<name>/cluster log group with no
+# retention policy and it will be left behind on destroy — confirm intended.
+resource "aws_eks_cluster" "demo" {
+ enabled_cluster_log_types = ["api", "audit","authenticator","controllerManager","scheduler"]
+ name = "${var.cluster-name}-${random_string.cluster.id}"
+ role_arn = aws_iam_role.cluster.arn
+
+ vpc_config {
+ security_group_ids = [aws_security_group.demo-cluster.id]
+ subnet_ids = aws_subnet.demo[*].id
+ }
+
+ # Ensure networking and the cluster-role policy attachments exist before the
+ # control plane is created (and outlive it during destroy).
+ depends_on = [
+ aws_internet_gateway.demo,
+ aws_security_group.demo-cluster,
+ aws_iam_role_policy_attachment.cluster-AmazonEKSClusterPolicy,
+ aws_iam_role_policy_attachment.cluster-AmazonEKSVPCResourceController,
+ aws_iam_role_policy_attachment.cluster-AmazonVPCFullAccess,
+ ]
+}
diff --git a/tfs/aws-eks-vpc/eks-worker-nodes.tf b/tfs/aws-eks-vpc/eks-worker-nodes.tf
new file mode 100644
index 00000000..809e9337
--- /dev/null
+++ b/tfs/aws-eks-vpc/eks-worker-nodes.tf
@@ -0,0 +1,90 @@
+#
+# EKS Worker Nodes Resources
+# * IAM role allowing Kubernetes actions to access other AWS services
+# * EKS Node Group to launch worker nodes
+#
+
+# Managed node group for the cluster, placed in the same subnets as the
+# control plane.
+resource "aws_eks_node_group" "demo" {
+ cluster_name = aws_eks_cluster.demo.name
+ node_group_name = var.node-group-name
+ node_role_arn = aws_iam_role.node.arn
+ subnet_ids = aws_subnet.demo[*].id
+
+ # Fixed single-node group (no autoscaling headroom).
+ # NOTE(review): consider exposing these sizes as variables.
+ scaling_config {
+ desired_size = 1
+ max_size = 1
+ min_size = 1
+ }
+
+ # aws-auth must exist and the node-role policies must be attached before
+ # nodes launch, or they would fail to register with the cluster.
+ depends_on = [
+ kubernetes_config_map.aws_auth,
+ aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodePolicy,
+ aws_iam_role_policy_attachment.node-AmazonEKS_CNI_Policy,
+ aws_iam_role_policy_attachment.node-AmazonEC2ContainerRegistryReadOnly,
+ ]
+
+}
+
+resource "null_resource" "delete_ingress" {
+ triggers = {
+ cluster_delete_name = aws_eks_cluster.demo.name
+ cluster_region = var.aws_region
+ }
+ depends_on = [
+ helm_release.aws_alb_controller,
+ aws_security_group.demo-cluster,
+ aws_iam_role_policy_attachment.cluster-AmazonEKSClusterPolicy,
+ aws_iam_role_policy_attachment.cluster-AmazonEKSVPCResourceController,
+ aws_iam_role_policy_attachment.cluster-AmazonVPCFullAccess,
+ aws_iam_role_policy_attachment.cluster-AmazonEKSServicePolicy,
+ aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodePolicy,
+ aws_iam_role_policy_attachment.node-AmazonEC2ContainerRegistryReadOnly,
+ aws_iam_role_policy_attachment.node-AmazonVPCFullAccess,
+ aws_iam_instance_profile.node,
+ aws_iam_role_policy_attachment.node-AWSLoadBalancerControllerIAMPolicy,
+ aws_iam_role_policy_attachment.cluster-AWSVisualEditorPolicy,
+ aws_iam_openid_connect_provider.cluster,
+ aws_iam_role.cluster,
+ aws_iam_role.node,
+ aws_iam_policy.AWSLoadBalancerControllerIAMPolicy,
+ aws_route_table_association.demo,
+ module.container-insights,
+ aws_route_table.demo,
+ aws_security_group_rule.demo-cluster-ingress-workstation-https
+ ]
+
+ provisioner "local-exec" {
+ when = destroy
+ command = <