diff --git a/tfs/aws-eks-vpc/aws-auth.tf b/tfs/aws-eks-vpc/aws-auth.tf
new file mode 100644
index 00000000..f33a39e5
--- /dev/null
+++ b/tfs/aws-eks-vpc/aws-auth.tf
@@ -0,0 +1,58 @@
+locals {
+  workers_role_arns = [aws_iam_role.cluster.arn, aws_iam_role.node.arn]
+
+  # Map each un-managed worker/cluster role ARN into an aws-auth mapRoles entry (managed Node Groups are added by EKS automatically).
+  # NOTE(review): granting "system:masters" to node roles gives them full cluster-admin — confirm this is intended; "system:nodes" is the usual group for workers.
+  map_worker_roles = [
+    for role_arn in local.workers_role_arns : {
+      rolearn : role_arn
+      username : "${role_arn}-user"
+      groups : [
+        "system:bootstrappers",
+        "system:masters"
+      ]
+    }
+  ]
+}
+
+
+provider "kubernetes" {
+  host                   = data.null_data_source.cluster.outputs["cluster_endpoint"]
+  cluster_ca_certificate = base64decode(aws_eks_cluster.demo.certificate_authority[0].data)
+  token                  = data.aws_eks_cluster_auth.cluster_auth.token
+}
+
+data "null_data_source" "cluster" {
+  inputs = {
+    cluster_endpoint = aws_eks_cluster.demo.endpoint
+  }
+}
+
+
+resource "kubernetes_config_map" "aws_auth" {
+  depends_on = [aws_eks_cluster.demo]
+
+  metadata {
+    name      = "aws-auth"
+    namespace = "kube-system"
+  }
+
+  data = {
+    mapRoles = yamlencode(local.map_worker_roles)
+  }
+}
+
+resource "null_resource" "wait_for_cluster" {
+  depends_on = [
+    aws_eks_cluster.demo,
+    aws_security_group.demo-cluster
+  ]
+
+  provisioner "local-exec" {
+    command     = var.wait_for_cluster_cmd
+    interpreter = var.wait_for_cluster_interpreter
+    environment = {
+      ENDPOINT = aws_eks_cluster.demo.endpoint
+    }
+  }
+}
\ No newline at end of file
diff --git a/tfs/aws-eks-vpc/eks-cluster.tf b/tfs/aws-eks-vpc/eks-cluster.tf
new file mode 100644
index 00000000..4af95a39
--- /dev/null
+++ b/tfs/aws-eks-vpc/eks-cluster.tf
@@ -0,0 +1,61 @@
+#
+# EKS Cluster Resources
+# * IAM Role to allow EKS service to manage other AWS services
+# * EC2 Security Group to allow networking traffic with EKS cluster
+# * EKS Cluster
+
+# 
resource "aws_cloudwatch_log_group" "demo" {
+#   # The log group name format is /aws/eks//cluster
+#   # Reference: https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html
+#   name              = "/aws/eks/${aws_eks_cluster.demo.name}/cluster"
+#   retention_in_days = 7
+#   # ... potentially other configuration ...
+# }
+
+resource "aws_security_group" "demo-cluster" {
+  depends_on  = [aws_vpc.demo]
+  name        = "eks-demo-cluster-sg-${random_string.cluster.id}"
+  description = "Cluster communication with worker nodes"
+  vpc_id      = aws_vpc.demo.id
+
+  egress {
+    from_port   = 0
+    to_port     = 0
+    protocol    = "-1"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+
+  tags = {
+    Name = "terraform-eks-demo"
+  }
+}
+
+resource "aws_security_group_rule" "demo-cluster-ingress-workstation-https" {
+  depends_on        = [aws_security_group.demo-cluster]
+  cidr_blocks       = [local.workstation-external-cidr]
+  description       = "Allow workstation to communicate with the cluster API Server"
+  from_port         = 443
+  protocol          = "tcp"
+  security_group_id = aws_security_group.demo-cluster.id
+  to_port           = 443
+  type              = "ingress"
+}
+
+resource "aws_eks_cluster" "demo" {
+  enabled_cluster_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"]
+  name                      = "${var.cluster-name}-${random_string.cluster.id}"
+  role_arn                  = aws_iam_role.cluster.arn
+
+  vpc_config {
+    security_group_ids = [aws_security_group.demo-cluster.id]
+    subnet_ids         = aws_subnet.demo[*].id
+  }
+
+  depends_on = [
+    aws_internet_gateway.demo,
+    aws_security_group.demo-cluster,
+    aws_iam_role_policy_attachment.cluster-AmazonEKSClusterPolicy,
+    aws_iam_role_policy_attachment.cluster-AmazonEKSVPCResourceController,
+    aws_iam_role_policy_attachment.cluster-AmazonVPCFullAccess,
+  ]
+}
diff --git a/tfs/aws-eks-vpc/eks-worker-nodes.tf b/tfs/aws-eks-vpc/eks-worker-nodes.tf
new file mode 100644
index 00000000..809e9337
--- /dev/null
+++ b/tfs/aws-eks-vpc/eks-worker-nodes.tf
@@ -0,0 +1,90 @@
+#
+# EKS Worker Nodes Resources
+# * IAM role allowing Kubernetes actions 
to access other AWS services +# * EKS Node Group to launch worker nodes +# + +resource "aws_eks_node_group" "demo" { + cluster_name = aws_eks_cluster.demo.name + node_group_name = var.node-group-name + node_role_arn = aws_iam_role.node.arn + subnet_ids = aws_subnet.demo[*].id + + scaling_config { + desired_size = 1 + max_size = 1 + min_size = 1 + } + + depends_on = [ + kubernetes_config_map.aws_auth, + aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodePolicy, + aws_iam_role_policy_attachment.node-AmazonEKS_CNI_Policy, + aws_iam_role_policy_attachment.node-AmazonEC2ContainerRegistryReadOnly, + ] + +} + +resource "null_resource" "delete_ingress" { + triggers = { + cluster_delete_name = aws_eks_cluster.demo.name + cluster_region = var.aws_region + } + depends_on = [ + helm_release.aws_alb_controller, + aws_security_group.demo-cluster, + aws_iam_role_policy_attachment.cluster-AmazonEKSClusterPolicy, + aws_iam_role_policy_attachment.cluster-AmazonEKSVPCResourceController, + aws_iam_role_policy_attachment.cluster-AmazonVPCFullAccess, + aws_iam_role_policy_attachment.cluster-AmazonEKSServicePolicy, + aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodePolicy, + aws_iam_role_policy_attachment.node-AmazonEC2ContainerRegistryReadOnly, + aws_iam_role_policy_attachment.node-AmazonVPCFullAccess, + aws_iam_instance_profile.node, + aws_iam_role_policy_attachment.node-AWSLoadBalancerControllerIAMPolicy, + aws_iam_role_policy_attachment.cluster-AWSVisualEditorPolicy, + aws_iam_openid_connect_provider.cluster, + aws_iam_role.cluster, + aws_iam_role.node, + aws_iam_policy.AWSLoadBalancerControllerIAMPolicy, + aws_route_table_association.demo, + module.container-insights, + aws_route_table.demo, + aws_security_group_rule.demo-cluster-ingress-workstation-https + ] + + provisioner "local-exec" { + when = destroy + command = <