From 25c9650645ce130ba13f95cf9ba89850fc7f98ce Mon Sep 17 00:00:00 2001
From: Deepak Verma
Date: Wed, 9 Aug 2023 15:44:15 +0530
Subject: [PATCH] feat: default variable removed

---
 _example/aws_managed/example.tf              | 111 ++++++-----------
 _example/aws_managed/versions.tf             |   4 +-
 _example/aws_managed_with_fargate/example.tf | 120 +++++++------------
 _example/complete/example.tf                 |  96 ++++-----------
 _example/self_managed/example.tf             |  76 ++++--------
 fargate_profile.tf                           |  22 ++--
 node_group/fargate_profile/fargate.tf        |  12 +-
 variables.tf                                 |  24 +++-
 versions.tf                                  |   4 +-
 9 files changed, 163 insertions(+), 306 deletions(-)

diff --git a/_example/aws_managed/example.tf b/_example/aws_managed/example.tf
index 74613b5..a1073b6 100644
--- a/_example/aws_managed/example.tf
+++ b/_example/aws_managed/example.tf
@@ -3,48 +3,45 @@ provider "aws" {
 }
 
 locals {
-
-  name   = "clouddrove-eks"
-  region = "eu-west-1"
+  name           = "clouddrove-eks"
+  region         = "eu-west-1"
+  vpc_cidr_block = module.vpc.vpc_cidr_block
+  environment    = "test"
+  label_order    = ["name", "environment"]
 
   tags = {
     "kubernetes.io/cluster/${module.eks.cluster_name}" = "owned"
   }
 }
 
 ################################################################################
-# VPC
+# VPC module call
 ################################################################################
-
 module "vpc" {
   source  = "clouddrove/vpc/aws"
   version = "2.0.0"
 
   name        = "${local.name}-vpc"
-  environment = "test"
-  label_order = ["environment", "name"]
-
-  cidr_block = "10.10.0.0/16"
+  environment = local.environment
+  cidr_block  = "10.10.0.0/16"
 }
 
 # ################################################################################
-# # Subnets
+# # Subnets module call
 # ################################################################################
 
 module "subnets" {
   source  = "clouddrove/subnet/aws"
   version = "2.0.0"
 
-  name        = "${local.name}-subnet"
-  environment = "test"
-  label_order = ["environment", "name"]
-
+  name                = "${local.name}-subnet"
+  environment         = local.environment
   nat_gateway_enabled = true
   single_nat_gateway  = true
   availability_zones  = ["${local.region}a", "${local.region}b", "${local.region}c"]
   vpc_id              = module.vpc.vpc_id
   type                = "public-private"
   igw_id              = module.vpc.igw_id
-  cidr_block          = module.vpc.vpc_cidr_block
+  cidr_block          = local.vpc_cidr_block
   ipv6_cidr_block     = module.vpc.ipv6_cidr_block
   enable_ipv6         = false
 
@@ -126,23 +123,21 @@ module "subnets" {
 }
 
 ################################################################################
-# Keypair
+# Keypair module call
 ################################################################################
-
 module "keypair" {
   source  = "clouddrove/keypair/aws"
   version = "1.3.0"
 
-  name        = "${local.name}-key"
-  environment = "test"
-  label_order = ["name", "environment"]
-
+  name            = "${local.name}-key"
+  environment     = local.environment
+  label_order     = local.label_order
   enable_key_pair = true
   public_key      = "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQDc4AjHFctUATtd5of4u9bJtTgkh9bKogSDjxc9QqbylRORxUa422jO+t1ldTVdyqDRKltxQCJb4v23HZc2kssU5uROxpiF2fzgiHXRduL+RtyOtY2J+rNUdCRmHz4WQySblYpgteIJZpVo2smwdek8xSpjoHXhgxxa9hb4pQQwyjtVGEdH8vdYwtxgPZgPVaJgHVeJgVmhjTf2VGTATaeR9txzHsEPxhe/n1y34mQjX0ygEX8x0RZzlGziD1ih3KPaIHcpTVSYYk4LOoMK38vEI67SIMomskKn4yU043s+t9ZriJwk2V9+oU6tJU/5E1rd0SskXUhTypc3/Znc/rkYtLe8s6Uy26LOrBFzlhnCT7YH1XbCv3rEO+Nn184T4BSHeW2up8UJ1SOEd+WzzynXczdXoQcBN2kaz4dYFpRXchsAB6ejZrbEq7wyZvutf11OiS21XQ67+30lEL2WAO4i95e4sI8AdgwJgzrqVcicr3ImE+BRDkndMn5k1LhNGqwMD3Iuoel84xvinPAcElDLiFmL3BJVA/53bAlUmWqvUGW9SL5JpLUmZgE6kp+Tps7D9jpooGGJKmqgJLkJTzAmTSJh0gea/rT5KwI4j169TQD9xl6wFqns4BdQ4dMKHQCgDx8LbEd96l9F9ruWwQ8EAZBe4nIEKTV9ri+04JVhSQ== hello@clouddrove.com" } # ################################################################################ -# Security Groups +# Security Groups module call ################################################################################ module "ssh" { @@ -150,16 +145,14 @@ module "ssh" { version = "2.0.0" name = "${local.name}-ssh" - environment = "test" - label_order = ["environment", "name"] - - vpc_id = module.vpc.vpc_id + environment = local.environment + vpc_id = module.vpc.vpc_id new_sg_ingress_rules_with_cidr_blocks = [{ rule_count = 1 from_port = 22 protocol = "tcp" to_port = 22 - cidr_blocks = [module.vpc.vpc_cidr_block, "172.16.0.0/16"] + cidr_blocks = [local.vpc_cidr_block, "172.16.0.0/16"] description = "Allow ssh traffic." }, { @@ -178,7 +171,7 @@ module "ssh" { from_port = 22 protocol = "tcp" to_port = 22 - cidr_blocks = [module.vpc.vpc_cidr_block, "172.16.0.0/16"] + cidr_blocks = [local.vpc_cidr_block, "172.16.0.0/16"] description = "Allow ssh outbound traffic." }, { @@ -196,8 +189,7 @@ module "http_https" { version = "2.0.0" name = "${local.name}-http-https" - environment = "test" - label_order = ["name", "environment"] + environment = local.environment vpc_id = module.vpc.vpc_id ## INGRESS Rules @@ -206,7 +198,7 @@ module "http_https" { from_port = 22 protocol = "tcp" to_port = 22 - cidr_blocks = [module.vpc.vpc_cidr_block] + cidr_blocks = [local.vpc_cidr_block] description = "Allow ssh traffic." }, { @@ -214,7 +206,7 @@ module "http_https" { from_port = 80 protocol = "tcp" to_port = 80 - cidr_blocks = [module.vpc.vpc_cidr_block] + cidr_blocks = [local.vpc_cidr_block] description = "Allow http traffic." }, { @@ -222,7 +214,7 @@ module "http_https" { from_port = 443 protocol = "tcp" to_port = 443 - cidr_blocks = [module.vpc.vpc_cidr_block] + cidr_blocks = [local.vpc_cidr_block] description = "Allow https traffic." } ] @@ -241,15 +233,15 @@ module "http_https" { } ################################################################################ -# KMS Module +# KMS Module call ################################################################################ module "kms" { source = "clouddrove/kms/aws" version = "1.3.0" name = "${local.name}-kmss" - environment = "test" - label_order = ["environment", "name"] + environment = local.environment + label_order = local.label_order enabled = true description = "KMS key for EBS of EKS nodes" enable_key_rotation = false @@ -272,33 +264,26 @@ data "aws_iam_policy_document" "kms" { } ################################################################################ -# EKS Module +# EKS Module call ################################################################################ - module "eks" { source = "../.." 
enabled = true name = local.name - environment = "test" - label_order = ["environment", "name"] + environment = local.environment + label_order = local.label_order # EKS - kubernetes_version = "1.27" - endpoint_private_access = true - endpoint_public_access = true - enabled_cluster_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"] - + kubernetes_version = "1.27" + endpoint_public_access = true # Networking vpc_id = module.vpc.vpc_id subnet_ids = module.subnets.private_subnet_id allowed_security_groups = [module.ssh.security_group_id] eks_additional_security_group_ids = ["${module.ssh.security_group_id}", "${module.http_https.security_group_id}"] allowed_cidr_blocks = ["10.0.0.0/16"] - - ################################################################################ # AWS Managed Node Group - ################################################################################ # Node Groups Defaults Values It will Work all Node Groups managed_node_group_defaults = { subnet_ids = module.subnets.private_subnet_id @@ -327,7 +312,7 @@ module "eks" { name = "${module.eks.cluster_name}-critical" capacity_type = "SPOT" min_size = 1 - max_size = 7 + max_size = 2 desired_size = 2 instance_types = ["t3.medium"] } @@ -336,32 +321,12 @@ module "eks" { name = "${module.eks.cluster_name}-application" capacity_type = "SPOT" min_size = 1 - max_size = 7 + max_size = 2 desired_size = 1 force_update_version = true instance_types = ["t3.medium"] } } - - # -- Enable Add-Ons in EKS Cluster - addons = [ - { - addon_name = "coredns" - addon_version = "v1.10.1-eksbuild.2" - resolve_conflicts = "OVERWRITE" - }, - { - addon_name = "kube-proxy" - addon_version = "v1.27.3-eksbuild.2" - resolve_conflicts = "OVERWRITE" - }, - { - addon_name = "vpc-cni" - addon_version = "v1.13.4-eksbuild.1" - resolve_conflicts = "OVERWRITE" - }, - ] - # -- Set this to `true` only when you have correct iam_user details. 
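   # Note: applying the config map below is expected to go through the
   # kubernetes provider configured at the end of this file, and the IAM ARNs
   # supplied via map_additional_iam_users must already exist in the account.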
  apply_config_map_aws_auth = true
 
   map_additional_iam_users = [
@@ -372,11 +337,7 @@ module "eks" {
     }
   ]
 }
-
-################################################################################
-# Kubernetes provider configuration
-################################################################################
-
+## Kubernetes provider configuration
 data "aws_eks_cluster" "this" {
   depends_on = [module.eks]
   name       = module.eks.cluster_id
diff --git a/_example/aws_managed/versions.tf b/_example/aws_managed/versions.tf
index 0866ed0..8c08eed 100644
--- a/_example/aws_managed/versions.tf
+++ b/_example/aws_managed/versions.tf
@@ -1,11 +1,11 @@
 # Terraform version
 terraform {
-  required_version = ">= 1.5.0"
+  required_version = ">= 1.5.4"
 
   required_providers {
     aws = {
       source  = "hashicorp/aws"
-      version = ">= 5.5.0"
+      version = ">= 5.11.0"
     }
   }
 }
\ No newline at end of file
diff --git a/_example/aws_managed_with_fargate/example.tf b/_example/aws_managed_with_fargate/example.tf
index 3f9f75e..77b307b 100644
--- a/_example/aws_managed_with_fargate/example.tf
+++ b/_example/aws_managed_with_fargate/example.tf
@@ -3,48 +3,45 @@ provider "aws" {
 }
 
 locals {
-
-  name   = "clouddrove-eks"
-  region = "eu-west-1"
+  name           = "clouddrove-eks"
+  region         = "eu-west-1"
+  vpc_cidr_block = module.vpc.vpc_cidr_block
+  environment    = "test"
+  label_order    = ["name", "environment"]
 
   tags = {
     "kubernetes.io/cluster/${module.eks.cluster_name}" = "owned"
   }
 }
 
 ################################################################################
-# VPC
+# VPC module call
 ################################################################################
-
 module "vpc" {
   source  = "clouddrove/vpc/aws"
   version = "2.0.0"
 
   name        = "${local.name}-vpc"
-  environment = "test"
-  label_order = ["environment", "name"]
-
-  cidr_block = "10.10.0.0/16"
+  environment = local.environment
+  cidr_block  = "10.10.0.0/16"
 }
 
 # ################################################################################
-# # Subnets
+# # Subnets module call
 # ################################################################################
 
 module "subnets" {
   source  = "clouddrove/subnet/aws"
   version = "2.0.0"
 
-  name        = "${local.name}-subnet"
-  environment = "test"
-  label_order = ["environment", "name"]
-
+  name                = "${local.name}-subnet"
+  environment         = local.environment
   nat_gateway_enabled = true
   single_nat_gateway  = true
   availability_zones  = ["${local.region}a", "${local.region}b", "${local.region}c"]
   vpc_id              = module.vpc.vpc_id
   type                = "public-private"
   igw_id              = module.vpc.igw_id
-  cidr_block          = module.vpc.vpc_cidr_block
+  cidr_block          = local.vpc_cidr_block
   ipv6_cidr_block     = module.vpc.ipv6_cidr_block
   enable_ipv6         = false
 
@@ -126,23 +123,21 @@ module "subnets" {
 }
 
 ################################################################################
-# Keypair
+# Keypair module call
 ################################################################################
-
 module "keypair" {
   source  = "clouddrove/keypair/aws"
   version = "1.3.0"
 
-  name        = "${local.name}-key"
-  environment = "test"
-  label_order = ["name", "environment"]
-
+  name            = "${local.name}-key"
+  environment     = local.environment
+  label_order     = local.label_order
   enable_key_pair = true
   public_key      = "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQDc4AjHFctUATtd5of4u9bJtTgkh9bKogSDjxc9QqbylRORxUa422jO+t1ldTVdyqDRKltxQCJb4v23HZc2kssU5uROxpiF2fzgiHXRduL+RtyOtY2J+rNUdCRmHz4WQySblYpgteIJZpVo2smwdek8xSpjoHXhgxxa9hb4pQQwyjtVGEdH8vdYwtxgPZgPVaJgHVeJgVmhjTf2VGTATaeR9txzHsEPxhe/n1y34mQjX0ygEX8x0RZzlGziD1ih3KPaIHcpTVSYYk4LOoMK38vEI67SIMomskKn4yU043s+t9ZriJwk2V9+oU6tJU/5E1rd0SskXUhTypc3/Znc/rkYtLe8s6Uy26LOrBFzlhnCT7YH1XbCv3rEO+Nn184T4BSHeW2up8UJ1SOEd+WzzynXczdXoQcBN2kaz4dYFpRXchsAB6ejZrbEq7wyZvutf11OiS21XQ67+30lEL2WAO4i95e4sI8AdgwJgzrqVcicr3ImE+BRDkndMn5k1LhNGqwMD3Iuoel84xvinPAcElDLiFmL3BJVA/53bAlUmWqvUGW9SL5JpLUmZgE6kp+Tps7D9jpooGGJKmqgJLkJTzAmTSJh0gea/rT5KwI4j169TQD9xl6wFqns4BdQ4dMKHQCgDx8LbEd96l9F9ruWwQ8EAZBe4nIEKTV9ri+04JVhSQ== hello@clouddrove.com" } # ################################################################################ -# Security Groups +# Security Groups module call ################################################################################ module "ssh" { @@ -150,16 +145,14 @@ module "ssh" { version = "2.0.0" name = "${local.name}-ssh" - environment = "test" - label_order = ["environment", "name"] - - vpc_id = module.vpc.vpc_id + environment = local.environment + vpc_id = module.vpc.vpc_id new_sg_ingress_rules_with_cidr_blocks = [{ rule_count = 1 from_port = 22 protocol = "tcp" to_port = 22 - cidr_blocks = [module.vpc.vpc_cidr_block, "172.16.0.0/16"] + cidr_blocks = [local.vpc_cidr_block, "172.16.0.0/16"] description = "Allow ssh traffic." }, { @@ -178,7 +171,7 @@ module "ssh" { from_port = 22 protocol = "tcp" to_port = 22 - cidr_blocks = [module.vpc.vpc_cidr_block, "172.16.0.0/16"] + cidr_blocks = [local.vpc_cidr_block, "172.16.0.0/16"] description = "Allow ssh outbound traffic." }, { @@ -196,8 +189,7 @@ module "http_https" { version = "2.0.0" name = "${local.name}-http-https" - environment = "test" - label_order = ["name", "environment"] + environment = local.environment vpc_id = module.vpc.vpc_id ## INGRESS Rules @@ -206,7 +198,7 @@ module "http_https" { from_port = 22 protocol = "tcp" to_port = 22 - cidr_blocks = [module.vpc.vpc_cidr_block] + cidr_blocks = [local.vpc_cidr_block] description = "Allow ssh traffic." }, { @@ -214,7 +206,7 @@ module "http_https" { from_port = 80 protocol = "tcp" to_port = 80 - cidr_blocks = [module.vpc.vpc_cidr_block] + cidr_blocks = [local.vpc_cidr_block] description = "Allow http traffic." }, { @@ -222,7 +214,7 @@ module "http_https" { from_port = 443 protocol = "tcp" to_port = 443 - cidr_blocks = [module.vpc.vpc_cidr_block] + cidr_blocks = [local.vpc_cidr_block] description = "Allow https traffic." } ] @@ -241,15 +233,15 @@ module "http_https" { } ################################################################################ -# KMS Module +# KMS Module call ################################################################################ module "kms" { source = "clouddrove/kms/aws" version = "1.3.0" name = "${local.name}-kmss" - environment = "test" - label_order = ["environment", "name"] + environment = local.environment + label_order = local.label_order enabled = true description = "KMS key for EBS of EKS nodes" enable_key_rotation = false @@ -272,33 +264,26 @@ data "aws_iam_policy_document" "kms" { } ################################################################################ -# EKS Module +# EKS Module call ################################################################################ - module "eks" { source = "../.." 
enabled = true name = local.name - environment = "test" - label_order = ["environment", "name"] + environment = local.environment + label_order = local.label_order # EKS - kubernetes_version = "1.27" - endpoint_private_access = true - endpoint_public_access = true - enabled_cluster_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"] - + kubernetes_version = "1.27" + endpoint_public_access = true # Networking vpc_id = module.vpc.vpc_id subnet_ids = module.subnets.private_subnet_id allowed_security_groups = [module.ssh.security_group_id] eks_additional_security_group_ids = ["${module.ssh.security_group_id}", "${module.http_https.security_group_id}"] allowed_cidr_blocks = ["10.0.0.0/16"] - - ################################################################################ # AWS Managed Node Group - ################################################################################ # Node Groups Defaults Values It will Work all Node Groups managed_node_group_defaults = { subnet_ids = module.subnets.private_subnet_id @@ -327,7 +312,7 @@ module "eks" { name = "${module.eks.cluster_name}-critical" capacity_type = "SPOT" min_size = 1 - max_size = 7 + max_size = 2 desired_size = 2 instance_types = ["t3.medium"] } @@ -336,32 +321,12 @@ module "eks" { name = "${module.eks.cluster_name}-application" capacity_type = "SPOT" min_size = 1 - max_size = 7 + max_size = 2 desired_size = 1 force_update_version = true instance_types = ["t3.medium"] } } - - # -- Enable Add-Ons in EKS Cluster - addons = [ - { - addon_name = "coredns" - addon_version = "v1.10.1-eksbuild.2" - resolve_conflicts = "OVERWRITE" - }, - { - addon_name = "kube-proxy" - addon_version = "v1.27.3-eksbuild.2" - resolve_conflicts = "OVERWRITE" - }, - { - addon_name = "vpc-cni" - addon_version = "v1.13.4-eksbuild.1" - resolve_conflicts = "OVERWRITE" - }, - ] - # -- Set this to `true` only when you have correct iam_user details. 
apply_config_map_aws_auth = true map_additional_iam_users = [ @@ -371,21 +336,16 @@ module "eks" { groups = ["system:masters"] } ] - - fargate_enabled = true + #fargate profile + fargate_enabled = true fargate_profiles = { profile-0 = { - addon_name = "0" - namespace = "default" + addon_name = "0" + namespace = "default" } } - } - -################################################################################ -# Kubernetes provider configuration -################################################################################ - +## Kubernetes provider configuration data "aws_eks_cluster" "this" { depends_on = [module.eks] name = module.eks.cluster_id diff --git a/_example/complete/example.tf b/_example/complete/example.tf index 4d47d9c..8ebe8a4 100644 --- a/_example/complete/example.tf +++ b/_example/complete/example.tf @@ -2,39 +2,36 @@ provider "aws" { region = local.region } locals { - name = "clouddrove-eks" - region = "eu-west-1" + name = "clouddrove-eks" + region = "eu-west-1" + environment = "test" + label_order = ["name", "environment"] tags = { "kubernetes.io/cluster/${module.eks.cluster_name}" = "shared" } } ################################################################################ -# VPC +# VPC module call ################################################################################ - module "vpc" { source = "clouddrove/vpc/aws" version = "2.0.0" name = "${local.name}-vpc" - environment = "test" - label_order = ["environment", "name"] - - cidr_block = "10.10.0.0/16" + environment = local.environment + cidr_block = "10.10.0.0/16" } ################################################################################ -# Subnets +# Subnets module call ################################################################################ - module "subnets" { source = "clouddrove/subnet/aws" version = "2.0.0" name = "${local.name}-subnets" - environment = "test" - label_order = ["environment", "name"] + environment = local.environment tags = local.tags nat_gateway_enabled = true @@ -123,15 +120,14 @@ module "subnets" { } ################################################################################ -# Keypair +# Keypair module call ################################################################################ - module "keypair" { source = "clouddrove/keypair/aws" version = "1.3.0" name = "${local.name}-key" - environment = "test" + environment = local.environment label_order = ["name", "environment"] enable_key_pair = true @@ -139,16 +135,14 @@ module "keypair" { } # ################################################################################ -# Security Groups +# Security Groups module call ################################################################################ - module "ssh" { source = "clouddrove/security-group/aws" version = "2.0.0" name = "${local.name}-ssh" - environment = "test" - label_order = ["environment", "name"] + environment = local.environment vpc_id = module.vpc.vpc_id new_sg_ingress_rules_with_cidr_blocks = [{ @@ -193,8 +187,7 @@ module "http_https" { version = "2.0.0" name = "${local.name}-http-https" - environment = "test" - label_order = ["name", "environment"] + environment = local.environment vpc_id = module.vpc.vpc_id ## INGRESS Rules @@ -238,15 +231,15 @@ module "http_https" { } ################################################################################ -# KMS Module +# KMS Module call ################################################################################ module "kms" { source = "clouddrove/kms/aws" version = "1.3.0" name = 
"${local.name}-kms-nw" - environment = "test" - label_order = ["environment", "name"] + environment = local.environment + label_order = local.label_order enabled = true description = "KMS key for EBS of EKS nodes" enable_key_rotation = false @@ -269,21 +262,18 @@ data "aws_iam_policy_document" "kms" { } ################################################################################ -# EKS Module +# EKS Module call ################################################################################ - module "eks" { source = "../.." name = local.name - environment = "test" - label_order = ["environment", "name"] + environment = local.environment enabled = true - kubernetes_version = "1.27" - endpoint_private_access = true - endpoint_public_access = true - oidc_provider_enabled = true + kubernetes_version = "1.27" + endpoint_private_access = true + endpoint_public_access = true # Networking vpc_id = module.vpc.vpc_id @@ -292,9 +282,7 @@ module "eks" { eks_additional_security_group_ids = ["${module.ssh.security_group_id}", "${module.http_https.security_group_id}"] allowed_cidr_blocks = ["10.0.0.0/16"] - ################################################################################ # Self Managed Node Group - ################################################################################ # Node Groups Defaults Values It will Work all Node Groups self_node_group_defaults = { subnet_ids = module.subnets.private_subnet_id @@ -308,10 +296,8 @@ module "eks" { key = "autoscaling:ResourceTag/k8s.io/cluster-autoscaler/${module.eks.cluster_id}" value = "owned" propagate_at_launch = true - } ] - block_device_mappings = { xvda = { device_name = "/dev/xvda" @@ -326,8 +312,6 @@ module "eks" { } } } - - self_node_groups = { self_managed_critical = { name = "self_managed_critical" @@ -337,7 +321,6 @@ module "eks" { bootstrap_extra_args = "--kubelet-extra-args '--max-pods=110'" instance_type = "t3.medium" } - self_managed_application = { name = "self_managed_application" instance_market_options = { @@ -372,10 +355,7 @@ module "eks" { } } - - ################################################################################ # AWS Managed Node Group - ################################################################################ # Node Groups Defaults Values It will Work all Node Groups managed_node_group_defaults = { subnet_ids = module.subnets.private_subnet_id @@ -385,7 +365,6 @@ module "eks" { "kubernetes.io/cluster/${module.eks.cluster_name}" = "shared" "k8s.io/cluster/${module.eks.cluster_name}" = "shared" } - block_device_mappings = { xvda = { device_name = "/dev/xvda" @@ -400,7 +379,6 @@ module "eks" { } } } - managed_node_group = { critical = { name = "${module.eks.cluster_name}-critical" @@ -409,7 +387,6 @@ module "eks" { desired_size = 1 instance_types = ["t3.medium"] } - application = { name = "${module.eks.cluster_name}-application" capacity_type = "SPOT" @@ -421,29 +398,6 @@ module "eks" { instance_types = ["t3.medium"] } } - - # -- Enable Add-Ons in EKS Cluster - addons = [ - { - addon_name = "coredns" - addon_version = "v1.10.1-eksbuild.2" - resolve_conflicts = "OVERWRITE" - service_account_role_arn = "${module.eks.node_group_iam_role_arn}" - }, - { - addon_name = "kube-proxy" - addon_version = "v1.27.3-eksbuild.2" - resolve_conflicts = "OVERWRITE" - service_account_role_arn = "${module.eks.node_group_iam_role_arn}" - }, - { - addon_name = "vpc-cni" - addon_version = "v1.13.4-eksbuild.1" - resolve_conflicts = "OVERWRITE" - service_account_role_arn = "${module.eks.node_group_iam_role_arn}" - }, - 
] - apply_config_map_aws_auth = true map_additional_iam_users = [ { @@ -473,13 +427,9 @@ module "eks" { recurrence = "0 7 * * 5" } } - } -################################################################################ # Kubernetes provider configuration -################################################################################ - data "aws_eks_cluster" "this" { name = module.eks.cluster_id } @@ -491,4 +441,4 @@ provider "kubernetes" { host = data.aws_eks_cluster.this.endpoint cluster_ca_certificate = base64decode(data.aws_eks_cluster.this.certificate_authority[0].data) token = data.aws_eks_cluster_auth.this.token -} +} \ No newline at end of file diff --git a/_example/self_managed/example.tf b/_example/self_managed/example.tf index 26891be..8395afd 100644 --- a/_example/self_managed/example.tf +++ b/_example/self_managed/example.tf @@ -2,42 +2,37 @@ provider "aws" { region = local.region } locals { - - name = "clouddrove-eks" - region = "eu-west-1" + name = "clouddrove-eks" + region = "eu-west-1" + environment = "test" + label_order = ["name", "environment"] tags = { "kubernetes.io/cluster/${module.eks.cluster_name}" = "shared" } } ################################################################################ -# VPC +# VPC module call ################################################################################ - module "vpc" { source = "clouddrove/vpc/aws" version = "2.0.0" name = "${local.name}-vpc" - environment = "test" - label_order = ["environment", "name"] - - cidr_block = "10.10.0.0/16" + environment = local.environment + cidr_block = "10.10.0.0/16" } ################################################################################ # Subnets ################################################################################ - module "subnets" { source = "clouddrove/subnet/aws" version = "2.0.0" - name = "${local.name}-subnets" - environment = "test" - label_order = ["environment", "name"] - tags = local.tags - + name = "${local.name}-subnets" + environment = local.environment + tags = local.tags nat_gateway_enabled = true availability_zones = ["${local.region}a", "${local.region}b"] vpc_id = module.vpc.vpc_id @@ -45,7 +40,6 @@ module "subnets" { ipv6_cidr_block = module.vpc.ipv6_cidr_block type = "public-private" igw_id = module.vpc.igw_id - public_inbound_acl_rules = [ { rule_number = 100 @@ -64,7 +58,6 @@ module "subnets" { ipv6_cidr_block = "::/0" }, ] - public_outbound_acl_rules = [ { rule_number = 100 @@ -83,7 +76,6 @@ module "subnets" { ipv6_cidr_block = "::/0" }, ] - private_inbound_acl_rules = [ { rule_number = 100 @@ -102,7 +94,6 @@ module "subnets" { ipv6_cidr_block = "::/0" }, ] - private_outbound_acl_rules = [ { rule_number = 100 @@ -124,17 +115,15 @@ module "subnets" { } ################################################################################ -# Keypair +# Keypair module call ################################################################################ - module "keypair" { source = "clouddrove/keypair/aws" version = "1.3.0" - name = "${local.name}-key" - environment = "test" - label_order = ["name", "environment"] - + name = "${local.name}-key" + environment = "test" + label_order = ["name", "environment"] enable_key_pair = true public_key = "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQDc4AjHFctUATtd5of4u9bJtTgkh9bKogSDjxc9QqbylRORxUa422jO+t1ldTVdyqDRKltxQCJb4v23HZc2kssU5uROxpiF2fzgiHXRduL+RtyOtY2J+rNUdCRmHz4WQySblYpgteIJZpVo2smwdek8xSpjoHXhgxxa9hb4pQQwyjtVGEdH8vdYwtxgPZgPVaJgHVeJgVmhjTf2VGTATaeR9txzHsEPxhe/n1y34mQjX0ygEX8x0RZzlGziD1ih3KPaIHcpTVSYYk4LOoMK38vEI67SIMomskKn4yU043s+t9ZriJwk2V9+oU6tJU/5E1rd0SskXUhTypc3/Znc/rkYtLe8s6Uy26LOrBFzlhnCT7YH1XbCv3rEO+Nn184T4BSHeW2up8UJ1SOEd+WzzynXczdXoQcBN2kaz4dYFpRXchsAB6ejZrbEq7wyZvutf11OiS21XQ67+30lEL2WAO4i95e4sI8AdgwJgzrqVcicr3ImE+BRDkndMn5k1LhNGqwMD3Iuoel84xvinPAcElDLiFmL3BJVA/53bAlUmWqvUGW9SL5JpLUmZgE6kp+Tps7D9jpooGGJKmqgJLkJTzAmTSJh0gea/rT5KwI4j169TQD9xl6wFqns4BdQ4dMKHQCgDx8LbEd96l9F9ruWwQ8EAZBe4nIEKTV9ri+04JVhSQ== hello@clouddrove.com" } @@ -148,10 +137,8 @@ module "ssh" { version = "2.0.0" name = "${local.name}-ssh" - environment = "test" - label_order = ["environment", "name"] - - vpc_id = module.vpc.vpc_id + environment = local.environment + vpc_id = module.vpc.vpc_id new_sg_ingress_rules_with_cidr_blocks = [{ rule_count = 1 from_port = 22 @@ -169,7 +156,6 @@ module "ssh" { description = "Allow Mongodb traffic." } ] - ## EGRESS Rules new_sg_egress_rules_with_cidr_blocks = [{ rule_count = 1 @@ -194,10 +180,8 @@ module "http_https" { version = "2.0.0" name = "${local.name}-http-https" - environment = "test" - label_order = ["name", "environment"] - - vpc_id = module.vpc.vpc_id + environment = local.environment + vpc_id = module.vpc.vpc_id ## INGRESS Rules new_sg_ingress_rules_with_cidr_blocks = [{ rule_count = 1 @@ -239,35 +223,25 @@ module "http_https" { } ################################################################################ -# EKS Module +# EKS Module call ################################################################################ - module "eks" { source = "../.." 
   name        = local.name
   environment = "test"
-  label_order = ["environment", "name"]
-
   enabled = true
 
   # EKS
-  kubernetes_version        = "1.27"
-  endpoint_private_access   = true
-  endpoint_public_access    = true
-  enabled_cluster_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"]
-  oidc_provider_enabled     = true
-
+  kubernetes_version      = "1.27"
+  endpoint_private_access = true
+  endpoint_public_access  = true
   # Networking
   vpc_id                            = module.vpc.vpc_id
   subnet_ids                        = module.subnets.private_subnet_id
   allowed_security_groups           = [module.ssh.security_group_id]
   eks_additional_security_group_ids = ["${module.ssh.security_group_id}", "${module.http_https.security_group_id}"]
   allowed_cidr_blocks               = ["10.0.0.0/16"]
-
-
-  ################################################################################
-  # Self Managed Node Group
-  ################################################################################
+  # Self Managed Node Group
   # Node Groups Defaults Values It will Work all Node Groups
   self_node_group_defaults = {
     subnet_ids = module.subnets.private_subnet_id
@@ -307,7 +281,6 @@ module "eks" {
     bootstrap_extra_args = "--kubelet-extra-args '--max-pods=110'"
     instance_type        = "t3.medium"
   }
-
   application = {
     name = "${module.eks.cluster_name}-application"
     instance_market_options = {
@@ -342,10 +315,7 @@ module "eks" {
     }
   }
 }
-
-################################################################################
 # Kubernetes provider configuration
-################################################################################
 data "aws_eks_cluster" "this" {
   name = module.eks.cluster_id
 }
diff --git a/fargate_profile.tf b/fargate_profile.tf
index 25d52bc..2298139 100644
--- a/fargate_profile.tf
+++ b/fargate_profile.tf
@@ -1,13 +1,13 @@
 module "fargate" {
-  source = "./node_group/fargate_profile"
-
-  name = var.name
-  environment = var.environment
-  label_order = var.label_order
-  enabled =var.enabled
-  fargate_enabled = var.fargate_enabled
-  cluster_name = join("", aws_eks_cluster.default.*.name)
-  fargate_profiles = var.fargate_profiles
-  subnet_ids = var.subnet_ids
-
+  source = "./node_group/fargate_profile"
+
+  name             = var.name
+  environment      = var.environment
+  label_order      = var.label_order
+  enabled          = var.enabled
+  fargate_enabled  = var.fargate_enabled
+  cluster_name     = join("", aws_eks_cluster.default.*.name)
+  fargate_profiles = var.fargate_profiles
+  subnet_ids       = var.subnet_ids
+
 }
\ No newline at end of file
diff --git a/node_group/fargate_profile/fargate.tf b/node_group/fargate_profile/fargate.tf
index d3f5256..7912d22 100644
--- a/node_group/fargate_profile/fargate.tf
+++ b/node_group/fargate_profile/fargate.tf
@@ -1,8 +1,8 @@
 terraform {
   required_providers {
     aws = {
-      source = "hashicorp/aws"
-      version = ">= 3.1.15"
+      source  = "hashicorp/aws"
+      version = ">= 3.1.15"
     }
   }
 }
@@ -25,7 +25,7 @@ module "labels" {
 #Module      : IAM ROLE
 #Description : Provides an IAM role.
 resource "aws_iam_role" "fargate_role" {
-  count = var.enabled && var.fargate_enabled ? 1 : 0
+  count              = var.enabled && var.fargate_enabled ? 1 : 0
 
   name               = format("%s-fargate-role", module.labels.id)
   assume_role_policy = join("", data.aws_iam_policy_document.aws_eks_fargate_policy.*.json)
 
 }
 
 resource "aws_iam_role_policy_attachment" "amazon_eks_fargate_pod_execution_role_policy" {
-  count = var.enabled && var.fargate_enabled ? 1 : 0
+  count      = var.enabled && var.fargate_enabled ? 1 : 0
 
   policy_arn = "arn:aws:iam::aws:policy/AmazonEKSFargatePodExecutionRolePolicy"
   role       = join("", aws_iam_role.fargate_role.*.name)
 
@@ -45,14 +45,14 @@ resource "aws_eks_fargate_profile" "default" {
   for_each = var.enabled && var.fargate_enabled ? var.fargate_profiles : {}
 
   cluster_name           = var.cluster_name
-  fargate_profile_name   = format("%s-%s", module.labels.id, each.value.addon_name )
+  fargate_profile_name   = format("%s-%s", module.labels.id, each.value.addon_name)
   pod_execution_role_arn = aws_iam_role.fargate_role[0].arn
   subnet_ids             = var.subnet_ids
   tags                   = module.labels.tags
 
   selector {
     namespace = lookup(each.value, "namespace", "default")
-    labels    = lookup(each.value, "labels", null )
+    labels    = lookup(each.value, "labels", null)
   }
 }
diff --git a/variables.tf b/variables.tf
index 917dbd7..5edfb8c 100644
--- a/variables.tf
+++ b/variables.tf
@@ -20,7 +20,7 @@ variable "environment" {
 
 variable "label_order" {
   type        = list(any)
-  default     = []
+  default     = ["name", "environment"]
   description = "Label order, e.g. `name`,`application`."
 }
 
@@ -90,8 +90,24 @@ variable "nodes_additional_security_group_ids" {
   description = "EKS additional node group ids"
 }
 variable "addons" {
-  type        = any
-  default     = []
+  type = any
+  default = [
+    {
+      addon_name        = "coredns"
+      addon_version     = "v1.10.1-eksbuild.2"
+      resolve_conflicts = "OVERWRITE"
+    },
+    {
+      addon_name        = "kube-proxy"
+      addon_version     = "v1.27.3-eksbuild.2"
+      resolve_conflicts = "OVERWRITE"
+    },
+    {
+      addon_name        = "vpc-cni"
+      addon_version     = "v1.13.4-eksbuild.1"
+      resolve_conflicts = "OVERWRITE"
+    },
+  ]
   description = "Manages [`aws_eks_addon`](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_addon) resources."
 }
 
@@ -198,7 +214,7 @@ variable "public_access_cidrs" {
 
 variable "endpoint_private_access" {
   type        = bool
-  default     = false
-  description = "Indicates whether or not the Amazon EKS private API server endpoint is enabled. Default to AWS EKS resource and it is false."
+  default     = true
+  description = "Whether the Amazon EKS private API server endpoint is enabled. Defaults to `true` in this module."
 }
 
diff --git a/versions.tf b/versions.tf
index 0866ed0..8c08eed 100644
--- a/versions.tf
+++ b/versions.tf
@@ -1,11 +1,11 @@
 # Terraform version
 terraform {
-  required_version = ">= 1.5.0"
+  required_version = ">= 1.5.4"
 
   required_providers {
     aws = {
       source  = "hashicorp/aws"
-      version = ">= 5.5.0"
+      version = ">= 5.11.0"
     }
   }
 }
\ No newline at end of file
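
--
Usage note (not part of the diff): with this patch the coredns/kube-proxy/vpc-cni
pins move out of the examples into variables.tf, so a consumer only sets `addons`
to override them. A minimal sketch of such a root module follows; the
registry-style source string, the surrounding vpc/subnet references, and the
single-add-on override are illustrative assumptions, not part of this patch.

# Hypothetical consumer module call; anything omitted here (label_order,
# endpoint_private_access, the other add-ons) is filled by the new defaults
# declared in variables.tf.
module "eks" {
  source      = "clouddrove/eks/aws" # assumed registry source; the bundled examples use "../.."
  enabled     = true
  name        = "clouddrove-eks"
  environment = "test"

  kubernetes_version = "1.27"
  vpc_id             = module.vpc.vpc_id                # assumed companion module
  subnet_ids         = module.subnets.private_subnet_id # assumed companion module

  # Override a single default pin; drop this block entirely to accept all
  # three add-on defaults from variables.tf.
  addons = [
    {
      addon_name        = "coredns"
      addon_version     = "v1.10.1-eksbuild.2"
      resolve_conflicts = "OVERWRITE"
    },
  ]
}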
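The fargate.tf hunk above reads each profile with lookup(), so `namespace` and
`labels` stay optional and `addon_name` is only used to suffix the profile name.
A hypothetical var.fargate_profiles value exercising all three keys:

# Sketch only: the profile key, addon_name, namespace, and labels are invented
# values, shaped to match what aws_eks_fargate_profile.default consumes.
fargate_profiles = {
  profile-app = {
    addon_name = "app"                      # profile name becomes "<labels.id>-app"
    namespace  = "apps"                     # selector namespace; defaults to "default"
    labels     = { "workload" = "fargate" } # selector labels; defaults to null
  }
}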