update 1.30
paulofponciano committed Sep 12, 2024
1 parent 03c9178 commit f91fb10
Showing 13 changed files with 56 additions and 73 deletions.
20 changes: 12 additions & 8 deletions eks.tf
@@ -36,13 +36,17 @@ resource "aws_eks_cluster" "eks_cluster" {
resources = ["secrets"]
}

+ # access_config {
+ #   authentication_mode = "API_AND_CONFIG_MAP"
+ # }
+
enabled_cluster_log_types = var.enabled_cluster_log_types

tags = {
"kubernetes.io/cluster/${var.cluster_name}" = "shared"
"Environment" = "${var.environment}"
"Project" = "${var.project}"
Terraform = true
"kubernetes.io/cluster/${var.cluster_name}" = "shared"
"Environment" = "${var.environment}"
"Project" = "${var.project}"
Terraform = true
}

}
@@ -73,10 +77,10 @@ resource "aws_eks_node_group" "cluster" {
}

tags = {
"kubernetes.io/cluster/${var.cluster_name}" = "owned",
"Environment" = "${var.environment}"
"Project" = "${var.project}"
Terraform = true
"kubernetes.io/cluster/${var.cluster_name}" = "owned",
"Environment" = "${var.environment}"
"Project" = "${var.project}"
Terraform = true
}

lifecycle {
12 changes: 8 additions & 4 deletions eks_addons.tf
@@ -5,7 +5,8 @@ resource "aws_eks_addon" "cni" {
addon_name = "vpc-cni"

addon_version = var.addon_cni_version
resolve_conflicts = "OVERWRITE"
resolve_conflicts_on_update = "OVERWRITE"
resolve_conflicts_on_create = "OVERWRITE"

depends_on = [
kubernetes_config_map.aws-auth
@@ -20,7 +21,8 @@ resource "aws_eks_addon" "coredns" {
addon_name = "coredns"

addon_version = var.addon_coredns_version
resolve_conflicts = "OVERWRITE"
resolve_conflicts_on_update = "OVERWRITE"
resolve_conflicts_on_create = "OVERWRITE"

depends_on = [
aws_eks_node_group.cluster,
@@ -35,7 +37,8 @@ resource "aws_eks_addon" "kubeproxy" {
addon_name = "kube-proxy"

addon_version = var.addon_kubeproxy_version
resolve_conflicts = "OVERWRITE"
resolve_conflicts_on_update = "OVERWRITE"
resolve_conflicts_on_create = "OVERWRITE"

depends_on = [
kubernetes_config_map.aws-auth
@@ -49,7 +52,8 @@ resource "aws_eks_addon" "csi_driver" {
addon_name = "aws-ebs-csi-driver"

addon_version = var.addon_csi_version
resolve_conflicts = "OVERWRITE"
resolve_conflicts_on_update = "OVERWRITE"
resolve_conflicts_on_create = "OVERWRITE"

depends_on = [
aws_eks_node_group.cluster,
2 changes: 1 addition & 1 deletion helm_alb_ingress_controller.tf
@@ -40,6 +40,6 @@ resource "helm_release" "alb_ingress_controller" {
aws_eks_cluster.eks_cluster,
aws_eks_node_group.cluster,
kubernetes_config_map.aws-auth,
- time_sleep.wait_30_seconds_karpenter
+ #time_sleep.wait_30_seconds_karpenter
]
}
6 changes: 3 additions & 3 deletions helm_istio.tf
@@ -5,7 +5,7 @@ resource "helm_release" "istio_base" {
namespace = "istio-system"
create_namespace = true

version = "1.21.0"
version = "1.23.0"

depends_on = [
aws_eks_cluster.eks_cluster,
@@ -23,7 +23,7 @@ resource "helm_release" "istiod" {
namespace = "istio-system"
create_namespace = true

version = "1.21.0"
version = "1.23.0"

depends_on = [
aws_eks_cluster.eks_cluster,
@@ -42,7 +42,7 @@ resource "helm_release" "istio_ingress" {
namespace = "istio-system"
create_namespace = true

version = "1.21.0"
version = "1.23.0"

set {
name = "service.type"
51 changes: 12 additions & 39 deletions helm_karpenter.tf
@@ -1,11 +1,11 @@
resource "helm_release" "karpenter" {
namespace = "karpenter"
namespace = "kube-system"
create_namespace = true

name = "karpenter"
repository = "oci://public.ecr.aws/karpenter"
chart = "karpenter"
version = "v0.34.3"
version = "1.0.1"

set {
name = "serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn"
@@ -41,50 +41,23 @@ resource "time_sleep" "wait_30_seconds_karpenter" {
create_duration = "30s"
}

# resource "kubectl_manifest" "karpenter_nodepool" {
# yaml_body = templatefile(
# "./karpenter/nodepool.yml.tpl", {
# EKS_CLUSTER = var.cluster_name
# CAPACITY_TYPE = var.karpenter_capacity_type
# INSTANCE_FAMILY = var.karpenter_instance_class
# INSTANCE_SIZES = var.karpenter_instance_size
# AVAILABILITY_ZONES = var.karpenter_azs
# })

# depends_on = [
# helm_release.karpenter,
# time_sleep.wait_30_seconds_karpenter
# ]
# }

# resource "kubectl_manifest" "karpenter_nodeclass" {
# yaml_body = templatefile(
# "./karpenter/nodeclass.yml.tpl", {
# EKS_CLUSTER = var.cluster_name
# NODE_ROLE = aws_iam_role.eks_nodes_roles.name
# })

# depends_on = [
# helm_release.karpenter,
# time_sleep.wait_30_seconds_karpenter
# ]
# }

resource "kubectl_manifest" "karpenter-nodeclass" {
yaml_body = <<YAML
- apiVersion: karpenter.k8s.aws/v1beta1
+ apiVersion: karpenter.k8s.aws/v1
kind: EC2NodeClass
metadata:
name: ${var.cluster_name}-default
spec:
- amiFamily: AL2
- role: role-${var.cluster_name}-${var.environment}-eks-nodes
+ amiFamily: AL2023
subnetSelectorTerms:
- tags:
karpenter.sh/discovery: "true"
securityGroupSelectorTerms:
- tags:
- aws:eks:cluster-name: ${var.cluster_name}
+ aws:eks:cluster-name: pegasus
+ role: role-${var.cluster_name}-${var.environment}-eks-nodes
+ amiSelectorTerms:
+ - alias: al2023@v20240828
blockDeviceMappings:
- deviceName: /dev/xvda
ebs:
@@ -104,7 +77,7 @@ YAML

resource "kubectl_manifest" "karpenter-nodepool-default" {
yaml_body = <<YAML
- apiVersion: karpenter.sh/v1beta1
+ apiVersion: karpenter.sh/v1
kind: NodePool
metadata:
name: ${var.cluster_name}-default
@@ -128,14 +101,14 @@ spec:
operator: In
values: [${join(",", [for az in var.karpenter_azs : "\"${az}\""])}]
nodeClassRef:
- apiVersion: karpenter.k8s.aws/v1beta1
+ group: karpenter.k8s.aws
kind: EC2NodeClass
name: ${var.cluster_name}-default
limits:
cpu: 1000
disruption:
- consolidationPolicy: WhenUnderutilized
- expireAfter: 72h
+ consolidationPolicy: WhenEmptyOrUnderutilized
+ consolidateAfter: 72h
YAML

depends_on = [
2 changes: 1 addition & 1 deletion helm_prometheus.tf
@@ -5,7 +5,7 @@ resource "helm_release" "prometheus" {
namespace = "prometheus"
create_namespace = true

version = "57.2.0"
version = "62.3.1"


depends_on = [
2 changes: 1 addition & 1 deletion iam.tf
@@ -256,7 +256,7 @@ data "aws_iam_policy_document" "karpenter_controller_assume_role_policy" {
condition {
test = "StringEquals"
variable = "${replace(aws_iam_openid_connect_provider.eks.url, "https://", "")}:sub"
values = ["system:serviceaccount:karpenter:karpenter"]
values = ["system:serviceaccount:kube-system:karpenter"]
}

principals {
4 changes: 3 additions & 1 deletion karpenter/karpenter-controller-trust-policy.json
@@ -10,6 +10,7 @@
"iam:TagInstanceProfile",
"iam:DeleteInstanceProfile",
"iam:AddRoleToInstanceProfile",
"iam:RemoveRoleFromInstanceProfile",
"ec2:RunInstances",
"ec2:DescribeSubnets",
"ec2:DescribeSecurityGroups",
@@ -24,7 +25,8 @@
"ec2:CreateLaunchTemplate",
"ec2:CreateFleet",
"ec2:DescribeSpotPriceHistory",
"pricing:GetProducts"
"pricing:GetProducts",
"eks:DescribeCluster"
],
"Effect": "Allow",
"Resource": "*",
4 changes: 2 additions & 2 deletions networking.tf
@@ -128,7 +128,7 @@ resource "aws_route" "public_internet_access" {
## NGW

resource "aws_eip" "vpc_iep_1" {
- vpc = true
+ domain = "vpc"
tags = {
Name = join("-", [var.cluster_name, var.environment, "eip-ngw", var.az1])
Project = "${var.project}"
@@ -137,7 +137,7 @@ resource "aws_eip" "vpc_iep_1" {
}

resource "aws_eip" "vpc_iep_2" {
- vpc = true
+ domain = "vpc"
tags = {
Name = join("-", [var.cluster_name, var.environment, "eip-ngw", var.az2])
Project = "${var.project}"
2 changes: 1 addition & 1 deletion nlb.tf
@@ -36,7 +36,7 @@ resource "aws_lb_listener" "ingress_443" {
port = "443"
#protocol = "TCP"
protocol = "TLS"
certificate_arn = "arn:aws:acm:us-east-2:310240692520:certificate/bfbfe3ce-d347-4c42-8986-f45e95e04ca1"
certificate_arn = "arn:aws:acm:us-east-2:310240692520:certificate/82876cec-ac6c-43da-ae3b-6d6ba1ae91e7"
alpn_policy = "HTTP2Preferred"

default_action {
2 changes: 1 addition & 1 deletion provider.tf
@@ -2,7 +2,7 @@ terraform {
required_providers {
aws = {
source = "aws"
version = "~> 3.0"
version = "~> 5.0"
}
helm = {
source = "helm"
2 changes: 1 addition & 1 deletion sg.tf
@@ -83,4 +83,4 @@ resource "aws_security_group_rule" "nodeport" {

security_group_id = aws_security_group.cluster_nodes_sg.id
type = "ingress"
- }
+ }
20 changes: 10 additions & 10 deletions variables.tfvars
@@ -9,12 +9,12 @@ az2 = "us-east-2b"

## CLUSTER OPTIONS

k8s_version = "1.29"
k8s_version = "1.30"

endpoint_private_access = true

instance_type = [
"m5.large"
"t3a.medium"
]

desired_size = "1"
@@ -25,21 +25,21 @@ enabled_cluster_log_types = [
"api", "audit", "authenticator", "controllerManager", "scheduler"
]

addon_csi_version = "v1.29.1-eksbuild.1"
addon_cni_version = "v1.17.1-eksbuild.1"
addon_coredns_version = "v1.11.1-eksbuild.6"
addon_kubeproxy_version = "v1.29.1-eksbuild.2"
addon_csi_version = "v1.34.0-eksbuild.1"
addon_cni_version = "v1.18.3-eksbuild.2"
addon_coredns_version = "v1.11.1-eksbuild.11"
addon_kubeproxy_version = "v1.30.3-eksbuild.2"

## INGRESS OPTIONS (ISTIO NLB)

nlb_ingress_internal = "false"
enable_cross_zone_lb = "true"
nlb_ingress_type = "network"
proxy_protocol_v2 = "false"
grafana_virtual_service_host = "grafana.pauloponciano.digital"
# kiali_virtual_service_host = "kiali.pauloponciano.digital"
# jaeger_virtual_service_host = "jaeger.pauloponciano.digital"
argocd_virtual_service_host = "argocd.pauloponciano.digital"
grafana_virtual_service_host = "grafana.sevira.cloud"
# kiali_virtual_service_host = "kiali.sevira.cloud"
# jaeger_virtual_service_host = "jaeger.sevira.cloud"
argocd_virtual_service_host = "argocd.sevira.cloud"

## KARPENTER OPTIONS

