# main.tf (forked from cloudposse/terraform-aws-eks-node-group)
locals {
  enabled = module.this.enabled

  node_group_tags = merge(
    module.label.tags,
    {
      "kubernetes.io/cluster/${var.cluster_name}" = "owned"
    },
    {
      "k8s.io/cluster-autoscaler/${var.cluster_name}" = "owned"
    },
    {
      "k8s.io/cluster-autoscaler/enabled" = tostring(var.enable_cluster_autoscaler)
    }
  )

  aws_policy_prefix = format("arn:%s:iam::aws:policy", join("", data.aws_partition.current.*.partition))

  userdata_vars = {
    cluster_name                    = var.cluster_name
    bootstrap_extra_args            = var.bootstrap_extra_args
    kubelet_extra_args              = var.kubelet_extra_args
    before_cluster_joining_userdata = var.before_cluster_joining_userdata
    after_cluster_joining_userdata  = var.after_cluster_joining_userdata
  }

  # Use a custom launch template if one was passed as an input.
  # Otherwise, use the default in this project. Conditionals are used instead
  # of coalesce() because coalesce() evaluates all of its arguments eagerly,
  # so referencing aws_launch_template.default[0] would fail whenever the
  # default template is not created (count = 0).
  launch_template = {
    id             = var.launch_template_id == null ? join("", aws_launch_template.default.*.id) : var.launch_template_id
    latest_version = var.launch_template_version == null ? join("", aws_launch_template.default.*.latest_version) : var.launch_template_version
  }
}
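# For reference, userdata.tpl is expected to interpolate the variables in
# local.userdata_vars roughly as follows. This is a hedged sketch of a typical
# EKS bootstrap template, not necessarily the exact file shipped with this
# module:
#
#   #!/bin/bash
#   ${before_cluster_joining_userdata}
#   /etc/eks/bootstrap.sh ${bootstrap_extra_args} \
#     --kubelet-extra-args '${kubelet_extra_args}' '${cluster_name}'
#   ${after_cluster_joining_userdata}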
module "label" {
source = "git::https://github.com/cloudposse/terraform-null-label.git?ref=tags/0.19.2"
# Using attributes = ["workers"] would put "workers" before any user-specified attributes.
# While that might be preferable (adding an attribute "blue" would create
# ...name-workers-blue instead of ...name-blue-workers), historically we forced "workers"
# to the end of the attribute list, so we do it again here to maintain compatibility.
attributes = compact(concat(module.this.attributes, ["workers"]))
context = module.this.context
}
data "aws_partition" "current" {
count = local.enabled ? 1 : 0
}
data "aws_iam_policy_document" "assume_role" {
count = local.enabled ? 1 : 0
statement {
effect = "Allow"
actions = ["sts:AssumeRole"]
principals {
type = "Service"
identifiers = ["ec2.amazonaws.com"]
}
}
}
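# The document above renders to the standard EC2 trust policy, roughly:
#
#   {
#     "Version": "2012-10-17",
#     "Statement": [{
#       "Effect": "Allow",
#       "Action": "sts:AssumeRole",
#       "Principal": { "Service": "ec2.amazonaws.com" }
#     }]
#   }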
data "aws_iam_policy_document" "amazon_eks_worker_node_autoscaler_policy" {
count = (local.enabled && var.enable_cluster_autoscaler) ? 1 : 0
statement {
sid = "AllowToScaleEKSNodeGroupAutoScalingGroup"
actions = [
"autoscaling:DescribeAutoScalingGroups",
"autoscaling:DescribeAutoScalingInstances",
"autoscaling:DescribeLaunchConfigurations",
"autoscaling:DescribeTags",
"autoscaling:SetDesiredCapacity",
"autoscaling:TerminateInstanceInAutoScalingGroup",
"ec2:DescribeLaunchTemplateVersions"
]
resources = [
"*"
]
}
}
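# These permissions pair with the k8s.io/cluster-autoscaler/* tags set in
# local.node_group_tags: the Kubernetes cluster-autoscaler discovers the node
# group's Auto Scaling Group by those tags when started with auto-discovery,
# for example (an illustrative cluster-autoscaler flag, not part of this
# module):
#
#   --node-group-auto-discovery=asg:tag=k8s.io/cluster-autoscaler/enabled,k8s.io/cluster-autoscaler/<cluster_name>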
resource "aws_iam_policy" "amazon_eks_worker_node_autoscaler_policy" {
count = (local.enabled && var.enable_cluster_autoscaler) ? 1 : 0
name = "${module.label.id}-autoscaler"
path = "/"
policy = join("", data.aws_iam_policy_document.amazon_eks_worker_node_autoscaler_policy.*.json)
}
resource "aws_iam_role" "default" {
count = local.enabled ? 1 : 0
name = module.label.id
assume_role_policy = join("", data.aws_iam_policy_document.assume_role.*.json)
tags = module.label.tags
}
resource "aws_iam_role_policy_attachment" "amazon_eks_worker_node_policy" {
count = local.enabled ? 1 : 0
policy_arn = format("%s/%s", local.aws_policy_prefix, "AmazonEKSWorkerNodePolicy")
role = join("", aws_iam_role.default.*.name)
}
resource "aws_iam_role_policy_attachment" "amazon_eks_worker_node_autoscaler_policy" {
count = (local.enabled && var.enable_cluster_autoscaler) ? 1 : 0
policy_arn = join("", aws_iam_policy.amazon_eks_worker_node_autoscaler_policy.*.arn)
role = join("", aws_iam_role.default.*.name)
}
resource "aws_iam_role_policy_attachment" "amazon_eks_cni_policy" {
count = local.enabled ? 1 : 0
policy_arn = format("%s/%s", local.aws_policy_prefix, "AmazonEKS_CNI_Policy")
role = join("", aws_iam_role.default.*.name)
}
resource "aws_iam_role_policy_attachment" "amazon_ec2_container_registry_read_only" {
count = local.enabled ? 1 : 0
policy_arn = format("%s/%s", local.aws_policy_prefix, "AmazonEC2ContainerRegistryReadOnly")
role = join("", aws_iam_role.default.*.name)
}
resource "aws_iam_role_policy_attachment" "existing_policies_for_eks_workers_role" {
count = local.enabled ? var.existing_workers_role_policy_arns_count : 0
policy_arn = var.existing_workers_role_policy_arns[count.index]
role = join("", aws_iam_role.default.*.name)
}
resource "aws_launch_template" "default" {
# We'll use this default if we aren't provided with a launch template during invocation
count = (local.enabled && (var.launch_template_id == null)) ? 1 : 0
block_device_mappings {
device_name = "/dev/xvda"
ebs {
volume_size = var.disk_size
}
}
instance_type = var.instance_types[0]
dynamic "tag_specifications" {
for_each = ["instance", "volume", "elastic-gpu"]
content {
resource_type = tag_specifications.value
tags = local.node_group_tags
}
}
user_data = base64encode(templatefile("${path.module}/userdata.tpl", local.userdata_vars))
}
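# To bypass the default template above, a caller can create its own
# aws_launch_template and pass it to this module instead. A hedged sketch of
# the caller side (the resource name "custom" is illustrative only):
#
#   launch_template_id      = aws_launch_template.custom.id
#   launch_template_version = aws_launch_template.custom.latest_version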
resource "aws_eks_node_group" "default" {
count = local.enabled ? 1 : 0
cluster_name = var.cluster_name
node_group_name = module.label.id
node_role_arn = join("", aws_iam_role.default.*.arn)
subnet_ids = var.subnet_ids
ami_type = var.ami_type
labels = var.kubernetes_labels
release_version = var.ami_release_version
version = var.kubernetes_version
tags = local.node_group_tags
scaling_config {
desired_size = var.desired_size
max_size = var.max_size
min_size = var.min_size
}
launch_template {
id = local.launch_template.id
version = local.launch_template.latest_version
}
dynamic "remote_access" {
for_each = var.ec2_ssh_key != null && var.ec2_ssh_key != "" ? ["true"] : []
content {
ec2_ssh_key = var.ec2_ssh_key
source_security_group_ids = var.source_security_group_ids
}
}
# Ensure that IAM Role permissions are created before and deleted after EKS Node Group handling.
# Otherwise, EKS will not be able to properly delete EC2 Instances and Elastic Network Interfaces.
depends_on = [
aws_iam_role_policy_attachment.amazon_eks_worker_node_policy,
aws_iam_role_policy_attachment.amazon_eks_worker_node_autoscaler_policy,
aws_iam_role_policy_attachment.amazon_eks_cni_policy,
aws_iam_role_policy_attachment.amazon_ec2_container_registry_read_only,
# Also allow calling module to create an explicit dependency
# This is useful in conjunction with terraform-aws-eks-cluster to ensure
# the cluster is fully created and configured before creating any node groups
var.module_depends_on
]
lifecycle {
ignore_changes = [scaling_config[0].desired_size]
}
}
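# Example invocation (a hedged sketch: the source path, values, and upstream
# module outputs are illustrative, while the input names are the variables
# consumed above):
#
#   module "eks_node_group" {
#     source = "./modules/terraform-aws-eks-node-group"
#
#     cluster_name              = module.eks_cluster.eks_cluster_id
#     subnet_ids                = module.subnets.private_subnet_ids
#     instance_types            = ["t3.medium"]
#     desired_size              = 2
#     min_size                  = 1
#     max_size                  = 4
#     enable_cluster_autoscaler = true
#
#     # Ensure the cluster is fully configured before creating the node group
#     module_depends_on = module.eks_cluster.kubernetes_config_map_id
#   }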