# aws.yml
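#
# Provisions the on-demand "ee" stack in the default VPC: an ELB with its
# security groups, a versioned launch configuration, and an auto scaling
# group, then cleans up the previous launch configuration and tags the
# resulting instances.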
---
- hosts: 127.0.0.1
connection: local
vars:
asg_id: ee_asg
region: "{{ lookup('env', 'AWS_REGION') or 'eu-west-2' }}"
regions:
eu-west-2:
ami_id: ami-0bb6dea4e7fb55398
# The default VPC
vpc_id: vpc-110bfc78
vpc_id: "{{ regions[region].vpc_id }}"
ami_id: "{{ regions[region].ami_id }}"
lc_num: 109
elb_ssl_arn: "arn:aws:acm:eu-west-2:929325949831:certificate/f353d7fd-fc69-44da-91c5-2bf8225e44ce"
old_lc_num: "{{ lc_num - 1 }}"
aws_env: "{{ lookup('env', 'ENVIRONMENT') or 'test' }}"
environment:
AWS_REGION: "{{ region }}"
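  # region / aws_env fall back to eu-west-2 / test when AWS_REGION and
  # ENVIRONMENT are not exported. replace_all and spot_price only take effect
  # when supplied externally (e.g. as extra vars). A typical invocation
  # (illustrative, assuming AWS credentials are already in the environment):
  #   AWS_REGION=eu-west-2 ENVIRONMENT=test ansible-playbook aws.yml -e replace_all=True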
tasks:
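    # Look up every subnet in the VPC once; the IDs feed both the ELB's
    # subnets and the ASG's vpc_zone_identifier below.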
- ec2_vpc_subnet_facts:
filters:
vpc-id: "{{ vpc_id }}"
register: subnets
- name: ELB security group
ec2_group:
name: "ee-elb-{{ aws_env }}"
description: "ELB http security group"
vpc_id: "{{ vpc_id }}"
rules:
- proto: tcp
from_port: 80
to_port: 80
cidr_ip: 0.0.0.0/0
- proto: tcp
from_port: 443
to_port: 443
cidr_ip: 0.0.0.0/0
register: sg_elb
- name: Instance Security Group
ec2_group:
name: "{{ asg_id }}-{{ aws_env }}"
description: "Allow access for HTTP from the ELB"
vpc_id: "{{ vpc_id }}"
rules:
- proto: tcp
from_port: 80
to_port: 80
group_id: "{{ sg_elb.group_id }}"
register: sg_instance
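    # Traffic path: the internet reaches the ELB on 80/443, while instances
    # only accept port 80 from the ELB's security group.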
    # This will fail the first time because we're not adding any instances to it :(
- name: Elastic Load Balancer
ec2_elb_lb:
name: "ee-{{ aws_env }}"
state: present
security_group_ids: "{{ sg_elb.group_id }}"
        # When the ELB isn't handling much traffic it may only have ELB nodes in
        # 2 AZs while we have instances in 3. This setting makes it send traffic
        # to all backend instances in that case.
cross_az_load_balancing: True
connection_draining_timeout: 20
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
- protocol: https
load_balancer_port: 443
instance_port: 80
instance_protocol: http
ssl_certificate_id: "{{ elb_ssl_arn }}"
health_check:
ping_protocol: http
ping_port: 80
ping_path: "/reference_definition/"
response_timeout: 2
interval: 30
unhealthy_threshold: 8
healthy_threshold: 2
subnets: "{{ subnets.subnets | map(attribute='id') |list }}"
tags:
Env: "{{ aws_env }}"
Name: "ee_{{ aws_env }}"
register: elb_result
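    # elb_result is consumed by the ASG below, which attaches to this ELB by
    # name and relies on its health check to decide when instances are healthy.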
- name: On-demand Launch Config
ec2_lc:
name: "ee_{{ aws_env }}-{{ lc_num }}"
assign_public_ip: yes
image_id: "{{ ami_id }}"
instance_type: t3.medium
security_groups: ["{{ sg_instance.group_id }}", "sg-807bc0e9"]
user_data: "{{lookup('template', 'userdata.yml') }}"
instance_profile_name: ee-packer-ami-builder
register: launchconfig
      when: spot_price is not defined
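    # The launch config above and the ASG below only run for the on-demand
    # case (spot_price not supplied); a spot variant is presumably handled
    # elsewhere.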
- name: On-demand Autoscaling group
ec2_asg:
name: "{{ asg_id }}-{{ aws_env }}"
state: present
tags:
- Env: "{{ aws_env }}"
Name: "{{ asg_id }}"
desired_capacity: 1
health_check_type: ELB
        launch_config_name: "{{ launchconfig.name }}"
load_balancers: ["{{ elb_result.elb.name }}"]
max_size: 10
min_size: 1
replace_all_instances: "{{ replace_all|default(False) }}"
termination_policies: [ OldestLaunchConfiguration, ClosestToNextInstanceHour ]
# Yes, subnets go in vpc_zone_identifier. Blame Garethr.
        vpc_zone_identifier: "{{ subnets.subnets | map(attribute='id') | list }}"
# Wait for this long to replace old instances
wait_timeout: 600
      when: spot_price is not defined
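    # Launch config rotation: each release bumps lc_num, the ASG above picks up
    # the new "ee_<aws_env>-<lc_num>" config, and the previous one (lc_num - 1)
    # is deleted below.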
- name: Delete old unused on-demand Launch Configs
ec2_lc:
name: "ee_{{ aws_env }}-{{ old_lc_num }}"
state: absent
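# Shared play from dc_commonscripts that tags the instances in the ASG
# (inferred from the playbook name).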
- import_playbook: vendor/dc_commonscripts/playbooks/tag_instances.yml asg_id='ee_asg'