# default_vars_ec2.yml (from a fork of redhat-cop/agnosticd)
---
# -------------------------------------------------------------------
# Default Variables for Amazon Web Services
# -------------------------------------------------------------------
# The type of cloud provider this will be deployed to
cloud_provider: ec2
# This is the administrative user account for the environment.
# It is created on the provisioned hosts and used for management access.
admin_user: opentlc-mgr
# This is the user that Ansible uses, from the admin/control host, to connect
# to the nodes it is configuring.
ansible_user: ec2-user
# -------------------------------------------------------------------
# AWS Infrastructure
# -------------------------------------------------------------------
# See cloud_providers/ec2_default_vars.yml
# See roles-infra/infra-ec2-project-create/defaults/main.yml
# The region to be used, if not specified by -e in the command line
aws_region: us-east-2
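# For example, to deploy to a different region (illustrative invocation; the
# playbook entry point may differ in your setup):
#   ansible-playbook main.yml -e aws_region=eu-west-1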
# The availability zones in which to create worker MachineSets.
# Leave empty for the default (one MachineSet per availability zone).
# Set to 5 entries or fewer for deployment in Sandboxes (each MachineSet
# needs an EIP, and Sandboxes only have 5 EIPs available).
openshift_machineset_aws_zones: []
# openshift_machineset_aws_zones:
# - us-east-1a
# - us-east-1b
# - us-east-1c
# -------------------------------------------------------------------
# Compute Architecture
# -------------------------------------------------------------------
ocp4_architecture_cluster: x86_64
# ocp4_architecture_cluster: arm64
# -------------------------------------------------------------------
# Project Tag
# -------------------------------------------------------------------
project_tag: "{{ env_type }}-{{ guid }}"
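# For example, with env_type=ocp4-cluster and guid=abc12 (illustrative values),
# project_tag evaluates to "ocp4-cluster-abc12".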
# -------------------------------------------------------------------
# AWS Networking
# -------------------------------------------------------------------
### Route 53 Zone ID (AWS)
# This is the Route53 HostedZoneId where your public DNS entries will be created.
# It only needs to be defined if your CloudFormation template uses Route53,
# and it must match the subdomain_base_suffix below.
#HostedZoneId: Z3IHLWJZOU9SRT
## Networking (AWS)
subdomain_base_suffix: ".example.opentlc.com"
subdomain_base_short: "{{ guid }}"
subdomain_base: "{{ guid }}{{ subdomain_base_suffix }}"
zone_internal_dns: "{{ guid }}.internal."
chomped_zone_internal_dns: "{{ guid }}.internal"
bastion_public_dns: "bastion.{{ subdomain_base }}."
bastion_public_dns_chomped: "bastion.{{ subdomain_base }}"
vpcid_name_tag: "{{ subdomain_base }}"
# The OpenShift 4 base domain (must match the HostedZoneId)
ocp4_base_domain: "{{ subdomain_base }}"
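# For example, with guid=abc12 (illustrative), subdomain_base evaluates to
# "abc12.example.opentlc.com" and bastion_public_dns_chomped to
# "bastion.abc12.example.opentlc.com".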
# -------------------------------------------------------------------
# AWS EC2 Instances
# -------------------------------------------------------------------
# Bastion Configuration
bastion_instance_type: "t3a.medium"
bastion_instance_image: RHEL8-default
# The platform string below is used for on-demand capacity reservations.
# For GOLD (Cloud Access) RHEL images:
#bastion_instance_platform: Linux/UNIX
# For standard (not GOLD) RHEL images:
#bastion_instance_platform: Red Hat Enterprise Linux
bastion_instance_platform: >-
{%- if 'RHEL' in bastion_instance_image -%}
{%- if 'GOLD' in bastion_instance_image -%}
Linux/UNIX
{%- else -%}
Red Hat Enterprise Linux
{%- endif -%}
{%- else -%}
Linux/UNIX
{%- endif -%}
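# With the default image above (RHEL8-default, no "GOLD" in the name), this
# evaluates to "Red Hat Enterprise Linux"; a GOLD image would yield "Linux/UNIX".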
# Root Filesystem Size
bastion_rootfs_size: 30
# Masters
master_instance_type_family: >-
{{ 'm6g' if ocp4_architecture_cluster is match('arm64')
else 'm5a' if ocp4_architecture_cluster is match('x86_64')
else 'm5a'
}}
master_instance_type_size: >-
{{ 'xlarge' if worker_instance_count|int <= 10
else '2xlarge' if worker_instance_count|int <= 20
else '4xlarge'
}}
master_instance_type: "{{ master_instance_type_family }}.{{ master_instance_type_size }}"
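# For example, with the defaults here (x86_64, 2 workers) master_instance_type
# evaluates to "m5a.xlarge"; an arm64 cluster with 15 workers would get
# "m6g.2xlarge".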
master_instance_count: 3
master_storage_type: >-
{{ 'io1' if worker_instance_count|int >= 10
else 'gp2' }}
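# For example, 10 or more workers select io1; the default of 2 workers
# selects gp2.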
# Size in Gigabytes (as an integer)
master_storage_size: 100
# When master_storage_type is io1 or io2, you can set the IOPS.
# You usually want to leave this unset; a suitable default IOPS value is
# calculated in the role host-ocp4-installer.
# master_storage_iops: 2000
worker_instance_type_family: >-
{{ 'm6g' if ocp4_architecture_cluster is match('arm64')
else 'm5a' if ocp4_architecture_cluster is match('x86_64')
else 'm5a'
}}
worker_instance_type: "{{ worker_instance_type_family }}.4xlarge"
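# With the defaults here this evaluates to "m5a.4xlarge" (or "m6g.4xlarge" on
# an arm64 cluster).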
worker_instance_count: 2
worker_storage_type: "gp2"
# Size in Gigabytes (as an integer)
worker_storage_size: 100
# Instances to be provisioned
# Provide these as a list.
# Each instance type can have any number of replicas deployed with the same
# configuration.
# Tags in AWS are the equivalent of metadata in OpenStack.
# These instances will be created with EBS persistent volumes.
# A commented-out example of a second entry follows the bastion definition
# below.
instances:
- name: "bastion"
# count: "{{ bastion_instance_count}}"
# unique: "{{ true if clientvm_instance_count | int <= 1 else false }}"
count: 1
unique: true
public_dns: true
alt_name:
- clientvm
image: "{{ bastion_instance_image }}"
flavor:
"ec2": "{{ bastion_instance_type }}"
tags:
- key: "AnsibleGroup"
value: "bastions,clientvms"
- key: "ostype"
value: "linux"
- key: "Purpose"
value: "{{ purpose }}"
- key: "project"
value: "{{ project_tag }}"
- key: "user"
value: "{{ student_name }}"
rootfs_size: "{{ bastion_rootfs_size }}"
security_groups:
- BastionSG
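# A hypothetical second entry (commented out) showing how multiple replicas of
# one configuration would be declared; the name, flavor, tags, and security
# group here are illustrative only:
# - name: "node"
#   count: 2
#   public_dns: false
#   image: "{{ bastion_instance_image }}"
#   flavor:
#     "ec2": "t3a.medium"
#   tags:
#     - key: "AnsibleGroup"
#       value: "nodes"
#     - key: "project"
#       value: "{{ project_tag }}"
#   rootfs_size: 30
#   security_groups:
#     - DefaultSG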
# -------------------------------------------------------------------
# AWS On-demand Capacity
# -------------------------------------------------------------------
# To disable ODCR entirely, just set the following variable to false:
agnosticd_aws_capacity_reservation_enable: true
######################################
# ocp4-cluster for workshops and labs
######################################
# This ODCR config has the best chance of deploying successfully.
# It has very few constraints, and its goal is to avoid Insufficient
# Instance Capacity errors.
#
# - Workers are split across 2 zones if possible, but may land in a single zone.
# - Masters are all in the same zone.
# - The bastion has its own zone, which can also be the same as the other
#   zones, because the zones are not requested to be distinct.
agnosticd_aws_capacity_reservation_distinct: false
agnosticd_aws_capacity_reservations:
# Bastion can have its own AZ
az1:
- instance_type: "{{ bastion_instance_type }}"
instance_count: 1
instance_platform: "{{ bastion_instance_platform }}"
masters:
- instance_type: "{{ master_instance_type }}"
instance_count: 3
instance_platform: Linux/UNIX
# Split workers across 2 AZs if possible; both halves could land in the same zone.
workers1:
# First half of the workers (rounded up)
- instance_type: "{{ worker_instance_type }}"
instance_count: >-
{{ ( worker_instance_count | int / 2 )
| round(0, 'ceil')
| int }}
instance_platform: Linux/UNIX
workers2:
- instance_type: "{{ worker_instance_type }}"
instance_count: >-
{{ ( worker_instance_count | int / 2 )
| round(0, 'ceil')
| int }}
instance_platform: Linux/UNIX
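# For example, with worker_instance_count=5, each of workers1 and workers2
# reserves ceil(5/2) = 3 instances (6 reserved in total for 5 workers).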