
Commit 47e198f (2 parents: f71b2a8 + 8cf2b50)

Merge branch 'EBS_volumes_count_rework' into 'master'

EBS volumes count management improvement

See merge request postgres-ai/terraform-postgres-ai-database-lab!44
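
In short: before this change, users listed EBS device paths in `aws_deploy_ec2_volumes_names`, and the volume count was derived from the length of that list. After it, users set only `aws_deploy_ec2_volumes_count` (1 to 22), and the device paths come from a new internal list, `aws_deploy_ec2_ebs_volumes_names`, covering `/dev/xvde` through `/dev/xvdz`. The merge also removes the `dle_disks` template parameter from instance.tf and fixes a broken shell variable assignment in dle-logical-init.sh.tpl.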

File tree

6 files changed: +50 -19 lines


README.md

Lines changed: 7 additions & 1 deletion

````diff
@@ -19,7 +19,7 @@ Your source PostgreSQL database can be located anywhere, but DLE with other comp
 * Read/Write permissions on Cloudwatch
 
 ## How to use
-- :construction: Currently, it is supposed that you run `terraform` commands on a Linux machine. MacOS and Windows support is not yet implemented (but planned).
+- :construction: Currently, it is supposed that you run `terraform` commands on a Linux machine or MacOS. Windows support is not yet implemented (but planned).
 - It is recommended to clone this Git repository and adjust for your needs. Below we provide the detailed step-by-step instructions for quick start (see "Quick start") for a PoC setup
 - To configure parameters used by Terraform (and the Database Lab Engine itself), you will need to modify `terraform.tfvars` and create a file with secrets (`secret.tfvars`)
 - This Terraform module can be run independently or combined with any other standard Terraform module. You can learn more about using Terraform and the Terraform CLI [here](https://www.terraform.io/docs/cli/commands/index.html)
@@ -58,6 +58,7 @@ The following steps were tested on Ubuntu 20.04 but supposed to be valid for oth
    aws_deploy_ec2_instance_type = "c5.large"
    aws_deploy_ec2_instance_tag_name = "DBLABserver-ec2instance"
    aws_deploy_ebs_size = "10"
+   aws_deploy_ec2_volumes_count = "2"
    aws_deploy_ebs_type = "gp2"
    aws_deploy_allow_ssh_from_cidrs = ["0.0.0.0/0"]
    aws_deploy_dns_api_subdomain = "tf-test" # subdomain in aws.postgres.ai, fqdn will be ${dns_api_subdomain}-engine.aws.postgres
@@ -73,7 +74,12 @@ The following steps were tested on Ubuntu 20.04 but supposed to be valid for oth
 
    platform_project_name = "aws_test_tf"
 
+   # list of ssh public keys stored in files
    ssh_public_keys_files_list = ["~/.ssh/id_rsa.pub"]
+   # or provided inline
+   ssh_public_keys_list = [
+     "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDhbblazDXCFEc21DtFzprWC8DiqidnVRROzp6J6BeJR9+XydPUtl0Rt2mcNvxL5ro5bI9u5JRW8aDd6s+Orpr66hEDdwQTbT1wp5nyduFQcT3rR+aeDSilQvAHjr4/z/GZ6IgZ5MICSIh5hJJagHoxAVqeS9dCA27tv/n2T2XrxIUeBhywH1EmfwrnEw97tHM8F+yegayFDI1nVOUWUIxFMaygMygix8uKbQ2fl4rkkxG2oEx7uyAFMXHt4bewNbZuAp8b/b5ODL6tGHuHhcwfbWGriCO+l7UOf1K9maTx00o4wkzAPyd+qs70y/1iMX2YOOLYaYYdptEnFal2DVoD example@example.com"
+   ]
    ```
 1. Create `secret.tfvars` containing `source_postgres_password`, `platform_access_token`, and `vcs_github_secret_token`. An example:
    ```config
````
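
The hunk above ends just as the `secret.tfvars` example opens. Based solely on the three keys the instruction names, such a file would look roughly like this (a hypothetical sketch with placeholder values, not the README's actual example):

```hcl
# secret.tfvars (hypothetical placeholder values)
source_postgres_password = "<source-db-password>"
platform_access_token    = "<postgres-ai-platform-token>"
vcs_github_secret_token  = "<github-token>"
```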

dle-logical-init.sh.tpl

Lines changed: 1 addition & 1 deletion

```diff
@@ -181,7 +181,7 @@ case "${source_type}" in
         .retrieval.spec.logicalRestore.options.dumpLocation="${source_pgdump_s3_mount_point}/${source_pgdump_path_on_s3_bucket}"
       ' $dle_config_path/server.yml
 
-      nProcessors = $(getconf _NPROCESSORS_ONLN)
+      nProcessors=$(getconf _NPROCESSORS_ONLN)
       yq e -i '
         .retrieval.spec.logicalDump.options.parallelJobs=${postgres_dump_parallel_jobs} |
         .retrieval.spec.logicalRestore.options.parallelJobs=$nProcessors
```
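
Why the one-character change above matters: with spaces, `nProcessors = $(getconf _NPROCESSORS_ONLN)` is parsed by the shell as an invocation of a command named `nProcessors`, with `=` and the substitution's output as its arguments, so the line fails ("command not found") and the variable stays unset. Without the spaces it is the intended variable assignment, and the following `yq` call can set `parallelJobs` to the detected CPU count.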

instance.tf

Lines changed: 0 additions & 1 deletion

```diff
@@ -49,7 +49,6 @@ resource "aws_instance" "aws_ec2" {
     dle_verification_token = "${random_string.dle_verification_token.result}"
     dle_debug_mode = "${var.dle_debug_mode}"
     dle_retrieval_refresh_timetable = "${var.dle_retrieval_refresh_timetable}"
-    dle_disks = "${join(" ",var.aws_deploy_ec2_volumes_names)}"
     dle_version = "${var.dle_version}"
     joe_version = "${var.joe_version}"
     aws_deploy_dns_zone_name = "${var.aws_deploy_dns_zone_name}"
```

terraform.tfvars

Lines changed: 4 additions & 1 deletion

```diff
@@ -9,7 +9,7 @@ aws_deploy_ec2_instance_type = "c5.large"
 aws_deploy_ec2_instance_tag_name = "DBLABserver-ec2instance"
 aws_deploy_ebs_size = "10"
 aws_deploy_ebs_type = "gp2"
-aws_deploy_ec2_volumes_names = ["/dev/xvdf", "/dev/xvdg",]
+aws_deploy_ec2_volumes_count = "2"
 aws_deploy_allow_ssh_from_cidrs = ["0.0.0.0/0"]
 aws_deploy_dns_api_subdomain = "tf-test" # subdomain in aws.postgres.ai, fqdn will be ${dns_api_subdomain}.aws.postgres.ai
 
@@ -29,3 +29,6 @@ platform_project_name = "aws_test_tf"
 # have them placed to authorized_keys. Instead of ssh_public_keys_files_list,
 # it is possible to use ssh_public_keys_list containing public keys as text values.
 ssh_public_keys_files_list = ["~/.ssh/id_rsa.pub"]
+ssh_public_keys_list = [
+  "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDhbblazDXCFEc21DtFzprWC8DiqidnVRROzp6J6BeJR9+XydPUtl0Rt2mcNvxL5ro5bI9u5JRW8aDd6s+Orpr66hEDdwQTbT1wp5nyduFQcT3rR+aeDSilQvAHjr4/z/GZ6IgZ5MICSIh5hJJagHoxAVqeS9dCA27tv/n2T2XrxIUeBhywH1EmfwrnEw97tHM8F+yegayFDI1nVOUWUIxFMaygMygix8uKbQ2fl4rkkxG2oEx7uyAFMXHt4bewNbZuAp8b/b5ODL6tGHuHhcwfbWGriCO+l7UOf1K9maTx00o4wkzAPyd+qs70y/1iMX2YOOLYaYYdptEnFal2DVoD example@example.com"
+]
```
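
Because device names are no longer user-supplied, scaling data storage is now a one-line change in `terraform.tfvars`. A hypothetical variation on the shipped defaults:

```hcl
# Hypothetical terraform.tfvars override: four data volumes instead of two.
# Device paths (/dev/xvde through /dev/xvdh) are assigned automatically from
# the module's built-in aws_deploy_ec2_ebs_volumes_names list.
aws_deploy_ec2_volumes_count = "4"
```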

variables.tf

Lines changed: 35 additions & 12 deletions

```diff
@@ -77,18 +77,9 @@ variable "aws_deploy_ebs_type" {
   default = "gp2"
 }
 
-# If we need to have more data disks, this array has to be extended.
-# TODO: change logic – user sets the number of disks only, not thinking about names
-variable "aws_deploy_ec2_volumes_names" {
-  description = "List of paths for EBS volumes mounts"
-  # This list is of "non-nitro" instances. For "nitro" ones,
-  # the real disk names will be different and in fact these names
-  # will be ignored. However, we still need to pass something here
-  # to proceed with the disk attachment.
-  default = [
-    "/dev/xvdf",
-    "/dev/xvdg",
-  ]
+variable "aws_deploy_ec2_volumes_count" {
+  description = "Number (from 1 to 22) of EBS volumes attached to EC2 to create ZFS pools"
+  default = "2"
 }
 
 variable "source_postgres_dbname" {
@@ -192,3 +183,35 @@ variable "ssh_public_keys_list"{
   description = "List of ssh public keys to copy to the provisioned instance with DLE"
   default = []
 }
+
+variable "aws_deploy_ec2_ebs_volumes_names" {
+  description = "List of paths for EBS volumes mounts"
+  # This list is of "non-nitro" instances. For "nitro" ones,
+  # the real disk names will be different and in fact these names
+  # will be ignored. However, we still need to pass something here
+  # to proceed with the disk attachment.
+  default = [
+    "/dev/xvde",
+    "/dev/xvdf",
+    "/dev/xvdg",
+    "/dev/xvdh",
+    "/dev/xvdi",
+    "/dev/xvdj",
+    "/dev/xvdk",
+    "/dev/xvdl",
+    "/dev/xvdm",
+    "/dev/xvdn",
+    "/dev/xvdo",
+    "/dev/xvdp",
+    "/dev/xvdq",
+    "/dev/xvdr",
+    "/dev/xvds",
+    "/dev/xvdt",
+    "/dev/xvdu",
+    "/dev/xvdv",
+    "/dev/xvdw",
+    "/dev/xvdx",
+    "/dev/xvdy",
+    "/dev/xvdz",
+  ]
+}
```
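
Regarding the comment carried over into the new variable: on AWS Nitro-based instances, EBS volumes surface in the guest OS as NVMe devices (`/dev/nvme*`), so the `/dev/xvd*` names requested here are not what the kernel ultimately uses; the attachment API still requires a device name, which is why the list is kept.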

volumes.tf

Lines changed: 3 additions & 3 deletions

```diff
@@ -1,13 +1,13 @@
 resource "aws_volume_attachment" "ebs_att" {
-  count = "${length(tolist(var.aws_deploy_ec2_volumes_names))}"
-  device_name = "${element(var.aws_deploy_ec2_volumes_names, count.index)}"
+  count = "${var.aws_deploy_ec2_volumes_count}"
+  device_name = "${element(var.aws_deploy_ec2_ebs_volumes_names, count.index)}"
   volume_id = "${element(aws_ebs_volume.DLEVolume.*.id, count.index)}"
   instance_id = "${aws_instance.aws_ec2.id}"
   force_detach = true
 }
 
 resource "aws_ebs_volume" "DLEVolume" {
-  count = "${length(tolist(var.aws_deploy_ec2_volumes_names))}"
+  count = "${var.aws_deploy_ec2_volumes_count}"
   availability_zone = "${var.aws_deploy_ebs_availability_zone}"
   encrypted = "${var.aws_deploy_ebs_encrypted}"
   size = "${var.aws_deploy_ebs_size}"
```