Upgrading terraform code to support v0.12.3
balaguduru committed Jul 18, 2019
1 parent 3e692b7 commit 31c3d78
Showing 9 changed files with 410 additions and 368 deletions.
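The substance of the change across these files is Terraform 0.12's first-class expression syntax: references that 0.11 required to be wrapped in interpolation strings, such as "${var.project}", are now written as bare expressions, such as var.project. A minimal sketch of the pattern (the resource and variable names here are illustrative, not taken from this repository):

# Terraform 0.12 expression syntax: plain references instead of
# interpolation-only strings.
variable "project" {}
variable "network_name" {}

resource "google_compute_network" "example" {
  # 0.11 style: name = "${var.network_name}"
  # 0.12 style:
  name    = var.network_name
  project = var.project

  auto_create_subnetworks = false
}

Changes of this shape can be generated mechanically with terraform 0.12upgrade followed by terraform fmt; whether those tools were used for this commit is not stated.
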
2 changes: 1 addition & 1 deletion Jenkinsfile
@@ -35,7 +35,7 @@ metadata:
spec:
  containers:
  - name: ${containerName}
-    image: gcr.io/pso-helmsman-cicd/jenkins-k8s-node:${env.CONTAINER_VERSION}
+    image: gcr.io/pso-helmsman-cicd/jenkins-k8s-node:${env.JENKINS_CONTAINER_VERSION}
    command: ['cat']
    tty: true
    volumeMounts:
2 changes: 1 addition & 1 deletion README.md
@@ -21,7 +21,7 @@ The demos in the project demonstrate the following best practices:
## Prerequisites

### Tools
-1. [Google Cloud SDK version >= 204.0.0](https://cloud.google.com/sdk/docs/downloads-versioned-archives)
+1. [Google Cloud SDK version >= 253.0.0](https://cloud.google.com/sdk/docs/downloads-versioned-archives)
2. [kubectl matching the latest GKE version](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
3. bash or bash compatible shell
4. [jq](https://stedolan.github.io/jq/)
9 changes: 5 additions & 4 deletions gke-to-gke-vpn/README.md
@@ -162,10 +162,11 @@ This project will run on macOS, or in a [Google Cloud Shell](https://cloud.googl

When not using Cloud Shell, the following tools are required.

-1. gcloud cli ( >= Google Cloud SDK 200.0.0 )
-2. bash
-3. kubectl - ( >= v1.10.0-gke.0 )
-4. jq
+1. [Terraform >= 0.12.3](https://www.terraform.io/downloads.html)
+2. gcloud cli ( >= Google Cloud SDK 253.0.0 )
+3. bash
+4. kubectl - ( >= v1.10.0-gke.0 )
+5. jq

### Versions
1. Kubernetes Engine >= 1.10.0-gke.0
195 changes: 101 additions & 94 deletions gke-to-gke-vpn/terraform/main.tf
@@ -21,146 +21,153 @@ Creating Clusters and their Node Pools
# Gets the current version of Kubernetes engine
data "google_container_engine_versions" "gke_version" {
  location = "us-east1-b"
-  project = "${var.project}"
+  project = var.project
}

// Install the first cluster
resource "google_container_cluster" "cluster-deployment-cluster1" {
  name = "cluster-deployment-cluster1"
-  project = "${var.project}"
-  location = "${var.cluster1-location}"
-  network = "${google_compute_network.network1.self_link}"
-  subnetwork = "${google_compute_subnetwork.subnet1-us-east1.self_link}"
+  project = var.project
+  location = var.cluster1-location
+  network = google_compute_network.network1.self_link
+  subnetwork = google_compute_subnetwork.subnet1-us-east1.self_link
  initial_node_count = "1"
  node_locations = []
-  min_master_version = "${data.google_container_engine_versions.gke_version.latest_master_version}"
+  min_master_version = data.google_container_engine_versions.gke_version.latest_master_version

  ip_allocation_policy {
-    use_ip_aliases = "true"
-    cluster_ipv4_cidr_block = "${var.cluster1-cidr}"
-    services_ipv4_cidr_block = "${var.cluster1-srv-cidr}"
+    use_ip_aliases = "true"
+    cluster_ipv4_cidr_block = var.cluster1-cidr
+    services_ipv4_cidr_block = var.cluster1-srv-cidr
  }
}

// Install node-pool for the first cluster. Terraform recommends keeping node pools in a separate resource from the main cluster.
resource "google_container_node_pool" "cluster1_nodes" {
-  name = "cluster1-nodes"
-  location = "${var.cluster1-location}"
-  project = "${var.project}"
-  cluster = "${google_container_cluster.cluster-deployment-cluster1.name}"
-  node_config {
-    oauth_scopes = [
-      "https://www.googleapis.com/auth/compute",
-      "https://www.googleapis.com/auth/devstorage.read_only",
-      "https://www.googleapis.com/auth/logging.write",
-      "https://www.googleapis.com/auth/monitoring",
-    ]
-    image_type = "COS"
-    tags = ["kc-node"]
-  }
+  name = "cluster1-nodes"
+  location = var.cluster1-location
+  project = var.project
+  cluster = google_container_cluster.cluster-deployment-cluster1.name
+
+  node_config {
+    oauth_scopes = [
+      "https://www.googleapis.com/auth/compute",
+      "https://www.googleapis.com/auth/devstorage.read_only",
+      "https://www.googleapis.com/auth/logging.write",
+      "https://www.googleapis.com/auth/monitoring",
+    ]
+
+    image_type = "COS"
+    tags = ["kc-node"]
+  }
}

// Install the second cluster
resource "google_container_cluster" "cluster-deployment-cluster2" {
  name = "cluster-deployment-cluster2"
-  project = "${var.project}"
-  location = "${var.cluster2-location}"
-  network = "${google_compute_network.network1.self_link}"
-  subnetwork = "${google_compute_subnetwork.subnet2-us-central1.self_link}"
+  project = var.project
+  location = var.cluster2-location
+  network = google_compute_network.network1.self_link
+  subnetwork = google_compute_subnetwork.subnet2-us-central1.self_link
  initial_node_count = "1"
  node_locations = []
-  min_master_version = "${data.google_container_engine_versions.gke_version.latest_master_version}"
+  min_master_version = data.google_container_engine_versions.gke_version.latest_master_version

  ip_allocation_policy {
-    use_ip_aliases = "true"
-    cluster_ipv4_cidr_block = "${var.cluster2-cidr}"
-    services_ipv4_cidr_block = "${var.cluster2-srv-cidr}"
+    use_ip_aliases = "true"
+    cluster_ipv4_cidr_block = var.cluster2-cidr
+    services_ipv4_cidr_block = var.cluster2-srv-cidr
  }
}

// Install node-pool for the second cluster.
resource "google_container_node_pool" "cluster2_nodes" {
-  name = "cluster2-nodes"
-  location = "${var.cluster2-location}"
-  project = "${var.project}"
-  cluster = "${google_container_cluster.cluster-deployment-cluster2.name}"
-  node_config {
-    oauth_scopes = [
-      "https://www.googleapis.com/auth/compute",
-      "https://www.googleapis.com/auth/devstorage.read_only",
-      "https://www.googleapis.com/auth/logging.write",
-      "https://www.googleapis.com/auth/monitoring",
-    ]
-    image_type = "COS"
-    tags = ["kc-node"]
-  }
+  name = "cluster2-nodes"
+  location = var.cluster2-location
+  project = var.project
+  cluster = google_container_cluster.cluster-deployment-cluster2.name
+
+  node_config {
+    oauth_scopes = [
+      "https://www.googleapis.com/auth/compute",
+      "https://www.googleapis.com/auth/devstorage.read_only",
+      "https://www.googleapis.com/auth/logging.write",
+      "https://www.googleapis.com/auth/monitoring",
+    ]
+
+    image_type = "COS"
+    tags = ["kc-node"]
+  }
}

// Install the third cluster
resource "google_container_cluster" "cluster-deployment-cluster3" {
  name = "cluster-deployment-cluster3"
-  project = "${var.project}"
-  location = "${var.cluster3-location}"
-  network = "${google_compute_network.network2.self_link}"
-  subnetwork = "${google_compute_subnetwork.subnet3-us-east1.self_link}"
+  project = var.project
+  location = var.cluster3-location
+  network = google_compute_network.network2.self_link
+  subnetwork = google_compute_subnetwork.subnet3-us-east1.self_link
  initial_node_count = "1"
  node_locations = []
-  min_master_version = "${data.google_container_engine_versions.gke_version.latest_master_version}"
+  min_master_version = data.google_container_engine_versions.gke_version.latest_master_version

  ip_allocation_policy {
-    use_ip_aliases = "true"
-    cluster_ipv4_cidr_block = "${var.cluster3-cidr}"
-    services_ipv4_cidr_block = "${var.cluster3-srv-cidr}"
+    use_ip_aliases = "true"
+    cluster_ipv4_cidr_block = var.cluster3-cidr
+    services_ipv4_cidr_block = var.cluster3-srv-cidr
  }
}

// Install node-pool for the third cluster.
resource "google_container_node_pool" "cluster3_nodes" {
-  name = "cluster3-nodes"
-  location = "${var.cluster3-location}"
-  project = "${var.project}"
-  cluster = "${google_container_cluster.cluster-deployment-cluster3.name}"
-  node_config {
-    oauth_scopes = [
-      "https://www.googleapis.com/auth/compute",
-      "https://www.googleapis.com/auth/devstorage.read_only",
-      "https://www.googleapis.com/auth/logging.write",
-      "https://www.googleapis.com/auth/monitoring",
-    ]
-    image_type = "COS"
-    tags = ["kc-node"]
-  }
+  name = "cluster3-nodes"
+  location = var.cluster3-location
+  project = var.project
+  cluster = google_container_cluster.cluster-deployment-cluster3.name
+
+  node_config {
+    oauth_scopes = [
+      "https://www.googleapis.com/auth/compute",
+      "https://www.googleapis.com/auth/devstorage.read_only",
+      "https://www.googleapis.com/auth/logging.write",
+      "https://www.googleapis.com/auth/monitoring",
+    ]
+
+    image_type = "COS"
+    tags = ["kc-node"]
+  }
}

// Install the fourth cluster
resource "google_container_cluster" "cluster-deployment-cluster4" {
  name = "cluster-deployment-cluster4"
-  project = "${var.project}"
-  location = "${var.cluster4-location}"
-  network = "${google_compute_network.network2.self_link}"
-  subnetwork = "${google_compute_subnetwork.subnet4-us-central1.self_link}"
+  project = var.project
+  location = var.cluster4-location
+  network = google_compute_network.network2.self_link
+  subnetwork = google_compute_subnetwork.subnet4-us-central1.self_link
  initial_node_count = "1"
  node_locations = []
-  min_master_version = "${data.google_container_engine_versions.gke_version.latest_master_version}"
+  min_master_version = data.google_container_engine_versions.gke_version.latest_master_version

  ip_allocation_policy {
-    use_ip_aliases = "true"
-    cluster_ipv4_cidr_block = "${var.cluster4-cidr}"
-    services_ipv4_cidr_block = "${var.cluster4-srv-cidr}"
+    use_ip_aliases = "true"
+    cluster_ipv4_cidr_block = var.cluster4-cidr
+    services_ipv4_cidr_block = var.cluster4-srv-cidr
  }
}

// Install node-pool for the fourth cluster.
resource "google_container_node_pool" "cluster4_nodes" {
-  name = "cluster4-nodes"
-  location = "${var.cluster4-location}"
-  project = "${var.project}"
-  cluster = "${google_container_cluster.cluster-deployment-cluster4.name}"
-  node_config {
-    oauth_scopes = [
-      "https://www.googleapis.com/auth/compute",
-      "https://www.googleapis.com/auth/devstorage.read_only",
-      "https://www.googleapis.com/auth/logging.write",
-      "https://www.googleapis.com/auth/monitoring",
-    ]
-    image_type = "COS"
-    tags = ["kc-node"]
-  }
-}
+  name = "cluster4-nodes"
+  location = var.cluster4-location
+  project = var.project
+  cluster = google_container_cluster.cluster-deployment-cluster4.name
+
+  node_config {
+    oauth_scopes = [
+      "https://www.googleapis.com/auth/compute",
+      "https://www.googleapis.com/auth/devstorage.read_only",
+      "https://www.googleapis.com/auth/logging.write",
+      "https://www.googleapis.com/auth/monitoring",
+    ]
+
+    image_type = "COS"
+    tags = ["kc-node"]
+  }
+}
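
With 0.12 syntax, downstream configuration refers to these resources in the same bare-expression style, with no interpolation wrapper. A hypothetical output, shown only to illustrate the reference style and not part of this commit:

output "cluster1_endpoint" {
  # Bare 0.12 reference to an attribute exported by the cluster resource.
  value = google_container_cluster.cluster-deployment-cluster1.endpoint
}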
59 changes: 28 additions & 31 deletions gke-to-gke-vpn/terraform/network.tf
@@ -22,73 +22,70 @@ It will setup Networks, IPs and Subnetworks
// Setting up Static IP Addresses
resource "google_compute_address" "vpn1-ip-address" {
  name = "vpn1-ip-address"
-  project = "${var.project}"
-  region = "${var.region1}"
+  project = var.project
+  region = var.region1
}

resource "google_compute_address" "vpn2-ip-address" {
  name = "vpn2-ip-address"
-  project = "${var.project}"
-  region = "${var.region2}"
+  project = var.project
+  region = var.region2
}

resource "google_compute_address" "vpn3-ip-address" {
  name = "vpn3-ip-address"
-  project = "${var.project}"
-  region = "${var.region1}"
+  project = var.project
+  region = var.region1
}

resource "google_compute_address" "vpn4-ip-address" {
  name = "vpn4-ip-address"
-  project = "${var.project}"
-  region = "${var.region2}"
+  project = var.project
+  region = var.region2
}

// Setting up 4 subnets for our 2 networks
resource "google_compute_subnetwork" "subnet1-us-east1" {
  name = "subnet1-us-east1"
-  project = "${var.project}"
-  ip_cidr_range = "${var.node1-cidr}"
-  network = "${google_compute_network.network1.self_link}"
-  region = "${var.region1}"
+  project = var.project
+  ip_cidr_range = var.node1-cidr
+  network = google_compute_network.network1.self_link
+  region = var.region1
}

resource "google_compute_subnetwork" "subnet2-us-central1" {
  name = "subnet2-us-central1"
-  project = "${var.project}"
-  ip_cidr_range = "${var.node2-cidr}"
-  network = "${google_compute_network.network1.self_link}"
-  region = "${var.region2}"
+  project = var.project
+  ip_cidr_range = var.node2-cidr
+  network = google_compute_network.network1.self_link
+  region = var.region2
}

resource "google_compute_subnetwork" "subnet3-us-east1" {
  name = "subnet3-us-east1"
-  project = "${var.project}"
-  ip_cidr_range = "${var.node3-cidr}"
-  network = "${google_compute_network.network2.self_link}"
-  region = "${var.region1}"
+  project = var.project
+  ip_cidr_range = var.node3-cidr
+  network = google_compute_network.network2.self_link
+  region = var.region1
}

resource "google_compute_subnetwork" "subnet4-us-central1" {
  name = "subnet4-us-central1"
-  project = "${var.project}"
-  ip_cidr_range = "${var.node4-cidr}"
-  network = "${google_compute_network.network2.self_link}"
-  region = "${var.region2}"
+  project = var.project
+  ip_cidr_range = var.node4-cidr
+  network = google_compute_network.network2.self_link
+  region = var.region2
}

// Setting up 2 networks
resource "google_compute_network" "network1" {
-  name = "${var.network1}"
-  project = "${var.project}"
+  name = var.network1
+  project = var.project
  auto_create_subnetworks = false
}

resource "google_compute_network" "network2" {
-  name = "${var.network2}"
-  project = "${var.project}"
+  name = var.network2
+  project = var.project
  auto_create_subnetworks = false
}



4 changes: 2 additions & 2 deletions gke-to-gke-vpn/terraform/provider.tf
@@ -15,6 +15,6 @@ limitations under the License.
*/

provider "google" {
-  version = "2.4.0"
-  region = "${var.region1}"
+  version = "2.11.0"
+  region = var.region1
}
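
Projects upgraded to 0.12 commonly pin the core version alongside the provider bump. The block below is a minimal sketch of that convention; it is an assumption about typical practice, not something visible in the hunks rendered above:

terraform {
  # Assumed constraint matching the commit title; adjust to the repository's policy.
  required_version = ">= 0.12.3"

  # 0.12-style provider constraint (string form); an alternative to the
  # version argument inside the provider "google" block.
  required_providers {
    google = ">= 2.11.0"
  }
}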