From 454caf71801e7e99f4fe37f6acf48b63e36af9e3 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Wed, 28 Jun 2017 09:26:33 -0700 Subject: [PATCH] Auto-update dependencies. [(#1005)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1005) * Auto-update dependencies. * Fix bigtable lint * Fix IOT iam interaction --- samples/metricscaler/metricscaler.py | 38 +++++++++++------------ samples/metricscaler/metricscaler_test.py | 4 +-- samples/metricscaler/requirements.txt | 2 +- 3 files changed, 21 insertions(+), 23 deletions(-) diff --git a/samples/metricscaler/metricscaler.py b/samples/metricscaler/metricscaler.py index 8a61ca3eb..486795ce6 100644 --- a/samples/metricscaler/metricscaler.py +++ b/samples/metricscaler/metricscaler.py @@ -22,7 +22,6 @@ from google.cloud import monitoring - def get_cpu_load(): """Returns the most recent Cloud Bigtable CPU load measurement. @@ -51,23 +50,22 @@ def scale_bigtable(bigtable_instance, bigtable_cluster, scale_up): bigtable_cluster (str): Cloud Bigtable cluster ID to scale scale_up (bool): If true, scale up, otherwise scale down """ - _MIN_NODE_COUNT = 3 - """ - The minimum number of nodes to use. The default minimum is 3. If you have a - lot of data, the rule of thumb is to not go below 2.5 TB per node for SSD - clusters, and 8 TB for HDD. The bigtable.googleapis.com/disk/bytes_used - metric is useful in figuring out the minimum number of nodes. - """ - _MAX_NODE_COUNT = 30 - """ - The maximum number of nodes to use. The default maximum is 30 nodes per zone. - If you need more quota, you can request more by following the instructions - here. - """ + # The minimum number of nodes to use. The default minimum is 3. If you have + # a lot of data, the rule of thumb is to not go below 2.5 TB per node for + # SSD clusters, and 8 TB for HDD. The + # "bigtable.googleapis.com/disk/bytes_used" metric is useful in figuring + # out the minimum number of nodes. + min_node_count = 3 + + # The maximum number of nodes to use. 
The default maximum is 30 nodes per + # zone. If you need more quota, you can request more by following the + # instructions at https://cloud.google.com/bigtable/quota. + max_node_count = 30 + + # The number of nodes to change the cluster by. + size_change_step = 3 - _SIZE_CHANGE_STEP = 3 - """The number of nodes to change the cluster by.""" # [START bigtable_scale] bigtable_client = bigtable.Client(admin=True) instance = bigtable_client.instance(bigtable_instance) @@ -79,16 +77,16 @@ def scale_bigtable(bigtable_instance, bigtable_cluster, scale_up): current_node_count = cluster.serve_nodes if scale_up: - if current_node_count < _MAX_NODE_COUNT: - new_node_count = min(current_node_count + 3, _MAX_NODE_COUNT) + if current_node_count < max_node_count: + new_node_count = min(current_node_count + 3, max_node_count) cluster.serve_nodes = new_node_count cluster.update() print('Scaled up from {} to {} nodes.'.format( current_node_count, new_node_count)) else: - if current_node_count > _MIN_NODE_COUNT: + if current_node_count > min_node_count: new_node_count = max( - current_node_count - _SIZE_CHANGE_STEP, _MIN_NODE_COUNT) + current_node_count - size_change_step, min_node_count) cluster.serve_nodes = new_node_count cluster.update() print('Scaled down from {} to {} nodes.'.format( diff --git a/samples/metricscaler/metricscaler_test.py b/samples/metricscaler/metricscaler_test.py index 7a151a0ef..76561ca65 100644 --- a/samples/metricscaler/metricscaler_test.py +++ b/samples/metricscaler/metricscaler_test.py @@ -20,13 +20,13 @@ from google.cloud import bigtable from mock import patch -from metricscaler import _SIZE_CHANGE_STEP from metricscaler import get_cpu_load from metricscaler import main from metricscaler import scale_bigtable # tests assume instance and cluster have the same ID BIGTABLE_INSTANCE = os.environ['BIGTABLE_CLUSTER'] +SIZE_CHANGE_STEP = 3 # System tests to verify API calls succeed @@ -50,7 +50,7 @@ def test_scale_bigtable(): cluster.reload() new_node_count = 
cluster.serve_nodes - assert (new_node_count == (original_node_count + _SIZE_CHANGE_STEP)) + assert (new_node_count == (original_node_count + SIZE_CHANGE_STEP)) scale_bigtable(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, False) time.sleep(3) diff --git a/samples/metricscaler/requirements.txt b/samples/metricscaler/requirements.txt index 480c67b22..8153a0272 100644 --- a/samples/metricscaler/requirements.txt +++ b/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==0.24.0 +google-cloud-bigtable==0.25.0 google-cloud-monitoring==0.25.0