Skip to content

Commit

Permalink
#4237 BT metricscaler - Filter cpu query to get the metrics for the correct resources (#4238)
Browse files Browse the repository at this point in the history

* #4237 Added a filter to the cpu & storage queries to get the metrics for the correct resources

* fix lint

* fix lint again

Co-authored-by: Leah E. Cole <6719667+leahecole@users.noreply.github.com>
Co-authored-by: Leah Cole <coleleah@google.com>
  • Loading branch information
3 people authored Jul 16, 2020
1 parent a7feb50 commit 238722a
Show file tree
Hide file tree
Showing 2 changed files with 32 additions and 15 deletions.
27 changes: 17 additions & 10 deletions bigtable/metricscaler/metricscaler.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@
programmatically scale a Google Cloud Bigtable cluster."""

import argparse
import logging
import os
import time

Expand All @@ -26,8 +27,12 @@

PROJECT = os.environ['GOOGLE_CLOUD_PROJECT']

logger = logging.getLogger('bigtable.metricscaler')
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.INFO)

def get_cpu_load():

def get_cpu_load(bigtable_instance, bigtable_cluster):
"""Returns the most recent Cloud Bigtable CPU load measurement.
Returns:
Expand All @@ -40,12 +45,13 @@ def get_cpu_load():
metric_type='bigtable.googleapis.com/'
'cluster/cpu_load',
minutes=5)
cpu_query = cpu_query.select_resources(instance=bigtable_instance, cluster=bigtable_cluster)
cpu = next(cpu_query.iter())
return cpu.points[0].value.double_value
# [END bigtable_cpu]


def get_storage_utilization():
def get_storage_utilization(bigtable_instance, bigtable_cluster):
"""Returns the most recent Cloud Bigtable storage utilization measurement.
Returns:
Expand All @@ -58,6 +64,7 @@ def get_storage_utilization():
metric_type='bigtable.googleapis.com/'
'cluster/storage_utilization',
minutes=5)
utilization_query = utilization_query.select_resources(instance=bigtable_instance, cluster=bigtable_cluster)
utilization = next(utilization_query.iter())
return utilization.points[0].value.double_value
# [END bigtable_metric_scaler_storage_utilization]
Expand Down Expand Up @@ -111,15 +118,15 @@ def scale_bigtable(bigtable_instance, bigtable_cluster, scale_up):
current_node_count + size_change_step, max_node_count)
cluster.serve_nodes = new_node_count
cluster.update()
print('Scaled up from {} to {} nodes.'.format(
logger.info('Scaled up from {} to {} nodes.'.format(
current_node_count, new_node_count))
else:
if current_node_count > min_node_count:
new_node_count = max(
current_node_count - size_change_step, min_node_count)
cluster.serve_nodes = new_node_count
cluster.update()
print('Scaled down from {} to {} nodes.'.format(
logger.info('Scaled down from {} to {} nodes.'.format(
current_node_count, new_node_count))
# [END bigtable_scale]

Expand All @@ -145,10 +152,10 @@ def main(
long_sleep (int): How long to sleep after the number of nodes is
changed
"""
cluster_cpu = get_cpu_load()
cluster_storage = get_storage_utilization()
print('Detected cpu of {}'.format(cluster_cpu))
print('Detected storage utilization of {}'.format(cluster_storage))
cluster_cpu = get_cpu_load(bigtable_instance, bigtable_cluster)
cluster_storage = get_storage_utilization(bigtable_instance, bigtable_cluster)
logger.info('Detected cpu of {}'.format(cluster_cpu))
logger.info('Detected storage utilization of {}'.format(cluster_storage))
try:
if cluster_cpu > high_cpu_threshold or cluster_storage > high_storage_threshold:
scale_bigtable(bigtable_instance, bigtable_cluster, True)
Expand All @@ -158,10 +165,10 @@ def main(
scale_bigtable(bigtable_instance, bigtable_cluster, False)
time.sleep(long_sleep)
else:
print('CPU within threshold, sleeping.')
logger.info('CPU within threshold, sleeping.')
time.sleep(short_sleep)
except Exception as e:
print("Error during scaling: %s", e)
logger.error("Error during scaling: %s", e)


if __name__ == '__main__':
Expand Down
20 changes: 15 additions & 5 deletions bigtable/metricscaler/metricscaler_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@

from google.cloud import bigtable
from google.cloud.bigtable import enums
from mock import patch
from mock import Mock, patch

import pytest

Expand All @@ -41,12 +41,18 @@
# System tests to verify API calls succeed


def test_get_cpu_load():
assert float(get_cpu_load()) > 0.0
@patch('metricscaler.query')
def test_get_cpu_load(monitoring_v3_query):
    """get_cpu_load should return the CPU metric reported by the query.

    The monitoring query module is patched so no API call is made; the
    mocked iterator yields a single time series whose latest point has a
    double_value of 1.0, and we assert the function surfaces exactly that
    value (a strict equality check, rather than a weak `> 0.0`).
    """
    iter_mock = monitoring_v3_query.Query().select_resources().iter
    point = Mock(value=Mock(double_value=1.0))
    iter_mock.return_value = iter([Mock(points=[point])])
    # NOTE(review): the cluster argument reuses BIGTABLE_INSTANCE — presumably
    # intentional for this sample's naming, but confirm a BIGTABLE_CLUSTER
    # constant isn't what was meant.
    assert get_cpu_load(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE) == 1.0


def test_get_storage_utilization():
assert float(get_storage_utilization()) > 0.0
@patch('metricscaler.query')
def test_get_storage_utilization(monitoring_v3_query):
    """get_storage_utilization should return the storage metric from the query.

    The monitoring query module is patched so no API call is made; the
    mocked iterator yields a single time series whose latest point has a
    double_value of 1.0, and we assert the function surfaces exactly that
    value (a strict equality check, rather than a weak `> 0.0`).
    """
    iter_mock = monitoring_v3_query.Query().select_resources().iter
    point = Mock(value=Mock(double_value=1.0))
    iter_mock.return_value = iter([Mock(points=[point])])
    # NOTE(review): the cluster argument reuses BIGTABLE_INSTANCE — presumably
    # intentional for this sample's naming, but confirm a BIGTABLE_CLUSTER
    # constant isn't what was meant.
    assert get_storage_utilization(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE) == 1.0


@pytest.fixture()
Expand Down Expand Up @@ -198,3 +204,7 @@ def test_main(scale_bigtable, get_cpu_load, get_storage_utilization, sleep):
scale_bigtable.assert_called_once_with(BIGTABLE_INSTANCE,
BIGTABLE_INSTANCE, True)
scale_bigtable.reset_mock()


# Allow running this file directly for a quick smoke check of the CPU-load
# test; the @patch decorator supplies the mock argument, so the bare call is
# valid. NOTE(review): only one of the tests is invoked here — the full suite
# is presumably run via pytest, which discovers the rest.
if __name__ == '__main__':
    test_get_cpu_load()

0 comments on commit 238722a

Please sign in to comment.