Use lazy log format (DataDog#522)
AlexandreYang authored Jan 27, 2020
1 parent 5633354 commit f94cff0
Showing 25 changed files with 120 additions and 136 deletions.
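Every file below applies the same pattern: logging calls that built their message eagerly (with % interpolation or str.format()) now pass lazy %-style arguments, so the string is only rendered when the record is actually emitted. A minimal standalone sketch of the before/after (the logger name and value are illustrative, not taken from the diff):

    import logging

    log = logging.getLogger(__name__)
    value = 42

    # Before (eager): the message string is built even when DEBUG is disabled.
    log.debug("metric {}".format(value))

    # After (lazy): logging interpolates the arguments only if a handler
    # emits the record, so disabled levels skip the formatting work.
    log.debug("metric %s", value)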
6 changes: 4 additions & 2 deletions .flake8
@@ -1,7 +1,9 @@
# TODO: port this to pyproject.toml when supported: https://gitlab.com/pycqa/flake8/merge_requests/245

[flake8]
-select = B,C,E,F,W,B001,B003,B006,B007,B301,B305,B306,B902
-ignore = E203,E722,W503
+select = B,C,E,F,G,W,B001,B003,B006,B007,B301,B305,B306,B902
+# Ignore reasons:
+# - G200: logging exception is ok, we don't always want to log the stack trace too
+ignore = E203,E722,W503,G200
exclude = .eggs,.tox,build,compat.py,__init__.py,datadog_checks/*/vendor/*
max-line-length = 120
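The added G codes correspond to the rule family of the flake8-logging-format plugin (an assumption; the config does not name the plugin), which flags eager formatting inside logging calls. G200, ignored above, targets passing a caught exception object as a log argument instead of calling log.exception(); a hypothetical snippet of the pattern the ignore keeps allowed:

    import logging

    log = logging.getLogger(__name__)

    try:
        1 / 0
    except ZeroDivisionError as ex:
        # G200 would suggest log.exception(...) to capture the stack trace;
        # per the comment above, a short one-line error log is often enough.
        log.error("Division failed: %s", ex)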
8 changes: 4 additions & 4 deletions aqua/datadog_checks/aqua/aqua.py
@@ -49,7 +49,7 @@ def check(self, instance):
token = self.get_aqua_token(instance)
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK, tags=instance_tags)
except Exception as ex:
self.log.error("Failed to get Aqua token, skipping check. Error: %s" % ex)
self.log.error("Failed to get Aqua token, skipping check. Error: %s", ex)
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL, tags=instance_tags)
return
self._report_base_metrics(instance, token)
@@ -109,7 +109,7 @@ def _report_base_metrics(self, instance, token):
# io-related exceptions (all those coming from requests are included in that) are handled.
# All other exceptions are raised.
except IOError as ex:
self.log.error("Failed to get base metrics. Some metrics will be missing. Error: %s" % ex)
self.log.error("Failed to get base metrics. Some metrics will be missing. Error: %s", ex)
return

# images
@@ -155,7 +155,7 @@ def _report_status_metrics(self, instance, token, metric_name, route, statuses):
# io-related exceptions (all those coming from requests are included in that) are handled.
# All other exceptions are raised.
except IOError as ex:
self.log.error("Failed to get %s metrics. Error: %s" % (metric_name, ex))
self.log.error("Failed to get %s metrics. Error: %s", metric_name, ex)
return
for status in statuses:
self.gauge(metric_name, metrics[status], tags=instance.get('tags', []) + ['status:%s' % statuses[status]])
@@ -169,6 +169,6 @@ def _report_connected_enforcers(self, instance, token):
# io-related exceptions (all those coming from requests are included in that) are handled.
# All other exceptions are raised.
except IOError as ex:
self.log.error("Failed to get enforcer metrics. Error: %s" % ex)
self.log.error("Failed to get enforcer metrics. Error: %s", ex)
return
self.gauge('aqua.enforcers', metrics['count'], tags=instance.get('tags', []) + ['status:all'])
51 changes: 25 additions & 26 deletions eventstore/datadog_checks/eventstore/eventstore.py
@@ -47,13 +47,13 @@ def check(self, instance):
# Flatten the self.init_config definitions into valid metric definitions
metric_definitions = {}
for metric in metric_def:
self.log.debug("metric {}".format(metric))
self.log.debug("metric %s", metric)
json_path = metric.get('json_path', '')
self.log.debug("json_path {}".format(json_path))
self.log.debug("json_path %s", json_path)
tags = metric.get('tag_by', {})
self.log.debug("tags {}".format(tags))
self.log.debug("tags %s", tags)
paths = self.get_json_path(json_path, eventstore_paths)
self.log.debug("paths {}".format(paths))
self.log.debug("paths %s", paths)
for path in paths:
# Deep copy needed else it will overwrite previous metric data
metric_builder = copy.deepcopy(metric)
@@ -73,16 +73,16 @@ def check(self, instance):
# Find metrics to check:
metrics_to_check = {}
for metric in instance['json_path']:
self.log.debug("metric: {}".format(metric))
self.log.debug("metric: %s", metric)
paths = self.get_json_path(metric, eventstore_paths)
self.log.debug("paths: {}".format(paths))
self.log.debug("paths: %s", paths)
for path in paths:
self.log.debug("path: {}".format(path))
self.log.debug("path: %s", path)
try:
metrics_to_check[path] = metric_definitions[path]
self.log.debug("metrics_to_check: {}".format(metric_definitions[path]))
self.log.debug("metrics_to_check: %s", metric_definitions[path])
except KeyError:
self.log.info("Skipping metric: {} as it is not defined".format(path))
self.log.info("Skipping metric: %s as it is not defined", path)

# Now we need to get the metrics from the endpoint
# Get the value for a given key
@@ -95,8 +95,7 @@ def check(self, instance):
self.dispatch_metric(value, metric)
else:
# self.dispatch_metric(0, metric)
self.log.debug("Metric {} did not return a value, skipping".format(metric['json_path']))
self.log.info("Metric {} did not return a value, skipping".format(metric['json_path']))
self.log.debug("Metric %s did not return a value, skipping", metric['json_path'])

@classmethod
def format_tag(cls, name):
@@ -144,32 +143,32 @@ def get_tag_path(self, tag, metric_json_path, eventstore_paths):
# No wildcard
return self.get_json_path(tag, eventstore_paths)[0]
except IndexError:
-self.log.warn('No tag value found for {}, path {}'.format(tag, metric_json_path))
+self.log.warning('No tag value found for %s, path %s', tag, metric_json_path)

def get_json_path(self, json_path, eventstore_paths):
""" Find all the possible keys for a given path """
self.log.debug("json paths: {}".format(json_path))
self.log.debug("eventstore_paths: {}".format(eventstore_paths))
self.log.debug("json paths: %s", json_path)
self.log.debug("eventstore_paths: %s", eventstore_paths)
response = []
self.log.debug("response: {}".format(response))
self.log.debug("response: %s", response)
try:
match = eventstore_paths.index(json_path)
self.log.debug("match: {}".format(match))
self.log.debug("match: %s", match)
if match is not None:
response.append(json_path)
self.log.debug("match json path: {}".format(json_path))
self.log.debug("match json path: %s", json_path)
except ValueError:
# Loop through all possible keys to find matches
# Value Error means it didn't find it, so it must be
# a wildcard
self.log.debug("value error")
for path in eventstore_paths:
self.log.debug("path: {}".format(path))
self.log.debug("path: %s", path)
match = fnmatch.fnmatch(path, json_path)
self.log.debug("match ve: {}".format(match))
self.log.debug("match ve: %s", match)
if match:
response.append(path)
self.log.debug("path ve: {}".format(path))
self.log.debug("path ve: %s", path)
return response

# Fill out eventstore_paths using walk of json
@@ -189,7 +188,7 @@ def get_value(self, dictionary, metric_path, index=0):
v = 'N/A'
return v
except KeyError:
-self.log.info('No value found for Metric: {}'.format(metric_path))
+self.log.info('No value found for Metric: %s', metric_path)

def convert_value(self, value, metric):
""" Returns the metric formatted in the specified value"""
@@ -230,9 +229,9 @@ def convert_to_timedelta(self, string):
td = datetime.timedelta(days=days, seconds=secs, microseconds=subsecs, minutes=mins, hours=hours)
return td
except AttributeError:
-self.log.info('Unable to convert {} to timedelta'.format(string))
+self.log.info('Unable to convert %s to timedelta', string)
except TypeError:
-self.log.info('Unable to convert {} to type timedelta'.format(string))
+self.log.info('Unable to convert %s to type timedelta', string)

@classmethod
def _regex_number_to_int(cls, number, group_index):
@@ -248,10 +247,10 @@ def dispatch_metric(self, value, metric):
tags = metric['tag_by']
metric_name = metric['metric_name']
if metric_type == 'gauge':
self.log.debug("Sending gauge {} v: {} t: {}".format(metric_name, value, tags))
self.log.debug("Sending gauge %s v: %s t: %s", metric_name, value, tags)
self.gauge(metric_name, value, tags)
elif metric_type == 'histogram':
self.log.debug("Sending histogram {} v: {} t: {}".format(metric_name, value, tags))
self.log.debug("Sending histogram %s v: %s t: %s", metric_name, value, tags)
self.histogram(metric_name, value, tags)
else:
-self.log.info('Unable to send metric {} due to invalid metric type of {}'.format(metric_name, metric_type))
+self.log.info('Unable to send metric %s due to invalid metric type of %s', metric_name, metric_type)
8 changes: 4 additions & 4 deletions filebeat/datadog_checks/filebeat/filebeat.py
@@ -242,7 +242,7 @@ def _parse_registry_file(self, registry_file_path):
with open(registry_file_path) as registry_file:
return json.load(registry_file)
except IOError as ex:
self.log.error("Cannot read the registry log file at %s: %s" % (registry_file_path, ex))
self.log.error("Cannot read the registry log file at %s: %s", registry_file_path, ex)

if ex.errno == errno.EACCES:
self.log.error(
@@ -263,9 +263,9 @@ def _process_registry_item(self, item):

self.gauge("filebeat.registry.unprocessed_bytes", unprocessed_bytes, tags=["source:{0}".format(source)])
else:
self.log.debug("Filebeat source %s appears to have changed" % (source,))
self.log.debug("Filebeat source %s appears to have changed", source)
except OSError:
self.log.debug("Unable to get stats on filebeat source %s" % (source,))
self.log.debug("Unable to get stats on filebeat source %s", source)

def _is_same_file(self, stats, file_state_os):
return stats.st_dev == file_state_os["device"] and stats.st_ino == file_state_os["inode"]
@@ -274,7 +274,7 @@ def _gather_http_profiler_metrics(self, config, profiler):
try:
all_metrics = profiler.gather_metrics()
except Exception as ex:
self.log.error("Error when fetching metrics from %s: %s" % (config.stats_endpoint, ex))
self.log.error("Error when fetching metrics from %s: %s", config.stats_endpoint, ex)
return

tags = ["stats_endpoint:{0}".format(config.stats_endpoint)]
5 changes: 1 addition & 4 deletions lighthouse/datadog_checks/lighthouse/__init__.py
@@ -1,7 +1,4 @@
from .__about__ import __version__
from .lighthouse import LighthouseCheck

-__all__ = [
-'__version__',
-'LighthouseCheck'
-]
+__all__ = ['__version__', 'LighthouseCheck']
29 changes: 14 additions & 15 deletions lighthouse/datadog_checks/lighthouse/lighthouse.py
@@ -1,8 +1,9 @@
+import json

from datadog_checks.base import AgentCheck
from datadog_checks.base.errors import CheckException
from datadog_checks.base.utils.common import round_value as round
from datadog_checks.utils.subprocess_output import get_subprocess_output
-import json

EXPECTED_RESPONSE_CODE = "NO_ERROR"

@@ -16,26 +17,21 @@ def check(self, instance):
self.log.error("missing instance url or name")
raise CheckException("missing lighthouse instance url or name, please fix yaml")

cmd = ["lighthouse",
lighthouse_url,
"--output",
"json",
"--quiet",
"--chrome-flags='--headless'"]
cmd = ["lighthouse", lighthouse_url, "--output", "json", "--quiet", "--chrome-flags='--headless'"]

json_string, error_message, exit_code = LighthouseCheck._get_lighthouse_report(cmd, self.log, False)

# check for error since we have raise_on_empty_output set to False
if exit_code > 0:
self.log.error("lighthouse subprocess error {0} exit code {1} for url: {2}"
.format(error_message, exit_code, lighthouse_url)
)
self.log.error(
"lighthouse subprocess error %s exit code %s for url: %s", error_message, exit_code, lighthouse_url
)
raise CheckException(json_string, error_message, exit_code)

try:
data = json.loads(json_string)
except Exception as e:
self.log.warn("lighthouse response JSON different than expected for url: {0}".format(lighthouse_url))
self.log.warning("lighthouse response JSON different than expected for url: %s", lighthouse_url)
raise CheckException(error_message, exit_code, e)

if data.get("runtimeError", {"code": EXPECTED_RESPONSE_CODE}).get("code") == EXPECTED_RESPONSE_CODE:
@@ -47,15 +43,18 @@ def check(self, instance):
else:
err_code = data.get("runtimeError", {}).get("code")
err_msg = data.get("runtimeError", {}).get("message")
self.log.warn("not collecting lighthouse metrics for url {0} runtimeError code {1} message {2}"
.format(lighthouse_url, err_code, err_msg)
)
self.log.warning(
"not collecting lighthouse metrics for url %s runtimeError code %s message %s",
lighthouse_url,
err_code,
err_msg,
)
return
# add tags

tags = instance.get('tags', [])
if type(tags) != list:
-self.log.warn('The tags list in the lighthouse check is not configured properly')
+self.log.warning('The tags list in the lighthouse check is not configured properly')
tags = []

tags.append("url:{0}".format(lighthouse_url))
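Besides the lazy-format rewrites, this file renames self.log.warn to self.log.warning. The change is more than cosmetic: Logger.warn is a deprecated alias in the standard library's logging module, and recent Python 3 versions issue a DeprecationWarning for it. A standalone illustration (not from the diff):

    import logging

    log = logging.getLogger(__name__)

    log.warning("documented spelling")  # preferred
    log.warn("deprecated alias")  # works, but triggers a DeprecationWarning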
7 changes: 0 additions & 7 deletions lighthouse/setup.py
@@ -25,17 +25,13 @@
long_description=long_description,
long_description_content_type='text/markdown',
keywords='datadog agent lighthouse check',
-
# The project's main homepage.
url='https://github.com/DataDog/integrations-extras',
-
# Author details
author='Eric Mustin',
author_email='mustin.eric@gmail.com',
-
# License
license='BSD-3-Clause',
-
# See https://pypi.org/classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
@@ -47,13 +43,10 @@
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
-
# The package we're going to ship
packages=['datadog_checks.lighthouse'],
-
# Run-time dependencies
install_requires=[CHECKS_BASE_REQ],
-
# Extra files to ship with the wheel package
include_package_data=True,
)
5 changes: 3 additions & 2 deletions lighthouse/tests/test_lighthouse.py
@@ -1,9 +1,10 @@
import os
-import pytest

+import pytest
from mock import MagicMock
-from datadog_checks.lighthouse import LighthouseCheck

from datadog_checks.errors import CheckException
+from datadog_checks.lighthouse import LighthouseCheck

HERE = os.path.dirname(os.path.abspath(__file__))

Expand Down
1 change: 1 addition & 0 deletions lighthouse/tox.ini
@@ -7,6 +7,7 @@ envlist =
flake8

[testenv]
+dd_check_style = true
usedevelop = true
platform = linux|darwin|win32
deps =
8 changes: 5 additions & 3 deletions logstash/datadog_checks/logstash/logstash.py
@@ -179,13 +179,15 @@ def _get_logstash_version(self, config):
version = data['version']
except Exception as e:
self.warning(
"Error while trying to get Logstash version "
"from %s %s. Defaulting to version %s." % (config.url, str(e), self.DEFAULT_VERSION)
"Error while trying to get Logstash version from %s %s. Defaulting to version %s.",
config.url,
e,
self.DEFAULT_VERSION,
)
version = self.DEFAULT_VERSION

self.service_metadata('version', version)
self.log.debug("Logstash version is %s" % version)
self.log.debug("Logstash version is %s", version)
return version

def check(self, instance):
2 changes: 1 addition & 1 deletion neo4j/datadog_checks/neo4j/neo4j.py
@@ -160,7 +160,7 @@ def _get_version(self, host, port, timeout, auth, service_check_tags):
r.raise_for_status()
stats = r.json()
version = stats.get('neo4j_version')
self.log.debug("Neo4j version: {}".format(version))
self.log.debug("Neo4j version: %s", version)
version = version.split('.')
if version:
return int(version[0])
2 changes: 1 addition & 1 deletion nextcloud/datadog_checks/nextcloud/nextcloud.py
@@ -48,7 +48,7 @@ def check(self, instance):
auth = (username, password)

try:
self.log.debug("Checking against {}".format(url))
self.log.debug("Checking against %s", url)
response = requests.get(url, auth=auth, headers=headers(self.agentConfig))
if response.status_code != 200:
self.service_check(
