From 2a2d842dea3945cef525ff5cc2983d8550810128 Mon Sep 17 00:00:00 2001
From: Paul
Date: Mon, 8 Nov 2021 11:25:13 +0100
Subject: [PATCH 01/19] Add legacy note on prometheus tile (#10561)

* Add legacy note

* Update prometheus/manifest.json
---
 prometheus/manifest.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/prometheus/manifest.json b/prometheus/manifest.json
index cc54d1b42dfc3..eb84a2aab15b7 100644
--- a/prometheus/manifest.json
+++ b/prometheus/manifest.json
@@ -9,7 +9,7 @@
   "maintainer": "help@datadoghq.com",
   "manifest_version": "1.0.0",
   "name": "prometheus",
-  "public_title": "Datadog-Prometheus Integration",
+  "public_title": "Datadog-Prometheus (legacy) Integration",
   "short_description": "Prometheus is an open source monitoring system for timeseries metric data",
   "support": "core",
   "supported_os": [

From 43cad5cbee615d3cb3e07de88cc6b21f69fed228 Mon Sep 17 00:00:00 2001
From: Julia <611228+hithwen@users.noreply.github.com>
Date: Mon, 8 Nov 2021 12:04:05 +0100
Subject: [PATCH 02/19] Add attempts (#10565)

---
 clickhouse/tests/conftest.py          | 2 +-
 datadog_checks_base/tests/conftest.py | 1 +
 ignite/tests/conftest.py              | 1 +
 3 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/clickhouse/tests/conftest.py b/clickhouse/tests/conftest.py
index f861b714d19d1..b83e39adcf974 100644
--- a/clickhouse/tests/conftest.py
+++ b/clickhouse/tests/conftest.py
@@ -21,7 +21,7 @@ def dd_environment():
                 'clickhouse-0{}'.format(i + 1), 'Logging errors to /var/log/clickhouse-server/clickhouse-server.err.log'
             )
         )
-    with docker_run(common.COMPOSE_FILE, conditions=conditions, sleep=10):
+    with docker_run(common.COMPOSE_FILE, conditions=conditions, sleep=10, attempts=2):
         yield common.CONFIG


diff --git a/datadog_checks_base/tests/conftest.py b/datadog_checks_base/tests/conftest.py
index a5543c753da20..29c2a4090acf1 100644
--- a/datadog_checks_base/tests/conftest.py
+++ b/datadog_checks_base/tests/conftest.py
@@ -52,6 +52,7 @@ def kerberos():
             'WEBPORT': webserver_port,
         },
         conditions=[CheckDockerLogs(compose_file, "ReadyToConnect")],
+        attempts=2,
     ):
         yield common_config

diff --git a/ignite/tests/conftest.py b/ignite/tests/conftest.py
index 62acc24e5e58b..623604555a077 100644
--- a/ignite/tests/conftest.py
+++ b/ignite/tests/conftest.py
@@ -52,6 +52,7 @@ def dd_environment():
         env_vars=env_vars,
         conditions=conditions,
         log_patterns="Ignite node started OK",
+        attempts=2,
     ):
         instance = load_jmx_config()
         instance['instances'][0]['port'] = 49112

From b52fddba21b262f5787461d0ce610aec0df3929d Mon Sep 17 00:00:00 2001
From: Julia <611228+hithwen@users.noreply.github.com>
Date: Mon, 8 Nov 2021 12:04:13 +0100
Subject: [PATCH 03/19] Change mongo tested versions (#10568)

---
 mongo/tox.ini | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/mongo/tox.ini b/mongo/tox.ini
index 89b8094d52220..96c3937b55df2 100644
--- a/mongo/tox.ini
+++ b/mongo/tox.ini
@@ -2,7 +2,7 @@
 minversion = 2.0
 basepython = py38
 envlist =
-    py{27,38}-{3.5,4.4}
+    py{27,38}-{4.4,5.0}

 [testenv]
 ensure_default_envdir = true
@@ -26,5 +26,5 @@ commands =
     pytest -v {posargs}
 setenv =
     DDEV_SKIP_GENERIC_TAGS_CHECK=true
-    3.5: MONGO_VERSION=3.5
     4.4: MONGO_VERSION=4.4
+    5.0: MONGO_VERSION=5.0

From 2450988c36925491748a04058e171f39df473f64 Mon Sep 17 00:00:00 2001
From: Julia <611228+hithwen@users.noreply.github.com>
Date: Mon, 8 Nov 2021 15:28:04 +0100
Subject: [PATCH 04/19] Add optional assertion (#10574)

---
 hdfs_datanode/tests/common.py           | 3 ++-
 hdfs_datanode/tests/test_integration.py | 6 ++++++
 2 files changed, 8 
insertions(+), 1 deletion(-) diff --git a/hdfs_datanode/tests/common.py b/hdfs_datanode/tests/common.py index 2c249e3b5436b..c4619837059b6 100644 --- a/hdfs_datanode/tests/common.py +++ b/hdfs_datanode/tests/common.py @@ -22,6 +22,8 @@ HDFS_RAW_VERSION = os.environ.get('HDFS_RAW_VERSION') HDFS_IMAGE_TAG = os.environ.get('HDFS_IMAGE_TAG') +OPTIONAL_METRICS = ['hdfs.datanode.num_blocks_failed_to_uncache'] + EXPECTED_METRICS = [ 'hdfs.datanode.dfs_remaining', 'hdfs.datanode.dfs_capacity', @@ -33,7 +35,6 @@ 'hdfs.datanode.num_blocks_cached', 'hdfs.datanode.num_failed_volumes', 'hdfs.datanode.num_blocks_failed_to_cache', - # 'hdfs.datanode.num_blocks_failed_to_uncache', metric is flakey in 3.1.3 ] HDFS_DATANODE_CONFIG = {'instances': [{'hdfs_datanode_jmx_uri': DATANODE_URI, 'tags': list(CUSTOM_TAGS)}]} diff --git a/hdfs_datanode/tests/test_integration.py b/hdfs_datanode/tests/test_integration.py index b03e636f135bc..7d7bf36dfc36c 100644 --- a/hdfs_datanode/tests/test_integration.py +++ b/hdfs_datanode/tests/test_integration.py @@ -4,6 +4,8 @@ import mock import pytest +from datadog_checks.dev.utils import get_metadata_metrics + from . import common pytestmark = pytest.mark.integration @@ -18,6 +20,10 @@ def test_check(aggregator, check, dd_run_check, instance): for metric in common.EXPECTED_METRICS: aggregator.assert_metric(metric) + for metric in common.OPTIONAL_METRICS: + aggregator.assert_metric(metric, at_least=0) + aggregator.assert_all_metrics_covered() + aggregator.assert_metrics_using_metadata(get_metadata_metrics()) @pytest.mark.usefixtures("dd_environment") From 9b1f0b7bf13b4806444b150ac4c173226af27cac Mon Sep 17 00:00:00 2001 From: Andrew Zhang <31313038+yzhan289@users.noreply.github.com> Date: Mon, 8 Nov 2021 09:55:34 -0500 Subject: [PATCH 05/19] Update README templates (#10564) * Update templates * Differentiate between integration type * Change name of links * Fix style * Fix [2] to [3] --- .../dev/tooling/commands/create.py | 2 +- .../datadog_checks/dev/tooling/constants.py | 53 +++++++++++++++++++ .../datadog_checks/dev/tooling/create.py | 9 +++- .../integration/check/{check_name}/README.md | 23 +++----- .../integration/jmx/{check_name}/README.md | 26 ++++----- .../integration/logs/{check_name}/README.md | 11 ++-- .../snmp_tile/snmp_{check_name}/README.md | 6 +-- .../integration/tile/{check_name}/README.md | 5 +- 8 files changed, 86 insertions(+), 49 deletions(-) diff --git a/datadog_checks_dev/datadog_checks/dev/tooling/commands/create.py b/datadog_checks_dev/datadog_checks/dev/tooling/commands/create.py index 6920d6b244ca2..ef027a037c2ae 100644 --- a/datadog_checks_dev/datadog_checks/dev/tooling/commands/create.py +++ b/datadog_checks_dev/datadog_checks/dev/tooling/commands/create.py @@ -153,7 +153,7 @@ def create(ctx, name, integration_type, location, non_interactive, quiet, dry_ru f"\n # The project's main homepage." 
f"\n url='https://github.com/DataDog/integrations-{repo_choice}'," ) - config = construct_template_fields(name, repo_choice, **template_fields) + config = construct_template_fields(name, repo_choice, integration_type, **template_fields) files = create_template_files(integration_type, root, config, read=not dry_run) file_paths = [file.file_path.replace(f'{root}{path_sep}', '', 1) for file in files] diff --git a/datadog_checks_dev/datadog_checks/dev/tooling/constants.py b/datadog_checks_dev/datadog_checks/dev/tooling/constants.py index ff8ca5fe7499d..6903a749871ce 100644 --- a/datadog_checks_dev/datadog_checks/dev/tooling/constants.py +++ b/datadog_checks_dev/datadog_checks/dev/tooling/constants.py @@ -83,6 +83,59 @@ 'wmi_check', ] +CHECK_LINKS = """\ +[1]: **LINK_TO_INTEGRATION_SITE** +[2]: https://app.datadoghq.com/account/settings#agent +[3]: https://docs.datadoghq.com/agent/kubernetes/integrations/ +[4]: https://github.com/DataDog/integrations-core/blob/master/check/datadog_checks/check/data/conf.yaml.example +[5]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent +[6]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information +[7]: https://github.com/DataDog/integrations-core/blob/master/check/metadata.csv +[8]: https://github.com/DataDog/integrations-core/blob/master/check/assets/service_checks.json +[9]: https://docs.datadoghq.com/help/ +""" + +LOGS_LINKS = """\ +[1]: https://docs.datadoghq.com/help/ +[2]: https://app.datadoghq.com/account/settings#agent +[3]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent +[4]: **LINK_TO_INTEGRATION_SITE** +[5]: https://github.com/DataDog/integrations-core/blob/master/logs/assets/service_checks.json +""" + +JMX_LINKS = """\ +[1]: **LINK_TO_INTEGERATION_SITE** +[2]: https://app.datadoghq.com/account/settings#agent +[3]: https://github.com/DataDog/integrations-core/blob/master/jmx/datadog_checks/jmx/data/conf.yaml.example +[4]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information +[5]: https://docs.datadoghq.com/integrations/java/ +[6]: https://docs.datadoghq.com/help/ +[7]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent +[8]: https://github.com/DataDog/integrations-core/blob/master/jmx/assets/service_checks.json +""" + +SNMP_TILE_LINKS = """\ +[1]: https://docs.datadoghq.com/network_performance_monitoring/devices/data +[2]: https://docs.datadoghq.com/network_performance_monitoring/devices/setup +[3]: https://github.com/DataDog/integrations-core/blob/master/snmp_tile/assets/service_checks.json +[4]: https://docs.datadoghq.com/help/ +[5]: https://www.datadoghq.com/blog/monitor-snmp-with-datadog/ +""" + +TILE_LINKS = """\ +[1]: **LINK_TO_INTEGRATION_SITE** +[2]: https://app.datadoghq.com/account/settings#agent +[3]: https://docs.datadoghq.com/help/ +""" + +integration_type_links = { + 'check': CHECK_LINKS, + 'logs': LOGS_LINKS, + 'jmx': JMX_LINKS, + 'snmp_tile': SNMP_TILE_LINKS, + 'tile': TILE_LINKS, +} + # If a file changes in a PR with any of these file extensions, # a test will run against the check containing the file TESTABLE_FILE_PATTERNS = ('*.py', '*.ini', '*.in', '*.txt', '*.yml', '*.yaml', '**/tests/*') diff --git a/datadog_checks_dev/datadog_checks/dev/tooling/create.py b/datadog_checks_dev/datadog_checks/dev/tooling/create.py index 14de99f7a3ca1..d93c8741bacb9 100644 --- a/datadog_checks_dev/datadog_checks/dev/tooling/create.py +++ 
b/datadog_checks_dev/datadog_checks/dev/tooling/create.py @@ -14,6 +14,7 @@ write_file, write_file_binary, ) +from .constants import integration_type_links from .utils import get_license_header, kebab_case_name, normalize_package_name TEMPLATES_DIR = path_join(os.path.dirname(os.path.abspath(__file__)), 'templates', 'integration') @@ -27,7 +28,7 @@ def get_valid_templates(): return sorted(templates) -def construct_template_fields(integration_name, repo_choice, **kwargs): +def construct_template_fields(integration_name, repo_choice, integration_type, **kwargs): normalized_integration_name = normalize_package_name(integration_name) check_name_kebab = kebab_case_name(integration_name) @@ -42,7 +43,7 @@ def construct_template_fields(integration_name, repo_choice, **kwargs): 2. Run `ddev release build {normalized_integration_name}` to build the package. -3. [Download the Datadog Agent](https://app.datadoghq.com/account/settings#agent). +3. [Download the Datadog Agent][2]. 4. Upload the build artifact to any host with an Agent and run `datadog-agent integration install -w @@ -61,6 +62,7 @@ def construct_template_fields(integration_name, repo_choice, **kwargs): support_type = 'core' test_dev_dep = '-e ../datadog_checks_dev' tox_base_dep = '-e../datadog_checks_base[deps]' + integration_links = integration_type_links.get(integration_type) elif repo_choice == 'marketplace': check_name = normalize_package_name(f"{kwargs.get('author')}_{normalized_integration_name}") # Updated by the kwargs passed in @@ -73,6 +75,7 @@ def construct_template_fields(integration_name, repo_choice, **kwargs): support_type = 'partner' test_dev_dep = 'datadog-checks-dev' tox_base_dep = datadog_checks_base_req + integration_links = '' else: check_name = normalized_integration_name author = 'U.N. Owen' @@ -82,6 +85,7 @@ def construct_template_fields(integration_name, repo_choice, **kwargs): support_type = 'contrib' test_dev_dep = 'datadog-checks-dev' tox_base_dep = datadog_checks_base_req + integration_links = integration_type_links.get(integration_type) config = { 'author': author, 'check_class': f"{''.join(part.capitalize() for part in normalized_integration_name.split('_'))}Check", @@ -97,6 +101,7 @@ def construct_template_fields(integration_name, repo_choice, **kwargs): 'support_type': support_type, 'test_dev_dep': test_dev_dep, 'tox_base_dep': tox_base_dep, + 'integration_links': integration_links, } config.update(kwargs) diff --git a/datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/README.md b/datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/README.md index fbdb3eb03e86f..9655b8c858f58 100644 --- a/datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/README.md +++ b/datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/README.md @@ -6,7 +6,7 @@ This check monitors [{integration_name}][1] through the Datadog Agent. ## Setup -Follow the instructions below to install and configure this check for an Agent running on a host. For containerized environments, see the [Autodiscovery Integration Templates][2] for guidance on applying these instructions. +Follow the instructions below to install and configure this check for an Agent running on a host. For containerized environments, see the [Autodiscovery Integration Templates][3] for guidance on applying these instructions. 
### Installation @@ -14,19 +14,19 @@ Follow the instructions below to install and configure this check for an Agent r ### Configuration -1. Edit the `{check_name}.d/conf.yaml` file, in the `conf.d/` folder at the root of your Agent's configuration directory to start collecting your {check_name} performance data. See the [sample {check_name}.d/conf.yaml][3] for all available configuration options. +1. Edit the `{check_name}.d/conf.yaml` file, in the `conf.d/` folder at the root of your Agent's configuration directory to start collecting your {check_name} performance data. See the [sample {check_name}.d/conf.yaml][4] for all available configuration options. -2. [Restart the Agent][4]. +2. [Restart the Agent][5]. ### Validation -[Run the Agent's status subcommand][5] and look for `{check_name}` under the Checks section. +[Run the Agent's status subcommand][6] and look for `{check_name}` under the Checks section. ## Data Collected ### Metrics -See [metadata.csv][6] for a list of metrics provided by this check. +See [metadata.csv][7] for a list of metrics provided by this check. ### Events @@ -36,18 +36,11 @@ The {integration_name} integration does not include any events. The {integration_name} integration does not include any service checks. -See [service_checks.json][7] for a list of service checks provided by this integration. +See [service_checks.json][8] for a list of service checks provided by this integration. ## Troubleshooting -Need help? Contact [Datadog support][8]. +Need help? Contact [Datadog support][9]. -[1]: **LINK_TO_INTEGRATION_SITE** -[2]: https://docs.datadoghq.com/agent/kubernetes/integrations/ -[3]: https://github.com/DataDog/integrations-{repo_choice}/blob/master/{check_name}/datadog_checks/{check_name}/data/conf.yaml.example -[4]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent -[5]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information -[6]: https://github.com/DataDog/integrations-{repo_choice}/blob/master/{check_name}/metadata.csv -[7]: https://github.com/DataDog/integrations-core/blob/master/{check_name}/assets/service_checks.json -[8]: https://docs.datadoghq.com/help/ +{integration_links} \ No newline at end of file diff --git a/datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/README.md b/datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/README.md index 312a37b5d7bf6..cbb397f88ec99 100644 --- a/datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/README.md +++ b/datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/README.md @@ -14,18 +14,18 @@ This check monitors [{integration_name}][1]. 1. Edit the `{check_name}.d/conf.yaml` file, in the `conf.d/` folder at the root of your Agent's configuration directory to start collecting your {check_name} performance data. - See the [sample {check_name}.d/conf.yaml][2] for all available configuration options. + See the [sample {check_name}.d/conf.yaml][3] for all available configuration options. - This check has a limit of 350 metrics per instance. The number of returned metrics is indicated when running the Datadog Agent [status command][3]. - You can specify the metrics you are interested in by editing the [configuration][2]. - To learn how to customize the metrics to collect visit the [JMX Checks documentation][4] for more detailed instructions. - If you need to monitor more metrics, contact [Datadog support][5]. 
+ This check has a limit of 350 metrics per instance. The number of returned metrics is indicated when running the Datadog Agent [status command][4]. + You can specify the metrics you are interested in by editing the [configuration][3]. + To learn how to customize the metrics to collect visit the [JMX Checks documentation][5] for more detailed instructions. + If you need to monitor more metrics, contact [Datadog support][6]. -2. [Restart the Agent][6] +2. [Restart the Agent][7] ### Validation -[Run the Agent's `status` subcommand][3] and look for `{check_name}` under the Checks section. +[Run the Agent's `status` subcommand][4] and look for `{check_name}` under the Checks section. ## Data Collected @@ -41,17 +41,11 @@ The {integration_name} integration does not include any events. The {integration_name} integration does not include any service checks. -See [service_checks.json][7] for a list of service checks provided by this integration. +See [service_checks.json][8] for a list of service checks provided by this integration. ## Troubleshooting -Need help? Contact [Datadog support][5]. +Need help? Contact [Datadog support][6]. -[1]: **LINK_TO_INTEGERATION_SITE** -[2]: https://github.com/DataDog/integrations-{repo_choice}/blob/master/{check_name}/datadog_checks/{check_name}/data/conf.yaml.example -[3]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information -[4]: https://docs.datadoghq.com/integrations/java/ -[5]: https://docs.datadoghq.com/help/ -[6]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent -[7]: https://github.com/DataDog/integrations-core/blob/master/{check_name}/assets/service_checks.json +{integration_links} \ No newline at end of file diff --git a/datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/logs/{check_name}/README.md b/datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/logs/{check_name}/README.md index 600d18a61185e..7c896a2891f53 100644 --- a/datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/logs/{check_name}/README.md +++ b/datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/logs/{check_name}/README.md @@ -2,7 +2,7 @@ ## Overview -This integration monitors [{integration_name}][3]. +This integration monitors [{integration_name}][4]. ## Setup @@ -45,7 +45,7 @@ This integration monitors [{integration_name}][3]. Change the `path` and `service` parameter values and configure them for your environment. -3. [Restart the Agent][2]. +3. [Restart the Agent][3]. ### Events @@ -55,13 +55,10 @@ The {integration_name} integration does not include any events. The {integration_name} integration does not include any service checks. -See [service_checks.json][4] for a list of service checks provided by this integration. +See [service_checks.json][5] for a list of service checks provided by this integration. ## Troubleshooting Need help? Contact [Datadog support][1]. 
-[1]: https://docs.datadoghq.com/help/ -[2]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent -[3]: **LINK_TO_INTEGRATION_SITE** -[4]: https://github.com/DataDog/integrations-core/blob/master/{check_name}/assets/service_checks.json +{integration_links} \ No newline at end of file diff --git a/datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/snmp_tile/snmp_{check_name}/README.md b/datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/snmp_tile/snmp_{check_name}/README.md index c56c58c76398c..855d26b4bc841 100644 --- a/datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/snmp_tile/snmp_{check_name}/README.md +++ b/datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/snmp_tile/snmp_{check_name}/README.md @@ -36,8 +36,4 @@ Additional helpful documentation, links, and articles: * [Monitor SNMP with Datadog][5] -[1]: https://docs.datadoghq.com/network_performance_monitoring/devices/data -[2]: https://docs.datadoghq.com/network_performance_monitoring/devices/setup -[3]: https://github.com/DataDog/integrations-core/blob/master/{check_name}/assets/service_checks.json -[4]: https://docs.datadoghq.com/help/ -[5]: https://www.datadoghq.com/blog/monitor-snmp-with-datadog/ +{integration_links} diff --git a/datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/tile/{check_name}/README.md b/datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/tile/{check_name}/README.md index 6b02b514a570a..69535448c2b29 100644 --- a/datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/tile/{check_name}/README.md +++ b/datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/tile/{check_name}/README.md @@ -34,7 +34,6 @@ This check monitors [{integration_name}][1]. ## Troubleshooting -Need help? Contact [Datadog support][2]. +Need help? Contact [Datadog support][3]. 
-[1]: **LINK_TO_INTEGRATION_SITE** -[2]: https://docs.datadoghq.com/help/ +{integration_links} From 34d84441dc9e52c314ec8cc809c18ea6c874ec15 Mon Sep 17 00:00:00 2001 From: gml3ff Date: Mon, 8 Nov 2021 12:05:32 -0500 Subject: [PATCH 06/19] fix autodiscovery typos (#10576) --- mysql/README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mysql/README.md b/mysql/README.md index 8d2d601ea13fe..c498ad9aa5413 100644 --- a/mysql/README.md +++ b/mysql/README.md @@ -270,9 +270,9 @@ kind: Pod metadata: name: mysql annotations: - ad.datadoghq.com/nginx.check_names: '["mysql"]' - ad.datadoghq.com/nginx.init_configs: '[{}]' - ad.datadoghq.com/nginx.instances: | + ad.datadoghq.com/mysql.check_names: '["mysql"]' + ad.datadoghq.com/mysql.init_configs: '[{}]' + ad.datadoghq.com/mysql.instances: | [ { "server": "%%host%%", From 2fa18b792d84f1c3dde2da33d6d922dfb0e3678a Mon Sep 17 00:00:00 2001 From: Ofek Lev Date: Mon, 8 Nov 2021 12:41:58 -0500 Subject: [PATCH 07/19] Add runtime configuration validation (#8943) * Sync config models * re-sync * address * sync --- kong/assets/configuration/spec.yaml | 2 +- .../kong/config_models/__init__.py | 18 ++ .../kong/config_models/defaults.py | 268 ++++++++++++++++++ .../kong/config_models/instance.py | 146 ++++++++++ .../kong/config_models/shared.py | 54 ++++ .../kong/config_models/validators.py | 10 + .../kong/data/conf.yaml.example | 9 +- 7 files changed, 502 insertions(+), 5 deletions(-) create mode 100644 kong/datadog_checks/kong/config_models/__init__.py create mode 100644 kong/datadog_checks/kong/config_models/defaults.py create mode 100644 kong/datadog_checks/kong/config_models/instance.py create mode 100644 kong/datadog_checks/kong/config_models/shared.py create mode 100644 kong/datadog_checks/kong/config_models/validators.py diff --git a/kong/assets/configuration/spec.yaml b/kong/assets/configuration/spec.yaml index f289690d9aa6a..b4729245caae8 100644 --- a/kong/assets/configuration/spec.yaml +++ b/kong/assets/configuration/spec.yaml @@ -8,13 +8,13 @@ files: - template: instances options: - name: kong_status_url - required: true description: URL where Kong exposes its status. value: example: http://localhost:8001/status/ type: string - template: instances/openmetrics overrides: + openmetrics_endpoint.required: false openmetrics_endpoint.value.example: http://localhost:8001/metrics extra_metrics.value.example: - kong_memory_workers_lua_vms_bytes: memory.workers.lua.vms.bytes diff --git a/kong/datadog_checks/kong/config_models/__init__.py b/kong/datadog_checks/kong/config_models/__init__.py new file mode 100644 index 0000000000000..ba42dbdc7ffb0 --- /dev/null +++ b/kong/datadog_checks/kong/config_models/__init__.py @@ -0,0 +1,18 @@ +# (C) Datadog, Inc. 2021-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +from .instance import InstanceConfig +from .shared import SharedConfig + + +class ConfigMixin: + _config_model_instance: InstanceConfig + _config_model_shared: SharedConfig + + @property + def config(self) -> InstanceConfig: + return self._config_model_instance + + @property + def shared_config(self) -> SharedConfig: + return self._config_model_shared diff --git a/kong/datadog_checks/kong/config_models/defaults.py b/kong/datadog_checks/kong/config_models/defaults.py new file mode 100644 index 0000000000000..b22c8ea98847a --- /dev/null +++ b/kong/datadog_checks/kong/config_models/defaults.py @@ -0,0 +1,268 @@ +# (C) Datadog, Inc. 
2021-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +from datadog_checks.base.utils.models.fields import get_default_field_value + + +def shared_proxy(field, value): + return get_default_field_value(field, value) + + +def shared_service(field, value): + return get_default_field_value(field, value) + + +def shared_skip_proxy(field, value): + return False + + +def shared_timeout(field, value): + return 10 + + +def instance_allow_redirects(field, value): + return True + + +def instance_auth_token(field, value): + return get_default_field_value(field, value) + + +def instance_auth_type(field, value): + return 'basic' + + +def instance_aws_host(field, value): + return get_default_field_value(field, value) + + +def instance_aws_region(field, value): + return get_default_field_value(field, value) + + +def instance_aws_service(field, value): + return get_default_field_value(field, value) + + +def instance_cache_metric_wildcards(field, value): + return True + + +def instance_cache_shared_labels(field, value): + return True + + +def instance_collect_counters_with_distributions(field, value): + return False + + +def instance_collect_histogram_buckets(field, value): + return True + + +def instance_connect_timeout(field, value): + return get_default_field_value(field, value) + + +def instance_disable_generic_tags(field, value): + return False + + +def instance_empty_default_hostname(field, value): + return False + + +def instance_enable_health_service_check(field, value): + return True + + +def instance_exclude_labels(field, value): + return get_default_field_value(field, value) + + +def instance_exclude_metrics(field, value): + return get_default_field_value(field, value) + + +def instance_exclude_metrics_by_labels(field, value): + return get_default_field_value(field, value) + + +def instance_extra_headers(field, value): + return get_default_field_value(field, value) + + +def instance_extra_metrics(field, value): + return get_default_field_value(field, value) + + +def instance_headers(field, value): + return get_default_field_value(field, value) + + +def instance_histogram_buckets_as_distributions(field, value): + return False + + +def instance_hostname_format(field, value): + return get_default_field_value(field, value) + + +def instance_hostname_label(field, value): + return get_default_field_value(field, value) + + +def instance_ignore_tags(field, value): + return get_default_field_value(field, value) + + +def instance_kerberos_auth(field, value): + return 'disabled' + + +def instance_kerberos_cache(field, value): + return get_default_field_value(field, value) + + +def instance_kerberos_delegate(field, value): + return False + + +def instance_kerberos_force_initiate(field, value): + return False + + +def instance_kerberos_hostname(field, value): + return get_default_field_value(field, value) + + +def instance_kerberos_keytab(field, value): + return get_default_field_value(field, value) + + +def instance_kerberos_principal(field, value): + return get_default_field_value(field, value) + + +def instance_kong_status_url(field, value): + return 'http://localhost:8001/status/' + + +def instance_log_requests(field, value): + return False + + +def instance_metrics(field, value): + return get_default_field_value(field, value) + + +def instance_min_collection_interval(field, value): + return 15 + + +def instance_namespace(field, value): + return get_default_field_value(field, value) + + +def instance_non_cumulative_histogram_buckets(field, value): + return False 
+ + +def instance_ntlm_domain(field, value): + return get_default_field_value(field, value) + + +def instance_openmetrics_endpoint(field, value): + return 'http://localhost:8001/metrics' + + +def instance_password(field, value): + return get_default_field_value(field, value) + + +def instance_persist_connections(field, value): + return False + + +def instance_proxy(field, value): + return get_default_field_value(field, value) + + +def instance_raw_line_filters(field, value): + return get_default_field_value(field, value) + + +def instance_raw_metric_prefix(field, value): + return get_default_field_value(field, value) + + +def instance_read_timeout(field, value): + return get_default_field_value(field, value) + + +def instance_rename_labels(field, value): + return get_default_field_value(field, value) + + +def instance_request_size(field, value): + return 16 + + +def instance_service(field, value): + return get_default_field_value(field, value) + + +def instance_share_labels(field, value): + return get_default_field_value(field, value) + + +def instance_skip_proxy(field, value): + return False + + +def instance_tags(field, value): + return get_default_field_value(field, value) + + +def instance_telemetry(field, value): + return False + + +def instance_timeout(field, value): + return 10 + + +def instance_tls_ca_cert(field, value): + return get_default_field_value(field, value) + + +def instance_tls_cert(field, value): + return get_default_field_value(field, value) + + +def instance_tls_ignore_warning(field, value): + return False + + +def instance_tls_private_key(field, value): + return get_default_field_value(field, value) + + +def instance_tls_use_host_header(field, value): + return False + + +def instance_tls_verify(field, value): + return True + + +def instance_use_latest_spec(field, value): + return False + + +def instance_use_legacy_auth_encoding(field, value): + return True + + +def instance_username(field, value): + return get_default_field_value(field, value) diff --git a/kong/datadog_checks/kong/config_models/instance.py b/kong/datadog_checks/kong/config_models/instance.py new file mode 100644 index 0000000000000..85c6ffd505e63 --- /dev/null +++ b/kong/datadog_checks/kong/config_models/instance.py @@ -0,0 +1,146 @@ +# (C) Datadog, Inc. 2021-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +from __future__ import annotations + +from typing import Any, Mapping, Optional, Sequence, Union + +from pydantic import BaseModel, Extra, Field, root_validator, validator + +from datadog_checks.base.utils.functions import identity +from datadog_checks.base.utils.models import validation + +from . 
import defaults, validators + + +class AuthToken(BaseModel): + class Config: + allow_mutation = False + + reader: Optional[Mapping[str, Any]] + writer: Optional[Mapping[str, Any]] + + +class ExtraMetric(BaseModel): + class Config: + extra = Extra.allow + allow_mutation = False + + name: Optional[str] + type: Optional[str] + + +class Metric(BaseModel): + class Config: + extra = Extra.allow + allow_mutation = False + + name: Optional[str] + type: Optional[str] + + +class Proxy(BaseModel): + class Config: + allow_mutation = False + + http: Optional[str] + https: Optional[str] + no_proxy: Optional[Sequence[str]] + + +class ShareLabel(BaseModel): + class Config: + allow_mutation = False + + labels: Optional[Sequence[str]] + match: Optional[Sequence[str]] + + +class InstanceConfig(BaseModel): + class Config: + allow_mutation = False + + allow_redirects: Optional[bool] + auth_token: Optional[AuthToken] + auth_type: Optional[str] + aws_host: Optional[str] + aws_region: Optional[str] + aws_service: Optional[str] + cache_metric_wildcards: Optional[bool] + cache_shared_labels: Optional[bool] + collect_counters_with_distributions: Optional[bool] + collect_histogram_buckets: Optional[bool] + connect_timeout: Optional[float] + disable_generic_tags: Optional[bool] + empty_default_hostname: Optional[bool] + enable_health_service_check: Optional[bool] + exclude_labels: Optional[Sequence[str]] + exclude_metrics: Optional[Sequence[str]] + exclude_metrics_by_labels: Optional[Mapping[str, Union[bool, Sequence[str]]]] + extra_headers: Optional[Mapping[str, Any]] + extra_metrics: Optional[Sequence[Union[str, Mapping[str, Union[str, ExtraMetric]]]]] + headers: Optional[Mapping[str, Any]] + histogram_buckets_as_distributions: Optional[bool] + hostname_format: Optional[str] + hostname_label: Optional[str] + ignore_tags: Optional[Sequence[str]] + kerberos_auth: Optional[str] + kerberos_cache: Optional[str] + kerberos_delegate: Optional[bool] + kerberos_force_initiate: Optional[bool] + kerberos_hostname: Optional[str] + kerberos_keytab: Optional[str] + kerberos_principal: Optional[str] + kong_status_url: Optional[str] + log_requests: Optional[bool] + metrics: Optional[Sequence[Union[str, Mapping[str, Union[str, Metric]]]]] + min_collection_interval: Optional[float] + namespace: Optional[str] = Field(None, regex='\\w*') + non_cumulative_histogram_buckets: Optional[bool] + ntlm_domain: Optional[str] + openmetrics_endpoint: Optional[str] + password: Optional[str] + persist_connections: Optional[bool] + proxy: Optional[Proxy] + raw_line_filters: Optional[Sequence[str]] + raw_metric_prefix: Optional[str] + read_timeout: Optional[float] + rename_labels: Optional[Mapping[str, Any]] + request_size: Optional[float] + service: Optional[str] + share_labels: Optional[Mapping[str, Union[bool, ShareLabel]]] + skip_proxy: Optional[bool] + tags: Optional[Sequence[str]] + telemetry: Optional[bool] + timeout: Optional[float] + tls_ca_cert: Optional[str] + tls_cert: Optional[str] + tls_ignore_warning: Optional[bool] + tls_private_key: Optional[str] + tls_use_host_header: Optional[bool] + tls_verify: Optional[bool] + use_latest_spec: Optional[bool] + use_legacy_auth_encoding: Optional[bool] + username: Optional[str] + + @root_validator(pre=True) + def _initial_validation(cls, values): + return validation.core.initialize_config(getattr(validators, 'initialize_instance', identity)(values)) + + @validator('*', pre=True, always=True) + def _ensure_defaults(cls, v, field): + if v is not None or field.required: + return v + + return 
getattr(defaults, f'instance_{field.name}')(field, v) + + @validator('*') + def _run_validations(cls, v, field): + if not v: + return v + + return getattr(validators, f'instance_{field.name}', identity)(v, field=field) + + @root_validator(pre=False) + def _final_validation(cls, values): + return validation.core.finalize_config(getattr(validators, 'finalize_instance', identity)(values)) diff --git a/kong/datadog_checks/kong/config_models/shared.py b/kong/datadog_checks/kong/config_models/shared.py new file mode 100644 index 0000000000000..4fc6216ab6c2f --- /dev/null +++ b/kong/datadog_checks/kong/config_models/shared.py @@ -0,0 +1,54 @@ +# (C) Datadog, Inc. 2021-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +from __future__ import annotations + +from typing import Optional, Sequence + +from pydantic import BaseModel, root_validator, validator + +from datadog_checks.base.utils.functions import identity +from datadog_checks.base.utils.models import validation + +from . import defaults, validators + + +class Proxy(BaseModel): + class Config: + allow_mutation = False + + http: Optional[str] + https: Optional[str] + no_proxy: Optional[Sequence[str]] + + +class SharedConfig(BaseModel): + class Config: + allow_mutation = False + + proxy: Optional[Proxy] + service: Optional[str] + skip_proxy: Optional[bool] + timeout: Optional[float] + + @root_validator(pre=True) + def _initial_validation(cls, values): + return validation.core.initialize_config(getattr(validators, 'initialize_shared', identity)(values)) + + @validator('*', pre=True, always=True) + def _ensure_defaults(cls, v, field): + if v is not None or field.required: + return v + + return getattr(defaults, f'shared_{field.name}')(field, v) + + @validator('*') + def _run_validations(cls, v, field): + if not v: + return v + + return getattr(validators, f'shared_{field.name}', identity)(v, field=field) + + @root_validator(pre=False) + def _final_validation(cls, values): + return validation.core.finalize_config(getattr(validators, 'finalize_shared', identity)(values)) diff --git a/kong/datadog_checks/kong/config_models/validators.py b/kong/datadog_checks/kong/config_models/validators.py new file mode 100644 index 0000000000000..0af13b2e555d0 --- /dev/null +++ b/kong/datadog_checks/kong/config_models/validators.py @@ -0,0 +1,10 @@ +# (C) Datadog, Inc. 2021-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) + + +def initialize_instance(values, **kwargs): + if 'kong_status_url' not in values and 'openmetrics_endpoint' not in values: + raise ValueError('Field `kong_status_url` or `openmetrics_endpoint` must be set') + + return values diff --git a/kong/datadog_checks/kong/data/conf.yaml.example b/kong/datadog_checks/kong/data/conf.yaml.example index 0f7a99d970cb4..d3b498a878a1b 100644 --- a/kong/datadog_checks/kong/data/conf.yaml.example +++ b/kong/datadog_checks/kong/data/conf.yaml.example @@ -45,15 +45,16 @@ init_config: # instances: - ## @param kong_status_url - string - required + - + ## @param kong_status_url - string - optional - default: http://localhost:8001/status/ ## URL where Kong exposes its status. # - - kong_status_url: http://localhost:8001/status/ + # kong_status_url: http://localhost:8001/status/ - ## @param openmetrics_endpoint - string - required + ## @param openmetrics_endpoint - string - optional - default: http://localhost:8001/metrics ## The URL exposing metrics in the OpenMetrics format. 
# - openmetrics_endpoint: http://localhost:8001/metrics + # openmetrics_endpoint: http://localhost:8001/metrics ## @param raw_metric_prefix - string - optional ## A prefix that will be removed from all exposed metric names, if present. From 91d30a8cb4e847d420bf18586a6664b8346bd69b Mon Sep 17 00:00:00 2001 From: Ofek Lev Date: Mon, 8 Nov 2021 12:42:31 -0500 Subject: [PATCH 08/19] Add runtime configuration validation (#8969) * Sync config models * re-sync * re-sync * address * Update validators.py --- php_fpm/assets/configuration/spec.yaml | 4 - .../php_fpm/config_models/__init__.py | 18 ++ .../php_fpm/config_models/defaults.py | 192 ++++++++++++++++++ .../php_fpm/config_models/instance.py | 101 +++++++++ .../php_fpm/config_models/shared.py | 54 +++++ .../php_fpm/config_models/validators.py | 10 + .../php_fpm/data/conf.yaml.example | 17 +- 7 files changed, 384 insertions(+), 12 deletions(-) create mode 100644 php_fpm/datadog_checks/php_fpm/config_models/__init__.py create mode 100644 php_fpm/datadog_checks/php_fpm/config_models/defaults.py create mode 100644 php_fpm/datadog_checks/php_fpm/config_models/instance.py create mode 100644 php_fpm/datadog_checks/php_fpm/config_models/shared.py create mode 100644 php_fpm/datadog_checks/php_fpm/config_models/validators.py diff --git a/php_fpm/assets/configuration/spec.yaml b/php_fpm/assets/configuration/spec.yaml index 3d2c1cb4b6d0d..bbff03e17f400 100644 --- a/php_fpm/assets/configuration/spec.yaml +++ b/php_fpm/assets/configuration/spec.yaml @@ -18,7 +18,6 @@ files: catch these URLs and redirect them through the FPM pool target you want to monitor (FPM `listen` directive in the config, usually a UNIX socket or TCP socket. - required: true value: type: string example: http://localhost/status @@ -32,19 +31,16 @@ files: catch these URLs and redirect them through the FPM pool target you want to monitor (FPM `listen` directive in the config, usually a UNIX socket or TCP socket. - required: true value: type: string example: http://localhost/ping - name: ping_reply description: Set the expected reply to the ping. - required: true value: type: string example: pong - name: use_fastcgi description: Communicate directly with PHP-FPM using FastCGI - required: true value: type: boolean example: false diff --git a/php_fpm/datadog_checks/php_fpm/config_models/__init__.py b/php_fpm/datadog_checks/php_fpm/config_models/__init__.py new file mode 100644 index 0000000000000..ba42dbdc7ffb0 --- /dev/null +++ b/php_fpm/datadog_checks/php_fpm/config_models/__init__.py @@ -0,0 +1,18 @@ +# (C) Datadog, Inc. 2021-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +from .instance import InstanceConfig +from .shared import SharedConfig + + +class ConfigMixin: + _config_model_instance: InstanceConfig + _config_model_shared: SharedConfig + + @property + def config(self) -> InstanceConfig: + return self._config_model_instance + + @property + def shared_config(self) -> SharedConfig: + return self._config_model_shared diff --git a/php_fpm/datadog_checks/php_fpm/config_models/defaults.py b/php_fpm/datadog_checks/php_fpm/config_models/defaults.py new file mode 100644 index 0000000000000..909bb1d508963 --- /dev/null +++ b/php_fpm/datadog_checks/php_fpm/config_models/defaults.py @@ -0,0 +1,192 @@ +# (C) Datadog, Inc. 
2021-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +from datadog_checks.base.utils.models.fields import get_default_field_value + + +def shared_proxy(field, value): + return get_default_field_value(field, value) + + +def shared_service(field, value): + return get_default_field_value(field, value) + + +def shared_skip_proxy(field, value): + return False + + +def shared_timeout(field, value): + return 10 + + +def instance_allow_redirects(field, value): + return True + + +def instance_auth_token(field, value): + return get_default_field_value(field, value) + + +def instance_auth_type(field, value): + return 'basic' + + +def instance_aws_host(field, value): + return get_default_field_value(field, value) + + +def instance_aws_region(field, value): + return get_default_field_value(field, value) + + +def instance_aws_service(field, value): + return get_default_field_value(field, value) + + +def instance_connect_timeout(field, value): + return get_default_field_value(field, value) + + +def instance_disable_generic_tags(field, value): + return False + + +def instance_empty_default_hostname(field, value): + return False + + +def instance_extra_headers(field, value): + return get_default_field_value(field, value) + + +def instance_headers(field, value): + return get_default_field_value(field, value) + + +def instance_http_host(field, value): + return get_default_field_value(field, value) + + +def instance_kerberos_auth(field, value): + return 'disabled' + + +def instance_kerberos_cache(field, value): + return get_default_field_value(field, value) + + +def instance_kerberos_delegate(field, value): + return False + + +def instance_kerberos_force_initiate(field, value): + return False + + +def instance_kerberos_hostname(field, value): + return get_default_field_value(field, value) + + +def instance_kerberos_keytab(field, value): + return get_default_field_value(field, value) + + +def instance_kerberos_principal(field, value): + return get_default_field_value(field, value) + + +def instance_log_requests(field, value): + return False + + +def instance_min_collection_interval(field, value): + return 15 + + +def instance_ntlm_domain(field, value): + return get_default_field_value(field, value) + + +def instance_password(field, value): + return get_default_field_value(field, value) + + +def instance_persist_connections(field, value): + return False + + +def instance_ping_reply(field, value): + return 'pong' + + +def instance_ping_url(field, value): + return 'http://localhost/ping' + + +def instance_proxy(field, value): + return get_default_field_value(field, value) + + +def instance_read_timeout(field, value): + return get_default_field_value(field, value) + + +def instance_request_size(field, value): + return 16 + + +def instance_service(field, value): + return get_default_field_value(field, value) + + +def instance_skip_proxy(field, value): + return False + + +def instance_status_url(field, value): + return 'http://localhost/status' + + +def instance_tags(field, value): + return get_default_field_value(field, value) + + +def instance_timeout(field, value): + return 10 + + +def instance_tls_ca_cert(field, value): + return get_default_field_value(field, value) + + +def instance_tls_cert(field, value): + return get_default_field_value(field, value) + + +def instance_tls_ignore_warning(field, value): + return False + + +def instance_tls_private_key(field, value): + return get_default_field_value(field, value) + + +def instance_tls_use_host_header(field, value): + 
return False + + +def instance_tls_verify(field, value): + return True + + +def instance_use_fastcgi(field, value): + return False + + +def instance_use_legacy_auth_encoding(field, value): + return True + + +def instance_username(field, value): + return get_default_field_value(field, value) diff --git a/php_fpm/datadog_checks/php_fpm/config_models/instance.py b/php_fpm/datadog_checks/php_fpm/config_models/instance.py new file mode 100644 index 0000000000000..254ae1a81daf6 --- /dev/null +++ b/php_fpm/datadog_checks/php_fpm/config_models/instance.py @@ -0,0 +1,101 @@ +# (C) Datadog, Inc. 2021-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +from __future__ import annotations + +from typing import Any, Mapping, Optional, Sequence + +from pydantic import BaseModel, root_validator, validator + +from datadog_checks.base.utils.functions import identity +from datadog_checks.base.utils.models import validation + +from . import defaults, validators + + +class AuthToken(BaseModel): + class Config: + allow_mutation = False + + reader: Optional[Mapping[str, Any]] + writer: Optional[Mapping[str, Any]] + + +class Proxy(BaseModel): + class Config: + allow_mutation = False + + http: Optional[str] + https: Optional[str] + no_proxy: Optional[Sequence[str]] + + +class InstanceConfig(BaseModel): + class Config: + allow_mutation = False + + allow_redirects: Optional[bool] + auth_token: Optional[AuthToken] + auth_type: Optional[str] + aws_host: Optional[str] + aws_region: Optional[str] + aws_service: Optional[str] + connect_timeout: Optional[float] + disable_generic_tags: Optional[bool] + empty_default_hostname: Optional[bool] + extra_headers: Optional[Mapping[str, Any]] + headers: Optional[Mapping[str, Any]] + http_host: Optional[str] + kerberos_auth: Optional[str] + kerberos_cache: Optional[str] + kerberos_delegate: Optional[bool] + kerberos_force_initiate: Optional[bool] + kerberos_hostname: Optional[str] + kerberos_keytab: Optional[str] + kerberos_principal: Optional[str] + log_requests: Optional[bool] + min_collection_interval: Optional[float] + ntlm_domain: Optional[str] + password: Optional[str] + persist_connections: Optional[bool] + ping_reply: Optional[str] + ping_url: Optional[str] + proxy: Optional[Proxy] + read_timeout: Optional[float] + request_size: Optional[float] + service: Optional[str] + skip_proxy: Optional[bool] + status_url: Optional[str] + tags: Optional[Sequence[str]] + timeout: Optional[float] + tls_ca_cert: Optional[str] + tls_cert: Optional[str] + tls_ignore_warning: Optional[bool] + tls_private_key: Optional[str] + tls_use_host_header: Optional[bool] + tls_verify: Optional[bool] + use_fastcgi: Optional[bool] + use_legacy_auth_encoding: Optional[bool] + username: Optional[str] + + @root_validator(pre=True) + def _initial_validation(cls, values): + return validation.core.initialize_config(getattr(validators, 'initialize_instance', identity)(values)) + + @validator('*', pre=True, always=True) + def _ensure_defaults(cls, v, field): + if v is not None or field.required: + return v + + return getattr(defaults, f'instance_{field.name}')(field, v) + + @validator('*') + def _run_validations(cls, v, field): + if not v: + return v + + return getattr(validators, f'instance_{field.name}', identity)(v, field=field) + + @root_validator(pre=False) + def _final_validation(cls, values): + return validation.core.finalize_config(getattr(validators, 'finalize_instance', identity)(values)) diff --git a/php_fpm/datadog_checks/php_fpm/config_models/shared.py 
b/php_fpm/datadog_checks/php_fpm/config_models/shared.py new file mode 100644 index 0000000000000..4fc6216ab6c2f --- /dev/null +++ b/php_fpm/datadog_checks/php_fpm/config_models/shared.py @@ -0,0 +1,54 @@ +# (C) Datadog, Inc. 2021-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +from __future__ import annotations + +from typing import Optional, Sequence + +from pydantic import BaseModel, root_validator, validator + +from datadog_checks.base.utils.functions import identity +from datadog_checks.base.utils.models import validation + +from . import defaults, validators + + +class Proxy(BaseModel): + class Config: + allow_mutation = False + + http: Optional[str] + https: Optional[str] + no_proxy: Optional[Sequence[str]] + + +class SharedConfig(BaseModel): + class Config: + allow_mutation = False + + proxy: Optional[Proxy] + service: Optional[str] + skip_proxy: Optional[bool] + timeout: Optional[float] + + @root_validator(pre=True) + def _initial_validation(cls, values): + return validation.core.initialize_config(getattr(validators, 'initialize_shared', identity)(values)) + + @validator('*', pre=True, always=True) + def _ensure_defaults(cls, v, field): + if v is not None or field.required: + return v + + return getattr(defaults, f'shared_{field.name}')(field, v) + + @validator('*') + def _run_validations(cls, v, field): + if not v: + return v + + return getattr(validators, f'shared_{field.name}', identity)(v, field=field) + + @root_validator(pre=False) + def _final_validation(cls, values): + return validation.core.finalize_config(getattr(validators, 'finalize_shared', identity)(values)) diff --git a/php_fpm/datadog_checks/php_fpm/config_models/validators.py b/php_fpm/datadog_checks/php_fpm/config_models/validators.py new file mode 100644 index 0000000000000..b21afd4b33ac0 --- /dev/null +++ b/php_fpm/datadog_checks/php_fpm/config_models/validators.py @@ -0,0 +1,10 @@ +# (C) Datadog, Inc. 2021-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) + + +def initialize_instance(values, **kwargs): + if 'status_url' not in values and 'ping_url' not in values: + raise ValueError('Field `status_url` or `ping_url` must be set') + + return values diff --git a/php_fpm/datadog_checks/php_fpm/data/conf.yaml.example b/php_fpm/datadog_checks/php_fpm/data/conf.yaml.example index dd58f26ccfadf..fa555a769a07e 100644 --- a/php_fpm/datadog_checks/php_fpm/data/conf.yaml.example +++ b/php_fpm/datadog_checks/php_fpm/data/conf.yaml.example @@ -45,7 +45,8 @@ init_config: # instances: - ## @param status_url - string - required + - + ## @param status_url - string - optional - default: http://localhost/status ## Get metrics from your FPM pool with this URL ## The status URLs should follow the options from your FPM pool ## See http://php.net/manual/en/install.fpm.configuration.php @@ -55,9 +56,9 @@ instances: ## you want to monitor (FPM `listen` directive in the config, usually ## a UNIX socket or TCP socket. # - - status_url: http://localhost/status + # status_url: http://localhost/status - ## @param ping_url - string - required + ## @param ping_url - string - optional - default: http://localhost/ping ## Get a reliable service check of your FPM pool with `ping_url` parameter ## The ping URLs should follow the options from your FPM pool ## See http://php.net/manual/en/install.fpm.configuration.php @@ -67,17 +68,17 @@ instances: ## you want to monitor (FPM `listen` directive in the config, usually ## a UNIX socket or TCP socket. 
# - ping_url: http://localhost/ping + # ping_url: http://localhost/ping - ## @param ping_reply - string - required + ## @param ping_reply - string - optional - default: pong ## Set the expected reply to the ping. # - ping_reply: pong + # ping_reply: pong - ## @param use_fastcgi - boolean - required + ## @param use_fastcgi - boolean - optional - default: false ## Communicate directly with PHP-FPM using FastCGI # - use_fastcgi: false + # use_fastcgi: false ## @param http_host - string - optional ## If your FPM pool is only accessible via a specific HTTP vhost, you can From c04dd96cfbef1a8d2c118e43f02570e84fa872e8 Mon Sep 17 00:00:00 2001 From: Ofek Lev Date: Mon, 8 Nov 2021 12:43:53 -0500 Subject: [PATCH 09/19] Add runtime configuration validation (#8916) * Sync config models * re-sync * re-sync * fix tests --- .../fluentd/config_models/__init__.py | 18 ++ .../fluentd/config_models/defaults.py | 188 ++++++++++++++++++ .../fluentd/config_models/instance.py | 100 ++++++++++ .../fluentd/config_models/shared.py | 55 +++++ .../fluentd/config_models/validators.py | 3 + fluentd/tests/test_integration_and_e2e.py | 6 +- 6 files changed, 367 insertions(+), 3 deletions(-) create mode 100644 fluentd/datadog_checks/fluentd/config_models/__init__.py create mode 100644 fluentd/datadog_checks/fluentd/config_models/defaults.py create mode 100644 fluentd/datadog_checks/fluentd/config_models/instance.py create mode 100644 fluentd/datadog_checks/fluentd/config_models/shared.py create mode 100644 fluentd/datadog_checks/fluentd/config_models/validators.py diff --git a/fluentd/datadog_checks/fluentd/config_models/__init__.py b/fluentd/datadog_checks/fluentd/config_models/__init__.py new file mode 100644 index 0000000000000..ba42dbdc7ffb0 --- /dev/null +++ b/fluentd/datadog_checks/fluentd/config_models/__init__.py @@ -0,0 +1,18 @@ +# (C) Datadog, Inc. 2021-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +from .instance import InstanceConfig +from .shared import SharedConfig + + +class ConfigMixin: + _config_model_instance: InstanceConfig + _config_model_shared: SharedConfig + + @property + def config(self) -> InstanceConfig: + return self._config_model_instance + + @property + def shared_config(self) -> SharedConfig: + return self._config_model_shared diff --git a/fluentd/datadog_checks/fluentd/config_models/defaults.py b/fluentd/datadog_checks/fluentd/config_models/defaults.py new file mode 100644 index 0000000000000..0143c3607248f --- /dev/null +++ b/fluentd/datadog_checks/fluentd/config_models/defaults.py @@ -0,0 +1,188 @@ +# (C) Datadog, Inc. 
2021-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +from datadog_checks.base.utils.models.fields import get_default_field_value + + +def shared_fluentd(field, value): + return 'fluentd' + + +def shared_proxy(field, value): + return get_default_field_value(field, value) + + +def shared_service(field, value): + return get_default_field_value(field, value) + + +def shared_skip_proxy(field, value): + return False + + +def shared_timeout(field, value): + return 10 + + +def instance_allow_redirects(field, value): + return True + + +def instance_auth_token(field, value): + return get_default_field_value(field, value) + + +def instance_auth_type(field, value): + return 'basic' + + +def instance_aws_host(field, value): + return get_default_field_value(field, value) + + +def instance_aws_region(field, value): + return get_default_field_value(field, value) + + +def instance_aws_service(field, value): + return get_default_field_value(field, value) + + +def instance_connect_timeout(field, value): + return get_default_field_value(field, value) + + +def instance_disable_generic_tags(field, value): + return False + + +def instance_empty_default_hostname(field, value): + return False + + +def instance_extra_headers(field, value): + return get_default_field_value(field, value) + + +def instance_fluentd(field, value): + return 'fluentd' + + +def instance_headers(field, value): + return get_default_field_value(field, value) + + +def instance_kerberos_auth(field, value): + return 'disabled' + + +def instance_kerberos_cache(field, value): + return get_default_field_value(field, value) + + +def instance_kerberos_delegate(field, value): + return False + + +def instance_kerberos_force_initiate(field, value): + return False + + +def instance_kerberos_hostname(field, value): + return get_default_field_value(field, value) + + +def instance_kerberos_keytab(field, value): + return get_default_field_value(field, value) + + +def instance_kerberos_principal(field, value): + return get_default_field_value(field, value) + + +def instance_log_requests(field, value): + return False + + +def instance_min_collection_interval(field, value): + return 15 + + +def instance_ntlm_domain(field, value): + return get_default_field_value(field, value) + + +def instance_password(field, value): + return get_default_field_value(field, value) + + +def instance_persist_connections(field, value): + return False + + +def instance_plugin_ids(field, value): + return get_default_field_value(field, value) + + +def instance_proxy(field, value): + return get_default_field_value(field, value) + + +def instance_read_timeout(field, value): + return get_default_field_value(field, value) + + +def instance_request_size(field, value): + return 16 + + +def instance_service(field, value): + return get_default_field_value(field, value) + + +def instance_skip_proxy(field, value): + return False + + +def instance_tag_by(field, value): + return 'plugin_id' + + +def instance_tags(field, value): + return get_default_field_value(field, value) + + +def instance_timeout(field, value): + return 10 + + +def instance_tls_ca_cert(field, value): + return get_default_field_value(field, value) + + +def instance_tls_cert(field, value): + return get_default_field_value(field, value) + + +def instance_tls_ignore_warning(field, value): + return False + + +def instance_tls_private_key(field, value): + return get_default_field_value(field, value) + + +def instance_tls_use_host_header(field, value): + return False + + +def 
instance_tls_verify(field, value): + return True + + +def instance_use_legacy_auth_encoding(field, value): + return True + + +def instance_username(field, value): + return get_default_field_value(field, value) diff --git a/fluentd/datadog_checks/fluentd/config_models/instance.py b/fluentd/datadog_checks/fluentd/config_models/instance.py new file mode 100644 index 0000000000000..2cd726f647e8a --- /dev/null +++ b/fluentd/datadog_checks/fluentd/config_models/instance.py @@ -0,0 +1,100 @@ +# (C) Datadog, Inc. 2021-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +from __future__ import annotations + +from typing import Any, Mapping, Optional, Sequence + +from pydantic import BaseModel, root_validator, validator + +from datadog_checks.base.utils.functions import identity +from datadog_checks.base.utils.models import validation + +from . import defaults, validators + + +class AuthToken(BaseModel): + class Config: + allow_mutation = False + + reader: Optional[Mapping[str, Any]] + writer: Optional[Mapping[str, Any]] + + +class Proxy(BaseModel): + class Config: + allow_mutation = False + + http: Optional[str] + https: Optional[str] + no_proxy: Optional[Sequence[str]] + + +class InstanceConfig(BaseModel): + class Config: + allow_mutation = False + + allow_redirects: Optional[bool] + auth_token: Optional[AuthToken] + auth_type: Optional[str] + aws_host: Optional[str] + aws_region: Optional[str] + aws_service: Optional[str] + connect_timeout: Optional[float] + disable_generic_tags: Optional[bool] + empty_default_hostname: Optional[bool] + extra_headers: Optional[Mapping[str, Any]] + fluentd: Optional[str] + headers: Optional[Mapping[str, Any]] + kerberos_auth: Optional[str] + kerberos_cache: Optional[str] + kerberos_delegate: Optional[bool] + kerberos_force_initiate: Optional[bool] + kerberos_hostname: Optional[str] + kerberos_keytab: Optional[str] + kerberos_principal: Optional[str] + log_requests: Optional[bool] + min_collection_interval: Optional[float] + monitor_agent_url: str + ntlm_domain: Optional[str] + password: Optional[str] + persist_connections: Optional[bool] + plugin_ids: Optional[Sequence[str]] + proxy: Optional[Proxy] + read_timeout: Optional[float] + request_size: Optional[float] + service: Optional[str] + skip_proxy: Optional[bool] + tag_by: Optional[str] + tags: Optional[Sequence[str]] + timeout: Optional[float] + tls_ca_cert: Optional[str] + tls_cert: Optional[str] + tls_ignore_warning: Optional[bool] + tls_private_key: Optional[str] + tls_use_host_header: Optional[bool] + tls_verify: Optional[bool] + use_legacy_auth_encoding: Optional[bool] + username: Optional[str] + + @root_validator(pre=True) + def _initial_validation(cls, values): + return validation.core.initialize_config(getattr(validators, 'initialize_instance', identity)(values)) + + @validator('*', pre=True, always=True) + def _ensure_defaults(cls, v, field): + if v is not None or field.required: + return v + + return getattr(defaults, f'instance_{field.name}')(field, v) + + @validator('*') + def _run_validations(cls, v, field): + if not v: + return v + + return getattr(validators, f'instance_{field.name}', identity)(v, field=field) + + @root_validator(pre=False) + def _final_validation(cls, values): + return validation.core.finalize_config(getattr(validators, 'finalize_instance', identity)(values)) diff --git a/fluentd/datadog_checks/fluentd/config_models/shared.py b/fluentd/datadog_checks/fluentd/config_models/shared.py new file mode 100644 index 0000000000000..7fe21e33650f0 
--- /dev/null +++ b/fluentd/datadog_checks/fluentd/config_models/shared.py @@ -0,0 +1,55 @@ +# (C) Datadog, Inc. 2021-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +from __future__ import annotations + +from typing import Optional, Sequence + +from pydantic import BaseModel, root_validator, validator + +from datadog_checks.base.utils.functions import identity +from datadog_checks.base.utils.models import validation + +from . import defaults, validators + + +class Proxy(BaseModel): + class Config: + allow_mutation = False + + http: Optional[str] + https: Optional[str] + no_proxy: Optional[Sequence[str]] + + +class SharedConfig(BaseModel): + class Config: + allow_mutation = False + + fluentd: Optional[str] + proxy: Optional[Proxy] + service: Optional[str] + skip_proxy: Optional[bool] + timeout: Optional[float] + + @root_validator(pre=True) + def _initial_validation(cls, values): + return validation.core.initialize_config(getattr(validators, 'initialize_shared', identity)(values)) + + @validator('*', pre=True, always=True) + def _ensure_defaults(cls, v, field): + if v is not None or field.required: + return v + + return getattr(defaults, f'shared_{field.name}')(field, v) + + @validator('*') + def _run_validations(cls, v, field): + if not v: + return v + + return getattr(validators, f'shared_{field.name}', identity)(v, field=field) + + @root_validator(pre=False) + def _final_validation(cls, values): + return validation.core.finalize_config(getattr(validators, 'finalize_shared', identity)(values)) diff --git a/fluentd/datadog_checks/fluentd/config_models/validators.py b/fluentd/datadog_checks/fluentd/config_models/validators.py new file mode 100644 index 0000000000000..9d0b0155542cb --- /dev/null +++ b/fluentd/datadog_checks/fluentd/config_models/validators.py @@ -0,0 +1,3 @@ +# (C) Datadog, Inc. 
2021-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) diff --git a/fluentd/tests/test_integration_and_e2e.py b/fluentd/tests/test_integration_and_e2e.py index 291209048aefb..20ae1aae321af 100644 --- a/fluentd/tests/test_integration_and_e2e.py +++ b/fluentd/tests/test_integration_and_e2e.py @@ -25,11 +25,11 @@ def assert_basic_case(aggregator): @pytest.mark.integration @pytest.mark.usefixtures("dd_environment") -def test_basic_case_integration(aggregator): +def test_basic_case_integration(aggregator, dd_run_check): instance = copy.deepcopy(INSTANCE_WITH_PLUGIN) check = Fluentd(CHECK_NAME, {}, [instance]) - check.check(None) - check.check(None) + dd_run_check(check) + dd_run_check(check) assert_basic_case(aggregator) From 2279e152183053d67cd965624d288db052c6ff00 Mon Sep 17 00:00:00 2001 From: Ofek Lev Date: Mon, 8 Nov 2021 12:45:41 -0500 Subject: [PATCH 10/19] Add runtime configuration validation (#8901) * Sync config models * re-sync * re-sync * address Co-authored-by: Paul Coignet --- couch/assets/configuration/spec.yaml | 2 - .../couch/config_models/__init__.py | 18 ++ .../couch/config_models/defaults.py | 192 ++++++++++++++++++ .../couch/config_models/instance.py | 102 ++++++++++ .../couch/config_models/shared.py | 54 +++++ .../couch/config_models/validators.py | 3 + .../couch/data/conf.yaml.example | 2 +- 7 files changed, 370 insertions(+), 3 deletions(-) create mode 100644 couch/datadog_checks/couch/config_models/__init__.py create mode 100644 couch/datadog_checks/couch/config_models/defaults.py create mode 100644 couch/datadog_checks/couch/config_models/instance.py create mode 100644 couch/datadog_checks/couch/config_models/shared.py create mode 100644 couch/datadog_checks/couch/config_models/validators.py diff --git a/couch/assets/configuration/spec.yaml b/couch/assets/configuration/spec.yaml index 34efe19bf4ef3..24a8c36648495 100644 --- a/couch/assets/configuration/spec.yaml +++ b/couch/assets/configuration/spec.yaml @@ -30,7 +30,6 @@ files: example: - - - display_default: all - name: db_exclude description: | The `db_exclude` should contain the names of any databases meant to be excluded @@ -43,7 +42,6 @@ files: example: - - - display_default: null - name: max_dbs_per_check description: Number of databases to scan per check. value: diff --git a/couch/datadog_checks/couch/config_models/__init__.py b/couch/datadog_checks/couch/config_models/__init__.py new file mode 100644 index 0000000000000..ba42dbdc7ffb0 --- /dev/null +++ b/couch/datadog_checks/couch/config_models/__init__.py @@ -0,0 +1,18 @@ +# (C) Datadog, Inc. 2021-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +from .instance import InstanceConfig +from .shared import SharedConfig + + +class ConfigMixin: + _config_model_instance: InstanceConfig + _config_model_shared: SharedConfig + + @property + def config(self) -> InstanceConfig: + return self._config_model_instance + + @property + def shared_config(self) -> SharedConfig: + return self._config_model_shared diff --git a/couch/datadog_checks/couch/config_models/defaults.py b/couch/datadog_checks/couch/config_models/defaults.py new file mode 100644 index 0000000000000..2adeb81f4ed88 --- /dev/null +++ b/couch/datadog_checks/couch/config_models/defaults.py @@ -0,0 +1,192 @@ +# (C) Datadog, Inc. 
2021-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +from datadog_checks.base.utils.models.fields import get_default_field_value + + +def shared_proxy(field, value): + return get_default_field_value(field, value) + + +def shared_service(field, value): + return get_default_field_value(field, value) + + +def shared_skip_proxy(field, value): + return False + + +def shared_timeout(field, value): + return 10 + + +def instance_allow_redirects(field, value): + return True + + +def instance_auth_token(field, value): + return get_default_field_value(field, value) + + +def instance_auth_type(field, value): + return 'basic' + + +def instance_aws_host(field, value): + return get_default_field_value(field, value) + + +def instance_aws_region(field, value): + return get_default_field_value(field, value) + + +def instance_aws_service(field, value): + return get_default_field_value(field, value) + + +def instance_connect_timeout(field, value): + return get_default_field_value(field, value) + + +def instance_db_exclude(field, value): + return get_default_field_value(field, value) + + +def instance_db_include(field, value): + return get_default_field_value(field, value) + + +def instance_disable_generic_tags(field, value): + return False + + +def instance_empty_default_hostname(field, value): + return False + + +def instance_extra_headers(field, value): + return get_default_field_value(field, value) + + +def instance_headers(field, value): + return get_default_field_value(field, value) + + +def instance_kerberos_auth(field, value): + return 'disabled' + + +def instance_kerberos_cache(field, value): + return get_default_field_value(field, value) + + +def instance_kerberos_delegate(field, value): + return False + + +def instance_kerberos_force_initiate(field, value): + return False + + +def instance_kerberos_hostname(field, value): + return get_default_field_value(field, value) + + +def instance_kerberos_keytab(field, value): + return get_default_field_value(field, value) + + +def instance_kerberos_principal(field, value): + return get_default_field_value(field, value) + + +def instance_log_requests(field, value): + return False + + +def instance_max_dbs_per_check(field, value): + return 50 + + +def instance_max_nodes_per_check(field, value): + return 20 + + +def instance_min_collection_interval(field, value): + return 15 + + +def instance_name(field, value): + return get_default_field_value(field, value) + + +def instance_ntlm_domain(field, value): + return get_default_field_value(field, value) + + +def instance_password(field, value): + return get_default_field_value(field, value) + + +def instance_persist_connections(field, value): + return False + + +def instance_proxy(field, value): + return get_default_field_value(field, value) + + +def instance_read_timeout(field, value): + return get_default_field_value(field, value) + + +def instance_request_size(field, value): + return 16 + + +def instance_service(field, value): + return get_default_field_value(field, value) + + +def instance_skip_proxy(field, value): + return False + + +def instance_tags(field, value): + return get_default_field_value(field, value) + + +def instance_timeout(field, value): + return 10 + + +def instance_tls_ca_cert(field, value): + return get_default_field_value(field, value) + + +def instance_tls_cert(field, value): + return get_default_field_value(field, value) + + +def instance_tls_ignore_warning(field, value): + return False + + +def instance_tls_private_key(field, value): + return 
get_default_field_value(field, value) + + +def instance_tls_use_host_header(field, value): + return False + + +def instance_tls_verify(field, value): + return True + + +def instance_use_legacy_auth_encoding(field, value): + return True + + +def instance_username(field, value): + return get_default_field_value(field, value) diff --git a/couch/datadog_checks/couch/config_models/instance.py b/couch/datadog_checks/couch/config_models/instance.py new file mode 100644 index 0000000000000..c27535a9bd2ed --- /dev/null +++ b/couch/datadog_checks/couch/config_models/instance.py @@ -0,0 +1,102 @@ +# (C) Datadog, Inc. 2021-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +from __future__ import annotations + +from typing import Any, Mapping, Optional, Sequence + +from pydantic import BaseModel, root_validator, validator + +from datadog_checks.base.utils.functions import identity +from datadog_checks.base.utils.models import validation + +from . import defaults, validators + + +class AuthToken(BaseModel): + class Config: + allow_mutation = False + + reader: Optional[Mapping[str, Any]] + writer: Optional[Mapping[str, Any]] + + +class Proxy(BaseModel): + class Config: + allow_mutation = False + + http: Optional[str] + https: Optional[str] + no_proxy: Optional[Sequence[str]] + + +class InstanceConfig(BaseModel): + class Config: + allow_mutation = False + + allow_redirects: Optional[bool] + auth_token: Optional[AuthToken] + auth_type: Optional[str] + aws_host: Optional[str] + aws_region: Optional[str] + aws_service: Optional[str] + connect_timeout: Optional[float] + db_exclude: Optional[Sequence[str]] + db_include: Optional[Sequence[str]] + disable_generic_tags: Optional[bool] + empty_default_hostname: Optional[bool] + extra_headers: Optional[Mapping[str, Any]] + headers: Optional[Mapping[str, Any]] + kerberos_auth: Optional[str] + kerberos_cache: Optional[str] + kerberos_delegate: Optional[bool] + kerberos_force_initiate: Optional[bool] + kerberos_hostname: Optional[str] + kerberos_keytab: Optional[str] + kerberos_principal: Optional[str] + log_requests: Optional[bool] + max_dbs_per_check: Optional[int] + max_nodes_per_check: Optional[int] + min_collection_interval: Optional[float] + name: Optional[str] + ntlm_domain: Optional[str] + password: Optional[str] + persist_connections: Optional[bool] + proxy: Optional[Proxy] + read_timeout: Optional[float] + request_size: Optional[float] + server: str + service: Optional[str] + skip_proxy: Optional[bool] + tags: Optional[Sequence[str]] + timeout: Optional[float] + tls_ca_cert: Optional[str] + tls_cert: Optional[str] + tls_ignore_warning: Optional[bool] + tls_private_key: Optional[str] + tls_use_host_header: Optional[bool] + tls_verify: Optional[bool] + use_legacy_auth_encoding: Optional[bool] + username: Optional[str] + + @root_validator(pre=True) + def _initial_validation(cls, values): + return validation.core.initialize_config(getattr(validators, 'initialize_instance', identity)(values)) + + @validator('*', pre=True, always=True) + def _ensure_defaults(cls, v, field): + if v is not None or field.required: + return v + + return getattr(defaults, f'instance_{field.name}')(field, v) + + @validator('*') + def _run_validations(cls, v, field): + if not v: + return v + + return getattr(validators, f'instance_{field.name}', identity)(v, field=field) + + @root_validator(pre=False) + def _final_validation(cls, values): + return validation.core.finalize_config(getattr(validators, 'finalize_instance', identity)(values)) diff 
--git a/couch/datadog_checks/couch/config_models/shared.py b/couch/datadog_checks/couch/config_models/shared.py new file mode 100644 index 0000000000000..4fc6216ab6c2f --- /dev/null +++ b/couch/datadog_checks/couch/config_models/shared.py @@ -0,0 +1,54 @@ +# (C) Datadog, Inc. 2021-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +from __future__ import annotations + +from typing import Optional, Sequence + +from pydantic import BaseModel, root_validator, validator + +from datadog_checks.base.utils.functions import identity +from datadog_checks.base.utils.models import validation + +from . import defaults, validators + + +class Proxy(BaseModel): + class Config: + allow_mutation = False + + http: Optional[str] + https: Optional[str] + no_proxy: Optional[Sequence[str]] + + +class SharedConfig(BaseModel): + class Config: + allow_mutation = False + + proxy: Optional[Proxy] + service: Optional[str] + skip_proxy: Optional[bool] + timeout: Optional[float] + + @root_validator(pre=True) + def _initial_validation(cls, values): + return validation.core.initialize_config(getattr(validators, 'initialize_shared', identity)(values)) + + @validator('*', pre=True, always=True) + def _ensure_defaults(cls, v, field): + if v is not None or field.required: + return v + + return getattr(defaults, f'shared_{field.name}')(field, v) + + @validator('*') + def _run_validations(cls, v, field): + if not v: + return v + + return getattr(validators, f'shared_{field.name}', identity)(v, field=field) + + @root_validator(pre=False) + def _final_validation(cls, values): + return validation.core.finalize_config(getattr(validators, 'finalize_shared', identity)(values)) diff --git a/couch/datadog_checks/couch/config_models/validators.py b/couch/datadog_checks/couch/config_models/validators.py new file mode 100644 index 0000000000000..9d0b0155542cb --- /dev/null +++ b/couch/datadog_checks/couch/config_models/validators.py @@ -0,0 +1,3 @@ +# (C) Datadog, Inc. 2021-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) diff --git a/couch/datadog_checks/couch/data/conf.yaml.example b/couch/datadog_checks/couch/data/conf.yaml.example index b5a697362d4c8..c61468c02faa5 100644 --- a/couch/datadog_checks/couch/data/conf.yaml.example +++ b/couch/datadog_checks/couch/data/conf.yaml.example @@ -60,7 +60,7 @@ instances: # # password: - ## @param db_include - list of strings - optional - default: all + ## @param db_include - list of strings - optional ## The `db_include` should contain the names of the databases meant to be checked. ## If no include list is specified, all databases will be checked. ## From 858bf534e428c902fd5601f86cdb38ed16fa0c67 Mon Sep 17 00:00:00 2001 From: Dusan Jovanovic Date: Mon, 8 Nov 2021 12:47:25 -0500 Subject: [PATCH 11/19] update DBM instructions for requesting beta access (#10524) --- sqlserver/assets/configuration/spec.yaml | 3 ++- sqlserver/datadog_checks/sqlserver/data/conf.yaml.example | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/sqlserver/assets/configuration/spec.yaml b/sqlserver/assets/configuration/spec.yaml index b9b0e762251b1..cd0a5f7a680a5 100644 --- a/sqlserver/assets/configuration/spec.yaml +++ b/sqlserver/assets/configuration/spec.yaml @@ -207,7 +207,8 @@ files: example: "" - name: dbm description: | - Set to `true` to enable the Database Monitoring beta. + Set to `true` to enable Database Monitoring. + Reach out to your Customer Success Manager to request access to the beta. 
value: type: boolean example: false diff --git a/sqlserver/datadog_checks/sqlserver/data/conf.yaml.example b/sqlserver/datadog_checks/sqlserver/data/conf.yaml.example index ccf933472851c..3241b7f3b724a 100644 --- a/sqlserver/datadog_checks/sqlserver/data/conf.yaml.example +++ b/sqlserver/datadog_checks/sqlserver/data/conf.yaml.example @@ -196,7 +196,8 @@ instances: # connection_string: ## @param dbm - boolean - optional - default: false - ## Set to `true` to enable the Database Monitoring beta. + ## Set to `true` to enable Database Monitoring. + ## Reach out to your Customer Success Manager to request access to the beta. # # dbm: false From b7b251f286bd10a4e84a1bc76824f6566a87d3db Mon Sep 17 00:00:00 2001 From: Ofek Lev Date: Mon, 8 Nov 2021 12:58:15 -0500 Subject: [PATCH 12/19] Add runtime configuration validation (#8968) * Sync config models * re-sync * address --- pgbouncer/assets/configuration/spec.yaml | 1 - .../pgbouncer/config_models/__init__.py | 18 +++++++ .../pgbouncer/config_models/defaults.py | 52 +++++++++++++++++++ .../pgbouncer/config_models/instance.py | 52 +++++++++++++++++++ .../pgbouncer/config_models/shared.py | 42 +++++++++++++++ .../pgbouncer/config_models/validators.py | 3 ++ .../pgbouncer/data/conf.yaml.example | 5 +- 7 files changed, 170 insertions(+), 3 deletions(-) create mode 100644 pgbouncer/datadog_checks/pgbouncer/config_models/__init__.py create mode 100644 pgbouncer/datadog_checks/pgbouncer/config_models/defaults.py create mode 100644 pgbouncer/datadog_checks/pgbouncer/config_models/instance.py create mode 100644 pgbouncer/datadog_checks/pgbouncer/config_models/shared.py create mode 100644 pgbouncer/datadog_checks/pgbouncer/config_models/validators.py diff --git a/pgbouncer/assets/configuration/spec.yaml b/pgbouncer/assets/configuration/spec.yaml index ebbf7478f9495..999ca220f104b 100644 --- a/pgbouncer/assets/configuration/spec.yaml +++ b/pgbouncer/assets/configuration/spec.yaml @@ -14,7 +14,6 @@ files: If the password contains special characters, be sure to escape them using percent encoding. See: https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING - required: true value: example: postgresql://:@:/?sslmode=require type: string diff --git a/pgbouncer/datadog_checks/pgbouncer/config_models/__init__.py b/pgbouncer/datadog_checks/pgbouncer/config_models/__init__.py new file mode 100644 index 0000000000000..ba42dbdc7ffb0 --- /dev/null +++ b/pgbouncer/datadog_checks/pgbouncer/config_models/__init__.py @@ -0,0 +1,18 @@ +# (C) Datadog, Inc. 2021-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +from .instance import InstanceConfig +from .shared import SharedConfig + + +class ConfigMixin: + _config_model_instance: InstanceConfig + _config_model_shared: SharedConfig + + @property + def config(self) -> InstanceConfig: + return self._config_model_instance + + @property + def shared_config(self) -> SharedConfig: + return self._config_model_shared diff --git a/pgbouncer/datadog_checks/pgbouncer/config_models/defaults.py b/pgbouncer/datadog_checks/pgbouncer/config_models/defaults.py new file mode 100644 index 0000000000000..8023117bf25d5 --- /dev/null +++ b/pgbouncer/datadog_checks/pgbouncer/config_models/defaults.py @@ -0,0 +1,52 @@ +# (C) Datadog, Inc. 
2021-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +from datadog_checks.base.utils.models.fields import get_default_field_value + + +def shared_service(field, value): + return get_default_field_value(field, value) + + +def instance_database_url(field, value): + return 'postgresql://:@:/?sslmode=require' + + +def instance_disable_generic_tags(field, value): + return False + + +def instance_empty_default_hostname(field, value): + return False + + +def instance_host(field, value): + return get_default_field_value(field, value) + + +def instance_min_collection_interval(field, value): + return 15 + + +def instance_password(field, value): + return get_default_field_value(field, value) + + +def instance_port(field, value): + return get_default_field_value(field, value) + + +def instance_service(field, value): + return get_default_field_value(field, value) + + +def instance_tags(field, value): + return get_default_field_value(field, value) + + +def instance_use_cached(field, value): + return True + + +def instance_username(field, value): + return get_default_field_value(field, value) diff --git a/pgbouncer/datadog_checks/pgbouncer/config_models/instance.py b/pgbouncer/datadog_checks/pgbouncer/config_models/instance.py new file mode 100644 index 0000000000000..21bd705073c29 --- /dev/null +++ b/pgbouncer/datadog_checks/pgbouncer/config_models/instance.py @@ -0,0 +1,52 @@ +# (C) Datadog, Inc. 2021-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +from __future__ import annotations + +from typing import Optional, Sequence + +from pydantic import BaseModel, root_validator, validator + +from datadog_checks.base.utils.functions import identity +from datadog_checks.base.utils.models import validation + +from . import defaults, validators + + +class InstanceConfig(BaseModel): + class Config: + allow_mutation = False + + database_url: Optional[str] + disable_generic_tags: Optional[bool] + empty_default_hostname: Optional[bool] + host: Optional[str] + min_collection_interval: Optional[float] + password: Optional[str] + port: Optional[int] + service: Optional[str] + tags: Optional[Sequence[str]] + use_cached: Optional[bool] + username: Optional[str] + + @root_validator(pre=True) + def _initial_validation(cls, values): + return validation.core.initialize_config(getattr(validators, 'initialize_instance', identity)(values)) + + @validator('*', pre=True, always=True) + def _ensure_defaults(cls, v, field): + if v is not None or field.required: + return v + + return getattr(defaults, f'instance_{field.name}')(field, v) + + @validator('*') + def _run_validations(cls, v, field): + if not v: + return v + + return getattr(validators, f'instance_{field.name}', identity)(v, field=field) + + @root_validator(pre=False) + def _final_validation(cls, values): + return validation.core.finalize_config(getattr(validators, 'finalize_instance', identity)(values)) diff --git a/pgbouncer/datadog_checks/pgbouncer/config_models/shared.py b/pgbouncer/datadog_checks/pgbouncer/config_models/shared.py new file mode 100644 index 0000000000000..d1c10eced36ca --- /dev/null +++ b/pgbouncer/datadog_checks/pgbouncer/config_models/shared.py @@ -0,0 +1,42 @@ +# (C) Datadog, Inc. 
2021-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +from __future__ import annotations + +from typing import Optional + +from pydantic import BaseModel, root_validator, validator + +from datadog_checks.base.utils.functions import identity +from datadog_checks.base.utils.models import validation + +from . import defaults, validators + + +class SharedConfig(BaseModel): + class Config: + allow_mutation = False + + service: Optional[str] + + @root_validator(pre=True) + def _initial_validation(cls, values): + return validation.core.initialize_config(getattr(validators, 'initialize_shared', identity)(values)) + + @validator('*', pre=True, always=True) + def _ensure_defaults(cls, v, field): + if v is not None or field.required: + return v + + return getattr(defaults, f'shared_{field.name}')(field, v) + + @validator('*') + def _run_validations(cls, v, field): + if not v: + return v + + return getattr(validators, f'shared_{field.name}', identity)(v, field=field) + + @root_validator(pre=False) + def _final_validation(cls, values): + return validation.core.finalize_config(getattr(validators, 'finalize_shared', identity)(values)) diff --git a/pgbouncer/datadog_checks/pgbouncer/config_models/validators.py b/pgbouncer/datadog_checks/pgbouncer/config_models/validators.py new file mode 100644 index 0000000000000..9d0b0155542cb --- /dev/null +++ b/pgbouncer/datadog_checks/pgbouncer/config_models/validators.py @@ -0,0 +1,3 @@ +# (C) Datadog, Inc. 2021-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) diff --git a/pgbouncer/datadog_checks/pgbouncer/data/conf.yaml.example b/pgbouncer/datadog_checks/pgbouncer/data/conf.yaml.example index 0f1259e0f5a00..f3f7dcd36b0b8 100644 --- a/pgbouncer/datadog_checks/pgbouncer/data/conf.yaml.example +++ b/pgbouncer/datadog_checks/pgbouncer/data/conf.yaml.example @@ -13,14 +13,15 @@ init_config: # instances: - ## @param database_url - string - required + - + ## @param database_url - string - optional - default: postgresql://:@:/?sslmode=require ## The PgBouncer stats database URL. ## ## If the password contains special characters, be sure to escape them using percent encoding. ## ## See: https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING # - - database_url: postgresql://:@:/?sslmode=require + # database_url: postgresql://:@:/?sslmode=require ## @param host - string - optional ## If `database_url` is not used, set up the host to connect to with the `host` parameter. From 703ee741e73d60a10d2965d91375c0ad6ccfcaad Mon Sep 17 00:00:00 2001 From: cswatt Date: Mon, 8 Nov 2021 14:01:43 -0500 Subject: [PATCH 13/19] [DOCS-2262] route information to alertmanager docs (#9830) Co-authored-by: Austin Lai <76412946+alai97@users.noreply.github.com> --- prometheus/README.md | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/prometheus/README.md b/prometheus/README.md index 1b24564f6a7ee..6457cd6703e8b 100644 --- a/prometheus/README.md +++ b/prometheus/README.md @@ -63,7 +63,7 @@ Prometheus Alertmanager alerts are automatically sent to your Datadog event stre The Prometheus check does not include any service checks. ## Prometheus Alertmanager -Send Prometheus Alertmanager alerts in the event stream. +Send Prometheus Alertmanager alerts in the event stream. Natively, Alertmanager sends all alerts simultaneously to the configured webhook. To see alerts in Datadog, you must configure your instance of Alertmanager to send alerts one at a time. 
You can add a group-by parameter under `route` to have alerts grouped by the actual name of the alert rule. ### Setup 1. Edit the Alertmanager configuration file, `alertmanager.yml`, to include the following: @@ -73,7 +73,16 @@ receivers: webhook_configs: - send_resolved: true url: https://app.datadoghq.com/intake/webhook/prometheus?api_key= +route: + group_by: ['alertname'] + group_wait: 10s + group_interval: 5m + receiver: datadog + repeat_interval: 3h ``` + +**Note**: This endpoint accepts only one event in the payload at a time. + 2. Restart the Prometheus and Alertmanager services. ``` sudo systemctl restart prometheus.service alertmanager.service From f459d287e36463ccd94c248ec6df7ce2fc90c10a Mon Sep 17 00:00:00 2001 From: cswatt Date: Mon, 8 Nov 2021 14:05:54 -0500 Subject: [PATCH 14/19] [DOCS 1496] Update kube-apiserver-metrics readme with cluster agent note (#8780) Co-authored-by: Kaylyn --- kube_apiserver_metrics/README.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/kube_apiserver_metrics/README.md b/kube_apiserver_metrics/README.md index 8f08a7fd24edd..1d8dffb3b88e6 100644 --- a/kube_apiserver_metrics/README.md +++ b/kube_apiserver_metrics/README.md @@ -26,10 +26,10 @@ annotations: '[{ "prometheus_url": "https://%%host%%:%%port%%/metrics", "bearer_token_auth": "true" }]' ``` -Then the Datadog Cluster Agent schedules the check(s) for each endpoint onto Datadog Agent(s). +Then the Datadog Cluster Agent schedules the check(s) for each endpoint onto Datadog Agent(s). You can also run the check by configuring the endpoints directly in the `kube_apiserver_metrics.d/conf.yaml` file, in the `conf.d/` folder at the root of your [Agent's configuration directory][4]. -See the [sample kube_apiserver_metrics.d/conf.yaml][2] for all available configuration options. +You must add `cluster_check: true` to your [configuration file][9] when using a static configuration file or ConfigMap to configure cluster checks. See the [sample kube_apiserver_metrics.d/conf.yaml][2] for all available configuration options. By default the Agent running the check tries to get the service account bearer token to authenticate against the APIServer. If you are not using RBACs, set `bearer_token_auth` to `false`. @@ -65,3 +65,4 @@ Need help? Contact [Datadog support][8]. [6]: https://docs.datadoghq.com/agent/faq/agent-commands/#agent-status-and-information [7]: https://github.com/DataDog/integrations-core/blob/master/kube_apiserver_metrics/metadata.csv [8]: https://docs.datadoghq.com/help/ +[9]: https://docs.datadoghq.com/agent/cluster_agent/clusterchecks/#set-up-cluster-checks From 1cd174e2405b3009880da5bbb40252bf88458d45 Mon Sep 17 00:00:00 2001 From: Fanny Jiang Date: Mon, 8 Nov 2021 15:03:53 -0500 Subject: [PATCH 15/19] Adds support for include_labels (#10493) * initial commit for include_labels * add tests * add include_labels to v2, add tests * resolve config sync * update config specs * sync configs and data models * Revert "sync configs and data models" This reverts commit c20bd3d8816525051f58429069ca6ed462c8dbf0. 
* remove changes to config templates * resync configs and models * apply suggestions from review * address comments from review, add tests * add additional tests for more v1 coverage * clean up tests --- .../base/checks/openmetrics/base_check.py | 1 + .../base/checks/openmetrics/mixins.py | 11 +++- .../base/checks/openmetrics/v2/scraper.py | 15 +++++ .../base/checks/openmetrics/test_config.py | 14 +++++ .../checks/openmetrics/test_openmetrics.py | 56 +++++++++++++++---- .../base/checks/openmetrics/test_options.py | 46 ++++++++++++--- 6 files changed, 123 insertions(+), 20 deletions(-) diff --git a/datadog_checks_base/datadog_checks/base/checks/openmetrics/base_check.py b/datadog_checks_base/datadog_checks/base/checks/openmetrics/base_check.py index a623b124b639d..13e96a43673e4 100644 --- a/datadog_checks_base/datadog_checks/base/checks/openmetrics/base_check.py +++ b/datadog_checks_base/datadog_checks/base/checks/openmetrics/base_check.py @@ -16,6 +16,7 @@ 'metrics', 'prometheus_metrics_prefix', 'health_service_check', + 'include_labels', 'label_to_hostname', 'label_joins', 'labels_mapper', diff --git a/datadog_checks_base/datadog_checks/base/checks/openmetrics/mixins.py b/datadog_checks_base/datadog_checks/base/checks/openmetrics/mixins.py index cd0a4f385d2c4..6fa76e2676a8d 100644 --- a/datadog_checks_base/datadog_checks/base/checks/openmetrics/mixins.py +++ b/datadog_checks_base/datadog_checks/base/checks/openmetrics/mixins.py @@ -244,10 +244,14 @@ def create_scraper_configuration(self, instance=None): # Rename bucket "le" label to "upper_bound" config['labels_mapper']['le'] = 'upper_bound' - # `exclude_labels` is an array of labels names to exclude. Those labels + # `exclude_labels` is an array of label names to exclude. Those labels # will just not be added as tags when submitting the metric. config['exclude_labels'] = default_instance.get('exclude_labels', []) + instance.get('exclude_labels', []) + # `include_labels` is an array of label names to include. If these labels are not in + # the `exclude_labels` list, then they are added as tags when submitting the metric. + config['include_labels'] = default_instance.get('include_labels', []) + instance.get('include_labels', []) + # `type_overrides` is a dictionary where the keys are prometheus metric names # and the values are a metric type (name as string) to use instead of the one # listed in the payload. It can be used to force a type on untyped metrics. 
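Both the v1 mixin hunk below and the v2 scraper change that follows apply the same precedence rule when turning sample labels into tags: a label named in `exclude_labels` is always dropped, and a non-empty `include_labels` acts as an allowlist for whatever remains, while an empty list keeps the previous submit-everything behavior. A minimal sketch of that rule (the `keep_label` helper is illustrative only and does not exist in the patch):

```python
def keep_label(label_name, exclude_labels, include_labels):
    # A label listed in `exclude_labels` is always dropped: exclusion wins
    # when the same label appears in both lists.
    if label_name in exclude_labels:
        return False
    # An empty `include_labels` keeps every remaining label (the old
    # behavior); a non-empty list acts as an allowlist.
    if include_labels and label_name not in include_labels:
        return False
    return True


# Mirrors the parametrized cases added to the tests further down:
keep_label('foo', exclude_labels={'foo'}, include_labels=set())                  # False: excluded
keep_label('bar', exclude_labels=set(), include_labels={'foo'})                  # False: not in the allowlist
keep_label('bar', exclude_labels={'foo', 'bar'}, include_labels={'zip', 'bar'})  # False: exclusion wins
keep_label('zip', exclude_labels={'foo', 'bar'}, include_labels={'zip', 'bar'})  # True: included and not excluded
```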
@@ -1113,8 +1117,9 @@ def _metric_tags(self, metric_name, val, sample, scraper_config, hostname=None): _tags.extend(scraper_config['_metric_tags']) for label_name, label_value in iteritems(sample[self.SAMPLE_LABELS]): if label_name not in scraper_config['exclude_labels']: - tag_name = scraper_config['labels_mapper'].get(label_name, label_name) - _tags.append('{}:{}'.format(to_native_string(tag_name), to_native_string(label_value))) + if label_name in scraper_config['include_labels'] or len(scraper_config['include_labels']) == 0: + tag_name = scraper_config['labels_mapper'].get(label_name, label_name) + _tags.append('{}:{}'.format(to_native_string(tag_name), to_native_string(label_value))) return self._finalize_tags_to_submit( _tags, metric_name, val, sample, custom_tags=custom_tags, hostname=hostname ) diff --git a/datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/scraper.py b/datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/scraper.py index a657f55f73799..0ad87e44fae8a 100644 --- a/datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/scraper.py +++ b/datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/scraper.py @@ -102,6 +102,19 @@ def __init__(self, check, config): self.exclude_labels.add(entry) + include_labels = config.get('include_labels', []) + if not isinstance(include_labels, list): + raise ConfigurationError('Setting `include_labels` must be an array') + self.include_labels = set() + for i, entry in enumerate(include_labels, 1): + if not isinstance(entry, str): + raise ConfigurationError(f'Entry #{i} of setting `include_labels` must be a string') + if entry in self.exclude_labels: + self.log.debug( + 'Label `%s` is set in both `exclude_labels` and `include_labels`. Excluding label.', entry + ) + self.include_labels.add(entry) + self.rename_labels = config.get('rename_labels', {}) if not isinstance(self.rename_labels, dict): raise ConfigurationError('Setting `rename_labels` must be a mapping') @@ -287,6 +300,8 @@ def generate_sample_data(self, metric): break elif label_name in self.exclude_labels: continue + elif self.include_labels and label_name not in self.include_labels: + continue label_name = self.rename_labels.get(label_name, label_name) tags.append(f'{label_name}:{label_value}') diff --git a/datadog_checks_base/tests/base/checks/openmetrics/test_config.py b/datadog_checks_base/tests/base/checks/openmetrics/test_config.py index 28c2eea0c2e3b..2354442b1eb28 100644 --- a/datadog_checks_base/tests/base/checks/openmetrics/test_config.py +++ b/datadog_checks_base/tests/base/checks/openmetrics/test_config.py @@ -86,6 +86,20 @@ def test_entry_invalid_type(self, dd_run_check): dd_run_check(check, extract_message=True) +class TestIncludeLabels: + def test_inc_not_array(self, dd_run_check): + check = get_check({'include_labels': 9000}) + + with pytest.raises(Exception, match='^Setting `include_labels` must be an array$'): + dd_run_check(check, extract_message=True) + + def test_inc_entry_invalid_type(self, dd_run_check): + check = get_check({'include_labels': [9000]}) + + with pytest.raises(Exception, match='^Entry #1 of setting `include_labels` must be a string$'): + dd_run_check(check, extract_message=True) + + class TestRenameLabels: def test_not_mapping(self, dd_run_check): check = get_check({'rename_labels': 9000}) diff --git a/datadog_checks_base/tests/base/checks/openmetrics/test_openmetrics.py b/datadog_checks_base/tests/base/checks/openmetrics/test_openmetrics.py index bef8223dadaa2..bc9959d731264 100644 --- 
a/datadog_checks_base/tests/base/checks/openmetrics/test_openmetrics.py +++ b/datadog_checks_base/tests/base/checks/openmetrics/test_openmetrics.py @@ -357,10 +357,49 @@ def test_submit_gauge_with_labels_mapper(aggregator, mocked_prometheus_check, mo ) -def test_submit_gauge_with_exclude_labels(aggregator, mocked_prometheus_check, mocked_prometheus_scraper_config): +@pytest.mark.parametrize( + 'excluded_labels, included_labels, expected', + ( + ( + ['my_2nd_label', 'whatever_else', 'env'], + [], + ['env:dev', 'app:my_pretty_app', 'transformed_1st_label:my_1st_label_value'], + ), + ( + [], + ['my_2nd_label', 'whatever_else', 'env'], + ['env:dev', 'app:my_pretty_app', 'my_2nd_label:my_2nd_label_value'], + ), + ( + ['my_2nd_label', 'whatever_else', 'env'], + ['my_1st_label', 'whatever_else', 'env'], + ['env:dev', 'app:my_pretty_app', 'transformed_1st_label:my_1st_label_value'], + ), + ( + ['my_2nd_label', 'whatever_else', 'env'], + ['my_1st_label', 'my_2nd_label', 'whatever_else', 'env'], + ['env:dev', 'app:my_pretty_app', 'transformed_1st_label:my_1st_label_value'], + ), + ), + ids=( + 'Test excluded labels.', + 'Test included labels.', + 'Test both excluded and included labels, no override.', + 'Test both excluded and included labels with override.', + ), +) +def test_submit_gauge_with_exclude_include_labels( + aggregator, + mocked_prometheus_check, + mocked_prometheus_scraper_config, + excluded_labels, + included_labels, + expected, +): """ - Submitting metrics when filtering with exclude_labels should end up with - a filtered tags list + Submitting metrics when filtering with exclude_labels and/or include_labels should + end up with a filtered tags list, where exclude_labels are excluded and + include_labels are included. Labels that are present in both exclude_labels and include_labels are excluded. 
""" ref_gauge = GaugeMetricFamily( 'process_virtual_memory_bytes', 'Virtual memory size in bytes.', labels=['my_1st_label', 'my_2nd_label'] @@ -369,22 +408,19 @@ def test_submit_gauge_with_exclude_labels(aggregator, mocked_prometheus_check, m check = mocked_prometheus_check mocked_prometheus_scraper_config['labels_mapper'] = { - 'my_1st_label': 'transformed_1st', + 'my_1st_label': 'transformed_1st_label', 'non_existent': 'should_not_matter', 'env': 'dont_touch_custom_tags', } mocked_prometheus_scraper_config['custom_tags'] = ['env:dev', 'app:my_pretty_app'] - mocked_prometheus_scraper_config['exclude_labels'] = [ - 'my_2nd_label', - 'whatever_else', - 'env', - ] # custom tags are not filtered out + mocked_prometheus_scraper_config['exclude_labels'] = excluded_labels + mocked_prometheus_scraper_config['include_labels'] = included_labels metric = mocked_prometheus_scraper_config['metrics_mapper'][ref_gauge.name] check.submit_openmetric(metric, ref_gauge, mocked_prometheus_scraper_config) aggregator.assert_metric( 'prometheus.process.vm.bytes', 54927360.0, - tags=['env:dev', 'app:my_pretty_app', 'transformed_1st:my_1st_label_value'], + tags=expected, count=1, ) diff --git a/datadog_checks_base/tests/base/checks/openmetrics/test_options.py b/datadog_checks_base/tests/base/checks/openmetrics/test_options.py index b64f74e318aee..fc0c9d358c5b5 100644 --- a/datadog_checks_base/tests/base/checks/openmetrics/test_options.py +++ b/datadog_checks_base/tests/base/checks/openmetrics/test_options.py @@ -145,21 +145,53 @@ def test(self, aggregator, dd_run_check, mock_http_response): aggregator.assert_all_metrics_covered() -class TestExcludeLabels: - def test(self, aggregator, dd_run_check, mock_http_response): +@pytest.mark.parametrize( + 'excluded_labels, included_labels, expected_a, expected_b, expected_c', + ( + (['foo'], [], ['endpoint:test'], ['endpoint:test', 'bar:foo'], ['endpoint:test', 'zip:zap']), + ([], ['foo'], ['endpoint:test', 'foo:bar'], ['endpoint:test'], ['endpoint:test', 'foo:bar']), + (['bar'], ['foo'], ['endpoint:test', 'foo:bar'], ['endpoint:test'], ['endpoint:test', 'foo:bar']), + (['foo', 'bar'], ['zip', 'bar'], ['endpoint:test'], ['endpoint:test'], ['endpoint:test', 'zip:zap']), + ), + ids=( + 'Test excluded labels.', + 'Test included labels.', + 'Test excluded and included labels, no override.', + 'Test excluded and included labels, with override.', + ), +) +class TestExcludeIncludeLabels: + def test( + self, + aggregator, + dd_run_check, + mock_http_response, + excluded_labels, + included_labels, + expected_a, + expected_b, + expected_c, + ): mock_http_response( """ # HELP go_memstats_alloc_bytes Number of bytes allocated and still in use. # TYPE go_memstats_alloc_bytes gauge - go_memstats_alloc_bytes{foo="bar",bar="baz"} 6.396288e+06 + go_memstats_alloc_bytes{foo="bar"} 6.396288e+06 + # HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. + # TYPE go_memstats_gc_sys_bytes gauge + go_memstats_gc_sys_bytes{bar="foo"} 901120 + # HELP go_memstats_free_bytes Number of bytes free and available for use. 
+ # TYPE go_memstats_free_bytes gauge + go_memstats_free_bytes{foo="bar", zip="zap"} 6.396288e+06 """ ) - check = get_check({'metrics': ['.+'], 'exclude_labels': ['foo']}) + + check = get_check({'metrics': ['.+'], 'include_labels': included_labels, 'exclude_labels': excluded_labels}) dd_run_check(check) - aggregator.assert_metric( - 'test.go_memstats_alloc_bytes', 6396288, metric_type=aggregator.GAUGE, tags=['endpoint:test', 'bar:baz'] - ) + aggregator.assert_metric('test.go_memstats_alloc_bytes', 6396288, metric_type=aggregator.GAUGE, tags=expected_a) + aggregator.assert_metric('test.go_memstats_gc_sys_bytes', 901120, metric_type=aggregator.GAUGE, tags=expected_b) + aggregator.assert_metric('test.go_memstats_free_bytes', 6396288, metric_type=aggregator.GAUGE, tags=expected_c) aggregator.assert_all_metrics_covered() From 5740da0439d1985ba4657ccff0c1ccc0ac9a8413 Mon Sep 17 00:00:00 2001 From: Ofek Lev Date: Mon, 8 Nov 2021 15:08:15 -0500 Subject: [PATCH 16/19] Add runtime configuration validation (#8896) * Sync config models * re-sync * re-sync * Update tox.ini * try --- .../config_models/__init__.py | 18 ++ .../config_models/defaults.py | 180 ++++++++++++++++++ .../config_models/instance.py | 101 ++++++++++ .../cloud_foundry_api/config_models/shared.py | 54 ++++++ .../config_models/validators.py | 3 + cloud_foundry_api/tox.ini | 2 + 6 files changed, 358 insertions(+) create mode 100644 cloud_foundry_api/datadog_checks/cloud_foundry_api/config_models/__init__.py create mode 100644 cloud_foundry_api/datadog_checks/cloud_foundry_api/config_models/defaults.py create mode 100644 cloud_foundry_api/datadog_checks/cloud_foundry_api/config_models/instance.py create mode 100644 cloud_foundry_api/datadog_checks/cloud_foundry_api/config_models/shared.py create mode 100644 cloud_foundry_api/datadog_checks/cloud_foundry_api/config_models/validators.py diff --git a/cloud_foundry_api/datadog_checks/cloud_foundry_api/config_models/__init__.py b/cloud_foundry_api/datadog_checks/cloud_foundry_api/config_models/__init__.py new file mode 100644 index 0000000000000..ba42dbdc7ffb0 --- /dev/null +++ b/cloud_foundry_api/datadog_checks/cloud_foundry_api/config_models/__init__.py @@ -0,0 +1,18 @@ +# (C) Datadog, Inc. 2021-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +from .instance import InstanceConfig +from .shared import SharedConfig + + +class ConfigMixin: + _config_model_instance: InstanceConfig + _config_model_shared: SharedConfig + + @property + def config(self) -> InstanceConfig: + return self._config_model_instance + + @property + def shared_config(self) -> SharedConfig: + return self._config_model_shared diff --git a/cloud_foundry_api/datadog_checks/cloud_foundry_api/config_models/defaults.py b/cloud_foundry_api/datadog_checks/cloud_foundry_api/config_models/defaults.py new file mode 100644 index 0000000000000..cb2e9fdd1d962 --- /dev/null +++ b/cloud_foundry_api/datadog_checks/cloud_foundry_api/config_models/defaults.py @@ -0,0 +1,180 @@ +# (C) Datadog, Inc. 
2021-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +from datadog_checks.base.utils.models.fields import get_default_field_value + + +def shared_proxy(field, value): + return get_default_field_value(field, value) + + +def shared_service(field, value): + return get_default_field_value(field, value) + + +def shared_skip_proxy(field, value): + return False + + +def shared_timeout(field, value): + return 10 + + +def instance_allow_redirects(field, value): + return True + + +def instance_auth_token(field, value): + return get_default_field_value(field, value) + + +def instance_auth_type(field, value): + return 'basic' + + +def instance_aws_host(field, value): + return get_default_field_value(field, value) + + +def instance_aws_region(field, value): + return get_default_field_value(field, value) + + +def instance_aws_service(field, value): + return get_default_field_value(field, value) + + +def instance_connect_timeout(field, value): + return get_default_field_value(field, value) + + +def instance_disable_generic_tags(field, value): + return False + + +def instance_empty_default_hostname(field, value): + return False + + +def instance_event_filter(field, value): + return get_default_field_value(field, value) + + +def instance_extra_headers(field, value): + return get_default_field_value(field, value) + + +def instance_headers(field, value): + return get_default_field_value(field, value) + + +def instance_kerberos_auth(field, value): + return 'disabled' + + +def instance_kerberos_cache(field, value): + return get_default_field_value(field, value) + + +def instance_kerberos_delegate(field, value): + return False + + +def instance_kerberos_force_initiate(field, value): + return False + + +def instance_kerberos_hostname(field, value): + return get_default_field_value(field, value) + + +def instance_kerberos_keytab(field, value): + return get_default_field_value(field, value) + + +def instance_kerberos_principal(field, value): + return get_default_field_value(field, value) + + +def instance_log_requests(field, value): + return False + + +def instance_min_collection_interval(field, value): + return 15 + + +def instance_ntlm_domain(field, value): + return get_default_field_value(field, value) + + +def instance_password(field, value): + return get_default_field_value(field, value) + + +def instance_persist_connections(field, value): + return False + + +def instance_proxy(field, value): + return get_default_field_value(field, value) + + +def instance_read_timeout(field, value): + return get_default_field_value(field, value) + + +def instance_request_size(field, value): + return 16 + + +def instance_results_per_page(field, value): + return 100 + + +def instance_service(field, value): + return get_default_field_value(field, value) + + +def instance_skip_proxy(field, value): + return False + + +def instance_tags(field, value): + return get_default_field_value(field, value) + + +def instance_timeout(field, value): + return 10 + + +def instance_tls_ca_cert(field, value): + return get_default_field_value(field, value) + + +def instance_tls_cert(field, value): + return get_default_field_value(field, value) + + +def instance_tls_ignore_warning(field, value): + return False + + +def instance_tls_private_key(field, value): + return get_default_field_value(field, value) + + +def instance_tls_use_host_header(field, value): + return False + + +def instance_tls_verify(field, value): + return True + + +def instance_use_legacy_auth_encoding(field, value): + return True + + 
+def instance_username(field, value): + return get_default_field_value(field, value) diff --git a/cloud_foundry_api/datadog_checks/cloud_foundry_api/config_models/instance.py b/cloud_foundry_api/datadog_checks/cloud_foundry_api/config_models/instance.py new file mode 100644 index 0000000000000..e3058161bbfd6 --- /dev/null +++ b/cloud_foundry_api/datadog_checks/cloud_foundry_api/config_models/instance.py @@ -0,0 +1,101 @@ +# (C) Datadog, Inc. 2021-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +from __future__ import annotations + +from typing import Any, Mapping, Optional, Sequence + +from pydantic import BaseModel, Field, root_validator, validator + +from datadog_checks.base.utils.functions import identity +from datadog_checks.base.utils.models import validation + +from . import defaults, validators + + +class AuthToken(BaseModel): + class Config: + allow_mutation = False + + reader: Optional[Mapping[str, Any]] + writer: Optional[Mapping[str, Any]] + + +class Proxy(BaseModel): + class Config: + allow_mutation = False + + http: Optional[str] + https: Optional[str] + no_proxy: Optional[Sequence[str]] + + +class InstanceConfig(BaseModel): + class Config: + allow_mutation = False + + allow_redirects: Optional[bool] + api_url: str + auth_token: Optional[AuthToken] + auth_type: Optional[str] + aws_host: Optional[str] + aws_region: Optional[str] + aws_service: Optional[str] + client_id: str + client_secret: str + connect_timeout: Optional[float] + disable_generic_tags: Optional[bool] + empty_default_hostname: Optional[bool] + event_filter: Optional[Sequence[str]] + extra_headers: Optional[Mapping[str, Any]] + headers: Optional[Mapping[str, Any]] + kerberos_auth: Optional[str] + kerberos_cache: Optional[str] + kerberos_delegate: Optional[bool] + kerberos_force_initiate: Optional[bool] + kerberos_hostname: Optional[str] + kerberos_keytab: Optional[str] + kerberos_principal: Optional[str] + log_requests: Optional[bool] + min_collection_interval: Optional[float] + ntlm_domain: Optional[str] + password: Optional[str] + persist_connections: Optional[bool] + proxy: Optional[Proxy] + read_timeout: Optional[float] + request_size: Optional[float] + results_per_page: Optional[int] = Field(None, le=5000.0) + service: Optional[str] + skip_proxy: Optional[bool] + tags: Optional[Sequence[str]] + timeout: Optional[float] + tls_ca_cert: Optional[str] + tls_cert: Optional[str] + tls_ignore_warning: Optional[bool] + tls_private_key: Optional[str] + tls_use_host_header: Optional[bool] + tls_verify: Optional[bool] + use_legacy_auth_encoding: Optional[bool] + username: Optional[str] + + @root_validator(pre=True) + def _initial_validation(cls, values): + return validation.core.initialize_config(getattr(validators, 'initialize_instance', identity)(values)) + + @validator('*', pre=True, always=True) + def _ensure_defaults(cls, v, field): + if v is not None or field.required: + return v + + return getattr(defaults, f'instance_{field.name}')(field, v) + + @validator('*') + def _run_validations(cls, v, field): + if not v: + return v + + return getattr(validators, f'instance_{field.name}', identity)(v, field=field) + + @root_validator(pre=False) + def _final_validation(cls, values): + return validation.core.finalize_config(getattr(validators, 'finalize_instance', identity)(values)) diff --git a/cloud_foundry_api/datadog_checks/cloud_foundry_api/config_models/shared.py b/cloud_foundry_api/datadog_checks/cloud_foundry_api/config_models/shared.py new file mode 100644 index 
0000000000000..4fc6216ab6c2f --- /dev/null +++ b/cloud_foundry_api/datadog_checks/cloud_foundry_api/config_models/shared.py @@ -0,0 +1,54 @@ +# (C) Datadog, Inc. 2021-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +from __future__ import annotations + +from typing import Optional, Sequence + +from pydantic import BaseModel, root_validator, validator + +from datadog_checks.base.utils.functions import identity +from datadog_checks.base.utils.models import validation + +from . import defaults, validators + + +class Proxy(BaseModel): + class Config: + allow_mutation = False + + http: Optional[str] + https: Optional[str] + no_proxy: Optional[Sequence[str]] + + +class SharedConfig(BaseModel): + class Config: + allow_mutation = False + + proxy: Optional[Proxy] + service: Optional[str] + skip_proxy: Optional[bool] + timeout: Optional[float] + + @root_validator(pre=True) + def _initial_validation(cls, values): + return validation.core.initialize_config(getattr(validators, 'initialize_shared', identity)(values)) + + @validator('*', pre=True, always=True) + def _ensure_defaults(cls, v, field): + if v is not None or field.required: + return v + + return getattr(defaults, f'shared_{field.name}')(field, v) + + @validator('*') + def _run_validations(cls, v, field): + if not v: + return v + + return getattr(validators, f'shared_{field.name}', identity)(v, field=field) + + @root_validator(pre=False) + def _final_validation(cls, values): + return validation.core.finalize_config(getattr(validators, 'finalize_shared', identity)(values)) diff --git a/cloud_foundry_api/datadog_checks/cloud_foundry_api/config_models/validators.py b/cloud_foundry_api/datadog_checks/cloud_foundry_api/config_models/validators.py new file mode 100644 index 0000000000000..9d0b0155542cb --- /dev/null +++ b/cloud_foundry_api/datadog_checks/cloud_foundry_api/config_models/validators.py @@ -0,0 +1,3 @@ +# (C) Datadog, Inc. 
2021-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) diff --git a/cloud_foundry_api/tox.ini b/cloud_foundry_api/tox.ini index a81e66dc5598e..fd2b15396280f 100644 --- a/cloud_foundry_api/tox.ini +++ b/cloud_foundry_api/tox.ini @@ -14,6 +14,8 @@ dd_mypy_args = --non-interactive datadog_checks/ tests/ + --exclude + '.*/config_models/.*\.py$' dd_mypy_deps = types-mock==0.1.5 usedevelop = true From b3c1f979fa48e35ca1fc0d936c4459f285060080 Mon Sep 17 00:00:00 2001 From: Andrew Zhang <31313038+yzhan289@users.noreply.github.com> Date: Mon, 8 Nov 2021 16:03:38 -0500 Subject: [PATCH 17/19] Update README links (#10529) * Update README links * Update README to correct link to install agent * Update README' * Apply suggestions from code review Co-authored-by: Julien Lebot * Fix gunicorn link * Reformat all links Co-authored-by: Julien Lebot --- README.md | 2 +- aerospike/README.md | 35 ++++++------ ambari/README.md | 2 +- apache/README.md | 89 +++++++++++++++--------------- avi_vantage/README.md | 27 ++++----- azure_iot_edge/README.md | 2 +- cilium/README.md | 2 +- citrix_hypervisor/README.md | 35 ++++++------ clickhouse/README.md | 2 +- cloud_foundry_api/README.md | 27 ++++----- consul_connect/README.md | 47 ++++++++-------- containerd/README.md | 27 ++++----- cri/README.md | 21 +++---- databricks/README.md | 31 ++++++----- flink/README.md | 2 +- glusterfs/README.md | 39 ++++++------- gunicorn/README.md | 31 ++++++----- hazelcast/README.md | 2 +- hive/README.md | 2 +- hivemq/README.md | 2 +- hudi/README.md | 2 +- hyperv/README.md | 2 +- ibm_db2/README.md | 2 +- ibm_i/README.md | 31 ++++++----- ignite/README.md | 2 +- kube_apiserver_metrics/README.md | 23 ++++---- kube_controller_manager/README.md | 25 +++++---- kube_metrics_server/README.md | 33 +++++------ kube_scheduler/README.md | 29 +++++----- marklogic/README.md | 47 ++++++++-------- mesos_master/README.md | 2 +- nginx_ingress_controller/README.md | 29 +++++----- oom_kill/README.md | 2 +- openstack/README.md | 48 ++++++++-------- openstack_controller/README.md | 37 +++++++------ otel/README.md | 2 +- process/README.md | 35 ++++++------ prometheus/README.md | 27 ++++----- proxysql/README.md | 2 +- rethinkdb/README.md | 2 +- sap_hana/README.md | 2 +- scylla/README.md | 2 +- sidekiq/README.md | 23 ++++---- singlestore/README.md | 4 +- snowflake/README.md | 2 +- sonarqube/README.md | 2 +- twistlock/README.md | 49 ++++++++-------- vertica/README.md | 2 +- voltdb/README.md | 41 +++++++------- 49 files changed, 480 insertions(+), 456 deletions(-) diff --git a/README.md b/README.md index 6b3639ed7236d..42657409c21f2 100644 --- a/README.md +++ b/README.md @@ -36,7 +36,7 @@ An up-to-date list of all developers authorized to sign releases can be found [h [5]: https://github.com/DataDog/integrations-extras [6]: https://docs.datadoghq.com/developers/integrations/ [7]: https://github.com/DataDog/datadog-agent -[8]: https://docs.datadoghq.com/agent/ +[8]: https://app.datadoghq.com/account/settings#agent [9]: https://docs.pytest.org/en/latest/ [10]: https://packaging.python.org/tutorials/distributing-packages/ [11]: https://docs.datadoghq.com diff --git a/aerospike/README.md b/aerospike/README.md index ce0a8eecc27b9..9c3bd62652bfe 100644 --- a/aerospike/README.md +++ b/aerospike/README.md @@ -14,7 +14,7 @@ If you use an older Aerospike server version, it is still possible to monitor it ### Installation -The Aerospike check is included in the Datadog Agent package. 
+The Aerospike check is included in the [Datadog Agent][2] package. No additional installation is needed on your server. ### Configuration @@ -27,9 +27,9 @@ No additional installation is needed on your server. ##### Metric collection To configure this check for an Agent running on a host: -1. Edit the `aerospike.d/conf.yaml` file, in the `conf.d/` folder at the root of your Agent's configuration directory to start collecting your aerospike performance data. See the [sample aerospike.d/conf.yaml][2] for all available configuration options. +1. Edit the `aerospike.d/conf.yaml` file, in the `conf.d/` folder at the root of your Agent's configuration directory to start collecting your aerospike performance data. See the [sample aerospike.d/conf.yaml][3] for all available configuration options. -2. [Restart the Agent][3]. +2. [Restart the Agent][4]. ##### Log collection @@ -49,9 +49,9 @@ To configure this check for an Agent running on a host: source: aerospike ``` - Change the `path` parameter value and configure them for your environment. See the [sample aerospike.d/conf.yaml][2] for all available configuration options. + Change the `path` parameter value and configure them for your environment. See the [sample aerospike.d/conf.yaml][3] for all available configuration options. -3. [Restart the Agent][3]. +3. [Restart the Agent][4]. @@ -59,7 +59,7 @@ To configure this check for an Agent running on a host: #### Containerized -For containerized environments, see the [Autodiscovery Integration Templates][4] for guidance on applying the parameters below. +For containerized environments, see the [Autodiscovery Integration Templates][5] for guidance on applying the parameters below. ##### Metric collection @@ -73,7 +73,7 @@ For containerized environments, see the [Autodiscovery Integration Templates][4] _Available for Agent versions >6.0_ -Collecting logs is disabled by default in the Datadog Agent. To enable it, see [Kubernetes Log Collection][5]. +Collecting logs is disabled by default in the Datadog Agent. To enable it, see [Kubernetes Log Collection][6]. | Parameter | Value | | -------------- | --------------------------------------------------- | @@ -84,13 +84,13 @@ Collecting logs is disabled by default in the Datadog Agent. To enable it, see [ ### Validation -[Run the Agent's status subcommand][6] and look for `aerospike` under the Checks section. +[Run the Agent's status subcommand][7] and look for `aerospike` under the Checks section. ## Data Collected ### Metrics -See [metadata.csv][7] for a list of metrics provided by this integration. +See [metadata.csv][8] for a list of metrics provided by this integration. ### Service Checks @@ -103,13 +103,14 @@ Aerospike does not include any events. ## Troubleshooting -Need help? Contact [Datadog support][8]. +Need help? Contact [Datadog support][9]. 
[1]: https://download.aerospike.com/download/client/python/notes.html#5.0.0 -[2]: https://github.com/DataDog/integrations-core/blob/master/aerospike/datadog_checks/aerospike/data/conf.yaml.example -[3]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent -[4]: https://docs.datadoghq.com/agent/kubernetes/integrations/ -[5]: https://docs.datadoghq.com/agent/kubernetes/log/ -[6]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information -[7]: https://github.com/DataDog/integrations-core/blob/master/aerospike/metadata.csv -[8]: https://docs.datadoghq.com/help/ +[2]: https://app.datadoghq.com/account/settings#agent +[3]: https://github.com/DataDog/integrations-core/blob/master/aerospike/datadog_checks/aerospike/data/conf.yaml.example +[4]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent +[5]: https://docs.datadoghq.com/agent/kubernetes/integrations/ +[6]: https://docs.datadoghq.com/agent/kubernetes/log/ +[7]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information +[8]: https://github.com/DataDog/integrations-core/blob/master/aerospike/metadata.csv +[9]: https://docs.datadoghq.com/help/ diff --git a/ambari/README.md b/ambari/README.md index 90220cb7d112d..9b189a96fa38a 100644 --- a/ambari/README.md +++ b/ambari/README.md @@ -127,7 +127,7 @@ Need help? Contact [Datadog support][10]. [1]: https://ambari.apache.org -[2]: https://docs.datadoghq.com/agent/ +[2]: https://app.datadoghq.com/account/settings#agent [3]: https://github.com/DataDog/integrations-core/blob/master/ambari/datadog_checks/ambari/data/conf.yaml.example [4]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent [5]: https://docs.datadoghq.com/agent/kubernetes/integrations/ diff --git a/apache/README.md b/apache/README.md index de74ab1398a49..8a4e17131d2e0 100644 --- a/apache/README.md +++ b/apache/README.md @@ -10,9 +10,9 @@ The Apache check tracks requests per second, bytes served, number of worker thre ### Installation -The Apache check is packaged with the Agent. To start gathering your Apache metrics and logs, you need to: +The Apache check is packaged with the [Datadog Agent][2]. To start gathering your Apache metrics and logs, you need to: -1. [Install the Agent][2] on your Apache servers. +1. [Install the Agent][3] on your Apache servers. 2. Install `mod_status` on your Apache servers and enable `ExtendedStatus`. @@ -27,7 +27,7 @@ To configure this check for an Agent running on a host: ##### Metric collection -1. Edit the `apache.d/conf.yaml` file in the `conf.d/` folder at the root of your [Agent's configuration directory][3] to start collecting your Apache metrics. See the [sample apache.d/conf.yaml][4] for all available configuration options. +1. Edit the `apache.d/conf.yaml` file in the `conf.d/` folder at the root of your [Agent's configuration directory][4] to start collecting your Apache metrics. See the [sample apache.d/conf.yaml][5] for all available configuration options. ```yaml init_config: @@ -39,7 +39,7 @@ To configure this check for an Agent running on a host: - apache_status_url: http://localhost/server-status?auto ``` -2. [Restart the Agent][5]. +2. [Restart the Agent][6]. ##### Log collection @@ -68,9 +68,9 @@ _Available for Agent versions >6.0_ sourcecategory: http_web_error ``` - See the [sample apache.d/conf.yaml][4] for all available configuration options. + See the [sample apache.d/conf.yaml][5] for all available configuration options. -3. 
[Restart the Agent][5]. +3. [Restart the Agent][6]. @@ -81,7 +81,7 @@ To configure this check for an Agent running on a container: ##### Metric collection -Set [Autodiscovery Integrations Templates][6] as Docker labels on your application container: +Set [Autodiscovery Integrations Templates][7] as Docker labels on your application container: ```yaml LABEL "com.datadoghq.ad.check_names"='["apache"]' @@ -92,9 +92,9 @@ LABEL "com.datadoghq.ad.instances"='[{"apache_status_url": "http://%%host%%/serv ##### Log collection -Collecting logs is disabled by default in the Datadog Agent. To enable it, see [Docker Log Collection][7]. +Collecting logs is disabled by default in the Datadog Agent. To enable it, see [Docker Log Collection][8]. -Then, set [Log Integrations][8] as Docker labels: +Then, set [Log Integrations][9] as Docker labels: ```yaml LABEL "com.datadoghq.ad.logs"='[{"source": "apache", "service": ""}]' @@ -109,7 +109,7 @@ To configure this check for an Agent running on Kubernetes: ##### Metric collection -Set [Autodiscovery Integrations Templates][9] as pod annotations on your application container. Aside from this, templates can also be configured with [a file, a configmap, or a key-value store][10]. +Set [Autodiscovery Integrations Templates][10] as pod annotations on your application container. Aside from this, templates can also be configured with [a file, a configmap, or a key-value store][11]. ```yaml apiVersion: v1 @@ -133,9 +133,9 @@ spec: ##### Log collection -Collecting logs is disabled by default in the Datadog Agent. To enable it, see [Kubernetes Log Collection][11]. +Collecting logs is disabled by default in the Datadog Agent. To enable it, see [Kubernetes Log Collection][12]. -Then, set [Log Integrations][8] as pod annotations. This can also be configured with [a file, a configmap, or a key-value store][12]. +Then, set [Log Integrations][9] as pod annotations. This can also be configured with [a file, a configmap, or a key-value store][13]. ```yaml apiVersion: v1 @@ -159,7 +159,7 @@ To configure this check for an Agent running on ECS: ##### Metric collection -Set [Autodiscovery Integrations Templates][6] as Docker labels on your application container: +Set [Autodiscovery Integrations Templates][7] as Docker labels on your application container: ```json { @@ -178,9 +178,9 @@ Set [Autodiscovery Integrations Templates][6] as Docker labels on your applicati ##### Log collection -Collecting logs is disabled by default in the Datadog Agent. To enable it, see [ECS Log Collection][13]. +Collecting logs is disabled by default in the Datadog Agent. To enable it, see [ECS Log Collection][14]. -Then, set [Log Integrations][8] as Docker labels: +Then, set [Log Integrations][9] as Docker labels: ```json { @@ -199,13 +199,13 @@ Then, set [Log Integrations][8] as Docker labels: ### Validation -[Run the Agent's status subcommand][14] and look for `apache` under the Checks section. +[Run the Agent's status subcommand][15] and look for `apache` under the Checks section. ## Data Collected ### Metrics -See [metadata.csv][15] for a list of metrics provided by this check. +See [metadata.csv][16] for a list of metrics provided by this check. ### Events @@ -213,43 +213,44 @@ The Apache check does not include any events. ### Service Checks -See [service_checks.json][16] for a list of service checks provided by this integration. +See [service_checks.json][17] for a list of service checks provided by this integration. 
## Troubleshooting ### Apache status URL -If you are having issues with your Apache integration, it is mostly like due to the Agent not being able to access your Apache status URL. Try running curl for the `apache_status_url` listed in [your `apache.d/conf.yaml` file][4] (include your login credentials if applicable). +If you are having issues with your Apache integration, it is mostly like due to the Agent not being able to access your Apache status URL. Try running curl for the `apache_status_url` listed in [your `apache.d/conf.yaml` file][5] (include your login credentials if applicable). -- [Apache SSL certificate issues][17] +- [Apache SSL certificate issues][18] ## Further Reading Additional helpful documentation, links, and articles: -- [Deploying and configuring Datadog with CloudFormation][18] -- [Monitoring Apache web server performance][19] -- [How to collect Apache performance metrics][20] -- [How to monitor Apache web server with Datadog][21] +- [Deploying and configuring Datadog with CloudFormation][19] +- [Monitoring Apache web server performance][20] +- [How to collect Apache performance metrics][21] +- [How to monitor Apache web server with Datadog][22] [1]: https://raw.githubusercontent.com/DataDog/integrations-core/master/apache/images/apache_dashboard.png -[2]: https://docs.datadoghq.com/agent/ -[3]: https://docs.datadoghq.com/agent/guide/agent-configuration-files/#agent-configuration-directory -[4]: https://github.com/DataDog/integrations-core/blob/master/apache/datadog_checks/apache/data/conf.yaml.example -[5]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent -[6]: https://docs.datadoghq.com/agent/docker/integrations/?tab=docker -[7]: https://docs.datadoghq.com/agent/docker/log/?tab=containerinstallation#installation -[8]: https://docs.datadoghq.com/agent/docker/log/?tab=containerinstallation#log-integrations -[9]: https://docs.datadoghq.com/agent/kubernetes/integrations/?tab=kubernetes -[10]: https://docs.datadoghq.com/agent/kubernetes/integrations/?tab=kubernetes#configuration -[11]: https://docs.datadoghq.com/agent/kubernetes/log/?tab=containerinstallation#setup -[12]: https://docs.datadoghq.com/agent/kubernetes/log/?tab=daemonset#configuration -[13]: https://docs.datadoghq.com/agent/amazon_ecs/logs/?tab=linux -[14]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information -[15]: https://github.com/DataDog/integrations-core/blob/master/apache/metadata.csv -[16]: https://github.com/DataDog/integrations-core/blob/master/apache/assets/service_checks.json -[17]: https://docs.datadoghq.com/integrations/faq/apache-ssl-certificate-issues/ -[18]: https://www.datadoghq.com/blog/deploying-datadog-with-cloudformation -[19]: https://www.datadoghq.com/blog/monitoring-apache-web-server-performance -[20]: https://www.datadoghq.com/blog/collect-apache-performance-metrics -[21]: https://www.datadoghq.com/blog/monitor-apache-web-server-datadog +[2]: https://app.datadoghq.com/account/settings#agent +[3]: https://docs.datadoghq.com/agent/ +[4]: https://docs.datadoghq.com/agent/guide/agent-configuration-files/#agent-configuration-directory +[5]: https://github.com/DataDog/integrations-core/blob/master/apache/datadog_checks/apache/data/conf.yaml.example +[6]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent +[7]: https://docs.datadoghq.com/agent/docker/integrations/?tab=docker +[8]: https://docs.datadoghq.com/agent/docker/log/?tab=containerinstallation#installation +[9]: 
https://docs.datadoghq.com/agent/docker/log/?tab=containerinstallation#log-integrations +[10]: https://docs.datadoghq.com/agent/kubernetes/integrations/?tab=kubernetes +[11]: https://docs.datadoghq.com/agent/kubernetes/integrations/?tab=kubernetes#configuration +[12]: https://docs.datadoghq.com/agent/kubernetes/log/?tab=containerinstallation#setup +[13]: https://docs.datadoghq.com/agent/kubernetes/log/?tab=daemonset#configuration +[14]: https://docs.datadoghq.com/agent/amazon_ecs/logs/?tab=linux +[15]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information +[16]: https://github.com/DataDog/integrations-core/blob/master/apache/metadata.csv +[17]: https://github.com/DataDog/integrations-core/blob/master/apache/assets/service_checks.json +[18]: https://docs.datadoghq.com/integrations/faq/apache-ssl-certificate-issues/ +[19]: https://www.datadoghq.com/blog/deploying-datadog-with-cloudformation +[20]: https://www.datadoghq.com/blog/monitoring-apache-web-server-performance +[21]: https://www.datadoghq.com/blog/collect-apache-performance-metrics +[22]: https://www.datadoghq.com/blog/monitor-apache-web-server-datadog diff --git a/avi_vantage/README.md b/avi_vantage/README.md index b9ecea027c736..dda96aa3d5bf4 100644 --- a/avi_vantage/README.md +++ b/avi_vantage/README.md @@ -10,28 +10,28 @@ Follow the instructions below to install and configure this check for an Agent r ### Installation -The Avi Vantage check is included in the [Datadog Agent][2] package. +The Avi Vantage check is included in the [Datadog Agent][3] package. No additional installation is needed on your server. ### Configuration -1. Edit the `avi_vantage.d/conf.yaml` file, in the `conf.d/` folder at the root of your Agent's configuration directory to start collecting your avi_vantage performance data. See the [sample avi_vantage.d/conf.yaml][3] for all available configuration options. +1. Edit the `avi_vantage.d/conf.yaml` file, in the `conf.d/` folder at the root of your Agent's configuration directory to start collecting your avi_vantage performance data. See the [sample avi_vantage.d/conf.yaml][4] for all available configuration options. -2. [Restart the Agent][4]. +2. [Restart the Agent][5]. ### Validation -[Run the Agent's status subcommand][5] and look for `avi_vantage` under the Checks section. +[Run the Agent's status subcommand][6] and look for `avi_vantage` under the Checks section. ## Data Collected ### Metrics -See [metadata.csv][6] for a list of metrics provided by this check. +See [metadata.csv][7] for a list of metrics provided by this check. ### Service Checks -See [service_checks.json][7] for a list of service checks provided by this integration. +See [service_checks.json][8] for a list of service checks provided by this integration. ### Events @@ -39,13 +39,14 @@ Avi Vantage does not include any events. ## Troubleshooting -Need help? Contact [Datadog support][8]. +Need help? Contact [Datadog support][9]. 
[1]: https://avinetworks.com/why-avi/multi-cloud-load-balancing/ [2]: https://docs.datadoghq.com/agent/kubernetes/integrations/ -[3]: https://github.com/DataDog/integrations-core/blob/master/avi_vantage/datadog_checks/avi_vantage/data/conf.yaml.example -[4]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent -[5]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information -[6]: https://github.com/DataDog/integrations-core/blob/master/avi_vantage/metadata.csv -[7]: https://github.com/DataDog/integrations-core/blob/master/avi_vantage/assets/service_checks.json -[8]: https://docs.datadoghq.com/help/ +[3]: https://app.datadoghq.com/account/settings#agent +[4]: https://github.com/DataDog/integrations-core/blob/master/avi_vantage/datadog_checks/avi_vantage/data/conf.yaml.example +[5]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent +[6]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information +[7]: https://github.com/DataDog/integrations-core/blob/master/avi_vantage/metadata.csv +[8]: https://github.com/DataDog/integrations-core/blob/master/avi_vantage/assets/service_checks.json +[9]: https://docs.datadoghq.com/help/ diff --git a/azure_iot_edge/README.md b/azure_iot_edge/README.md index 3df77750ef493..58fd613cef976 100644 --- a/azure_iot_edge/README.md +++ b/azure_iot_edge/README.md @@ -128,7 +128,7 @@ Need help? Contact [Datadog support][10]. - [Monitor Azure IoT Edge with Datadog][11] [1]: https://azure.microsoft.com/en-us/services/iot-edge/ -[2]: https://docs.datadoghq.com/agent/ +[2]: https://app.datadoghq.com/account/settings#agent [3]: https://docs.microsoft.com/en-us/azure/iot-edge/how-to-deploy-modules-portal [4]: https://github.com/DataDog/integrations-core/blob/master/azure_iot_edge/datadog_checks/azure_iot_edge/data/conf.yaml.example [5]: https://docs.datadoghq.com/agent/docker/integrations/ diff --git a/cilium/README.md b/cilium/README.md index 10b36acc93046..1ad5447435e84 100644 --- a/cilium/README.md +++ b/cilium/README.md @@ -124,7 +124,7 @@ Need help? Contact [Datadog support][11]. [1]: https://cilium.io [2]: https://docs.datadoghq.com/agent/kubernetes/integrations/ -[3]: https://docs.datadoghq.com/agent/ +[3]: https://app.datadoghq.com/account/settings#agent [4]: https://github.com/DataDog/integrations-core/blob/master/cilium/datadog_checks/cilium/data/conf.yaml.example [5]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent [6]: https://docs.datadoghq.com/agent/kubernetes/?tab=daemonset#installation diff --git a/citrix_hypervisor/README.md b/citrix_hypervisor/README.md index 9b6bb3bb4149f..6268a73f8ba6b 100644 --- a/citrix_hypervisor/README.md +++ b/citrix_hypervisor/README.md @@ -10,21 +10,21 @@ Follow the instructions below to install and configure this check for an Agent r ### Installation -The Citrix Hypervisor check is included in the [Datadog Agent][2] package. +The Citrix Hypervisor check is included in the [Datadog Agent][3] package. No additional installation is needed on your server. The recommended way to monitor Citrix hypervisors is to install one Datadog Agent on each hypervisor. #### Datadog User -The Citrix Hypervisor integration requires a user with at least [`read-only`][3] access to monitor the service. +The Citrix Hypervisor integration requires a user with at least [`read-only`][4] access to monitor the service. ### Configuration #### Host -1. 
Edit the `citrix_hypervisor.d/conf.yaml` file, in the `conf.d/` folder at the root of your Agent's configuration directory to start collecting your Citrix Hypervisor performance data. See the [sample citrix_hypervisor.d/conf.yaml][4] for all available configuration options. +1. Edit the `citrix_hypervisor.d/conf.yaml` file, in the `conf.d/` folder at the root of your Agent's configuration directory to start collecting your Citrix Hypervisor performance data. See the [sample citrix_hypervisor.d/conf.yaml][5] for all available configuration options. -2. [Restart the Agent][5]. +2. [Restart the Agent][6]. #### Log collection @@ -43,19 +43,19 @@ _Available for Agent versions >6.0_ path: /var/log/xensource.log source: citrix_hypervisor ``` - Change the `path` value and configure it for your environment. See the [sample `citrix_hypervisor.d/conf.yaml` file][4] for all available configuration options. + Change the `path` value and configure it for your environment. See the [sample `citrix_hypervisor.d/conf.yaml` file][5] for all available configuration options. -3. [Restart the Agent][5]. +3. [Restart the Agent][6]. ### Validation -[Run the Agent's status subcommand][6] and look for `citrix_hypervisor` under the Checks section. +[Run the Agent's status subcommand][7] and look for `citrix_hypervisor` under the Checks section. ## Data Collected ### Metrics -See [metadata.csv][7] for a list of metrics provided by this check. +See [metadata.csv][8] for a list of metrics provided by this check. ### Events @@ -63,19 +63,20 @@ The Citrix Hypervisor integration does not include any events. ### Service Checks -See [service_checks.json][8] for a list of service checks provided by this integration. +See [service_checks.json][9] for a list of service checks provided by this integration. ## Troubleshooting -Need help? Contact [Datadog support][9]. +Need help? Contact [Datadog support][10]. [1]: https://www.citrix.com/products/citrix-hypervisor/ [2]: https://docs.datadoghq.com/agent/kubernetes/integrations/ -[3]: https://docs.citrix.com/en-us/xencenter/7-1/rbac-roles.html -[4]: https://github.com/DataDog/integrations-core/blob/master/citrix_hypervisor/datadog_checks/citrix_hypervisor/data/conf.yaml.example -[5]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent -[6]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information -[7]: https://github.com/DataDog/integrations-core/blob/master/citrix_hypervisor/metadata.csv -[8]: https://github.com/DataDog/integrations-core/blob/master/citrix_hypervisor/assets/service_checks.json -[9]: https://docs.datadoghq.com/help/ +[3]: https://app.datadoghq.com/account/settings#agent +[4]: https://docs.citrix.com/en-us/xencenter/7-1/rbac-roles.html +[5]: https://github.com/DataDog/integrations-core/blob/master/citrix_hypervisor/datadog_checks/citrix_hypervisor/data/conf.yaml.example +[6]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent +[7]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information +[8]: https://github.com/DataDog/integrations-core/blob/master/citrix_hypervisor/metadata.csv +[9]: https://github.com/DataDog/integrations-core/blob/master/citrix_hypervisor/assets/service_checks.json +[10]: https://docs.datadoghq.com/help/ diff --git a/clickhouse/README.md b/clickhouse/README.md index e04d8d5835ac3..22ce6bb098cd4 100644 --- a/clickhouse/README.md +++ b/clickhouse/README.md @@ -100,7 +100,7 @@ Need help? Contact [Datadog support][10]. 
[1]: https://clickhouse.yandex [2]: https://docs.datadoghq.com/agent/kubernetes/integrations/ -[3]: https://docs.datadoghq.com/agent/ +[3]: https://app.datadoghq.com/account/settings#agent [4]: https://github.com/DataDog/integrations-core/blob/master/clickhouse/datadog_checks/clickhouse/data/conf.yaml.example [5]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent [6]: https://docs.datadoghq.com/agent/kubernetes/log/ diff --git a/cloud_foundry_api/README.md b/cloud_foundry_api/README.md index 58ade96babc9e..8008c25da8ec0 100644 --- a/cloud_foundry_api/README.md +++ b/cloud_foundry_api/README.md @@ -10,24 +10,24 @@ Follow the instructions below to install and configure this check for an Agent r ### Installation -The Cloud Foundry API check is included in the [Datadog Agent][2] package. +The Cloud Foundry API check is included in the [Datadog Agent][3] package. No additional installation is needed on your server. ### Configuration -1. Edit the `cloud_foundry_api.d/conf.yaml` file, in the `conf.d/` folder at the root of your Agent's configuration directory to start collecting your Cloud Foundry API data. See the [sample cloud_foundry_api.d/conf.yaml][3] for all available configuration options. +1. Edit the `cloud_foundry_api.d/conf.yaml` file, in the `conf.d/` folder at the root of your Agent's configuration directory to start collecting your Cloud Foundry API data. See the [sample cloud_foundry_api.d/conf.yaml][4] for all available configuration options. -2. [Restart the Agent][4]. +2. [Restart the Agent][5]. ### Validation -[Run the Agent's status subcommand][5] and look for `cloud_foundry_api` under the Checks section. +[Run the Agent's status subcommand][6] and look for `cloud_foundry_api` under the Checks section. ## Data Collected ### Metrics -See [metadata.csv][6] for a list of metrics provided by this check. +See [metadata.csv][7] for a list of metrics provided by this check. ### Events @@ -35,18 +35,19 @@ The Cloud Foundry API integration collects the configured audit events. ### Service Checks -See [service_checks.json][7] for a list of service checks provided by this integration. +See [service_checks.json][8] for a list of service checks provided by this integration. ## Troubleshooting -Need help? Contact [Datadog support][8]. +Need help? Contact [Datadog support][9]. 
[1]: http://v3-apidocs.cloudfoundry.org [2]: https://docs.datadoghq.com/agent/kubernetes/integrations -[3]: https://github.com/DataDog/integrations-core/blob/master/cloud_foundry_api/datadog_checks/cloud_foundry_api/data/conf.yaml.example -[4]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent -[5]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information -[6]: https://github.com/DataDog/integrations-core/blob/master/cloud_foundry_api/metadata.csv -[7]: https://github.com/DataDog/integrations-core/blob/master/cloud_foundry_api/assets/service_checks.json -[8]: https://docs.datadoghq.com/help +[3]: https://app.datadoghq.com/account/settings#agent +[4]: https://github.com/DataDog/integrations-core/blob/master/cloud_foundry_api/datadog_checks/cloud_foundry_api/data/conf.yaml.example +[5]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent +[6]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information +[7]: https://github.com/DataDog/integrations-core/blob/master/cloud_foundry_api/metadata.csv +[8]: https://github.com/DataDog/integrations-core/blob/master/cloud_foundry_api/assets/service_checks.json +[9]: https://docs.datadoghq.com/help diff --git a/consul_connect/README.md b/consul_connect/README.md index 19b201120cc9d..f1b416034da95 100644 --- a/consul_connect/README.md +++ b/consul_connect/README.md @@ -8,7 +8,7 @@ Monitor your [Consul Connect][1] Envoy sidecar proxies with the [Datadog Envoy I ### Installation -Install the Datadog Agent on your services running Consul Connect and follow the [Configuration](#configuration) instructions for your appropriate environment. +Install the [Datadog Agent][4] on your services running Consul Connect and follow the [Configuration](#configuration) instructions for your appropriate environment. ### Configuration Follow the instructions below to configure this check for an Agent running on a host. For containerized environments, see the [Containerized](#containerized) section. @@ -21,44 +21,44 @@ Follow the instructions below to configure this check for an Agent running on a To configure this check for an Agent running on a host: ##### Metric collection -1. In Consul Connect, enable the config option [`-admin-bind`][4] to configure the port where the Envoy Admin API will be exposed. +1. In Consul Connect, enable the config option [`-admin-bind`][5] to configure the port where the Envoy Admin API will be exposed. -2. Enable the [Envoy integration][5] to configure metric collection. +2. Enable the [Envoy integration][6] to configure metric collection. ##### Log collection -Follow the [Envoy host][6] instructions to configure log collection. +Follow the [Envoy host][7] instructions to configure log collection. #### Containerized -Follow the [Envoy containerized instructions][7] to configure your Datadog Agent for Envoy. +Follow the [Envoy containerized instructions][8] to configure your Datadog Agent for Envoy. ##### Metric collection -1. In Consul Connect, enable the config option [`envoy_stats_bind_addr`][8] to ensure the `/stats` endpoint is exposed on the public network. +1. In Consul Connect, enable the config option [`envoy_stats_bind_addr`][9] to ensure the `/stats` endpoint is exposed on the public network. - 2. Configure the [Envoy integration for containerized environments instructions][9] to start collecting metrics. + 2. 
Configure the [Envoy integration for containerized environments instructions][10] to start collecting metrics. ##### Log collection -Follow the [Envoy containerized instructions][10] to configure log collection. +Follow the [Envoy containerized instructions][11] to configure log collection. ### Validation -[Run the Agent's status subcommand][11] and look for `envoy` under the Checks section. +[Run the Agent's status subcommand][12] and look for `envoy` under the Checks section. ## Data Collected ### Metrics -See the [Envoy Integration documentation][12] for a list of metrics collected. +See the [Envoy Integration documentation][13] for a list of metrics collected. ### Service Checks -See the [Envoy Integration documentation][13] for the list of service checks collected. +See the [Envoy Integration documentation][14] for the list of service checks collected. ### Events @@ -66,19 +66,20 @@ Consul Connect does not include any events. ## Troubleshooting -Need help? Contact [Datadog support][14]. +Need help? Contact [Datadog support][15]. [1]: https://www.consul.io/docs/connect#connect [2]: https://docs.datadoghq.com/integrations/envoy/ [3]: https://www.consul.io/docs/connect/proxies/envoy#envoy-integration -[4]: https://www.consul.io/commands/connect/envoy#admin-bind -[5]: https://docs.datadoghq.com/integrations/envoy/?tab=host#metric-collection -[6]: https://docs.datadoghq.com/integrations/envoy/?tab=host#log-collection -[7]: https://docs.datadoghq.com/integrations/envoy/?tab=containerized#containerized -[8]: https://www.consul.io/docs/connect/proxies/envoy#envoy_stats_bind_addr -[9]: https://docs.datadoghq.com/integrations/envoy/?tab=containerized#metric-collection -[10]: https://docs.datadoghq.com/integrations/envoy/?tab=containerized#log-collection -[11]: https://docs.datadoghq.com/agent/guide/agent-commands/?#agent-status-and-information -[12]: https://docs.datadoghq.com/integrations/envoy/?tab=host#metrics -[13]: https://docs.datadoghq.com/integrations/envoy/?tab=host#service-checks -[14]: https://docs.datadoghq.com/help/ +[4]: https://app.datadoghq.com/account/settings#agent +[5]: https://www.consul.io/commands/connect/envoy#admin-bind +[6]: https://docs.datadoghq.com/integrations/envoy/?tab=host#metric-collection +[7]: https://docs.datadoghq.com/integrations/envoy/?tab=host#log-collection +[8]: https://docs.datadoghq.com/integrations/envoy/?tab=containerized#containerized +[9]: https://www.consul.io/docs/connect/proxies/envoy#envoy_stats_bind_addr +[10]: https://docs.datadoghq.com/integrations/envoy/?tab=containerized#metric-collection +[11]: https://docs.datadoghq.com/integrations/envoy/?tab=containerized#log-collection +[12]: https://docs.datadoghq.com/agent/guide/agent-commands/?#agent-status-and-information +[13]: https://docs.datadoghq.com/integrations/envoy/?tab=host#metrics +[14]: https://docs.datadoghq.com/integrations/envoy/?tab=host#service-checks +[15]: https://docs.datadoghq.com/help/ diff --git a/containerd/README.md b/containerd/README.md index 1ee80fa3be5a9..8412986f77e63 100644 --- a/containerd/README.md +++ b/containerd/README.md @@ -8,7 +8,7 @@ This check monitors the Containerd container runtime. ### Installation -Containerd is a core Agent 6 check. You must configure Containerd in both `datadog.yaml` and `containerd.d/conf.yaml`. +Containerd is a core [Datadog Agent][1] check. You must configure Containerd in both `datadog.yaml` and `containerd.d/conf.yaml`. In `datadog.yaml`, configure your `cri_socket_path` for the Agent to query Containerd. 
In `containerd.d/conf.yaml`, configure the check instance settings (such as `filters`) for the events. @@ -87,13 +87,13 @@ spec: ### Configuration -1. Edit the `containerd.d/conf.yaml` file, in the `conf.d/` folder at the root of your Agent's configuration directory to start collecting your Containerd performance data. See the [sample containerd.d/conf.yaml][1] for all available configuration options. +1. Edit the `containerd.d/conf.yaml` file, in the `conf.d/` folder at the root of your Agent's configuration directory to start collecting your Containerd performance data. See the [sample containerd.d/conf.yaml][2] for all available configuration options. -2. [Restart the Agent][2] +2. [Restart the Agent][3] ### Validation -[Run the Agent's `status` subcommand][3] and look for `containerd` under the Checks section. +[Run the Agent's `status` subcommand][4] and look for `containerd` under the Checks section. ## Data Collected @@ -103,23 +103,24 @@ Containerd collects metrics about the resource usage of your containers. CPU, memory, block I/O, or huge page table metrics are collected out of the box. Additionally, you can also collect some disk metrics. -See [metadata.csv][4] for a list of metrics provided by this integration. +See [metadata.csv][5] for a list of metrics provided by this integration. ### Events -The Containerd check can collect events. Use `filters` to select the relevant events. Refer to the [sample containerd.d/conf.yaml][1] to have more details. +The Containerd check can collect events. Use `filters` to select the relevant events. Refer to the [sample containerd.d/conf.yaml][2] to have more details. ### Service Checks -See [service_checks.json][5] for a list of service checks provided by this integration. +See [service_checks.json][6] for a list of service checks provided by this integration. ## Troubleshooting -Need help? Contact [Datadog support][2]. +Need help? Contact [Datadog support][3]. -[1]: https://github.com/DataDog/datadog-agent/blob/master/cmd/agent/dist/conf.d/containerd.d/conf.yaml.default -[2]: https://docs.datadoghq.com/help/ -[3]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent -[4]: https://github.com/DataDog/integrations-core/blob/master/containerd/metadata.csv -[5]: https://github.com/DataDog/integrations-core/blob/master/containerd/assets/service_checks.json +[1]: https://app.datadoghq.com/account/settings#agent +[2]: https://github.com/DataDog/datadog-agent/blob/master/cmd/agent/dist/conf.d/containerd.d/conf.yaml.default +[3]: https://docs.datadoghq.com/help/ +[4]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent +[5]: https://github.com/DataDog/integrations-core/blob/master/containerd/metadata.csv +[6]: https://github.com/DataDog/integrations-core/blob/master/containerd/assets/service_checks.json diff --git a/cri/README.md b/cri/README.md index 36d7ca20a416f..60ad9ed8c8a7e 100644 --- a/cri/README.md +++ b/cri/README.md @@ -8,7 +8,7 @@ This check monitors a Container Runtime Interface ### Installation -CRI is a core agent 6 check and thus need to be configured in both in `datadog.yaml` and with `cri.d/conf.yaml`. +CRI is a core [Datadog Agent][1] check and thus need to be configured in both in `datadog.yaml` and with `cri.d/conf.yaml`. 
In `datadog.yaml` you will need to configure your `cri_socket_path` for the agent to query your current CRI (you can also configure default timeouts) and in `cri.d/conf.yaml` you can configure the check instance settings such as `collect_disk` if your CRI (such as `containerd`) reports disk usage metrics. @@ -53,13 +53,13 @@ spec: ### Configuration -1. Edit the `cri.d/conf.yaml` file, in the `conf.d/` folder at the root of your Agent's configuration directory to start collecting your crio performance data. See the [sample cri.d/conf.yaml][1] for all available configuration options. +1. Edit the `cri.d/conf.yaml` file, in the `conf.d/` folder at the root of your Agent's configuration directory to start collecting your crio performance data. See the [sample cri.d/conf.yaml][2] for all available configuration options. -2. [Restart the Agent][2] +2. [Restart the Agent][3] ### Validation -[Run the Agent's `status` subcommand][2] and look for `cri` under the Checks section. +[Run the Agent's `status` subcommand][3] and look for `cri` under the Checks section. ## Data Collected @@ -70,7 +70,7 @@ CRI collect metrics about the resource usage of your containers running through CPU and memory metrics are collected out of the box and you can additionally collect some disk metrics if they are supported by your CRI (CRI-O doesn't support them for now) -See [metadata.csv][3] for a list of metrics provided by this integration. +See [metadata.csv][4] for a list of metrics provided by this integration. ### Service Checks @@ -82,9 +82,10 @@ CRI does not include any events. ## Troubleshooting -Need help? Contact [Datadog support][4]. +Need help? Contact [Datadog support][5]. -[1]: https://github.com/DataDog/datadog-agent/blob/master/cmd/agent/dist/conf.d/cri.d/conf.yaml.default -[2]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent -[3]: https://github.com/DataDog/integrations-core/blob/master/cri/metadata.csv -[4]: https://docs.datadoghq.com/help/ +[1]: https://app.datadoghq.com/account/settings#agent +[2]: https://github.com/DataDog/datadog-agent/blob/master/cmd/agent/dist/conf.d/cri.d/conf.yaml.default +[3]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent +[4]: https://github.com/DataDog/integrations-core/blob/master/cri/metadata.csv +[5]: https://docs.datadoghq.com/help/ diff --git a/databricks/README.md b/databricks/README.md index 0278c4a6c593a..25cff81ddafbc 100644 --- a/databricks/README.md +++ b/databricks/README.md @@ -8,7 +8,7 @@ Monitor your [Databricks][1] clusters with the Datadog [Spark integration][2]. ### Installation -Monitor Databricks Spark applications with the [Datadog Spark integration][3]. Install the Datadog Agent on your clusters following the [Configuration](#configuration) instructions for your appropriate cluster. +Monitor Databricks Spark applications with the [Datadog Spark integration][3]. Install the [Datadog Agent][4] on your clusters following the [Configuration](#configuration) instructions for your appropriate cluster. ### Configuration @@ -30,9 +30,9 @@ Configure the Spark integration to monitor your Apache Spark Cluster on Databric ##### Install the Datadog Agent on Driver -Install the Datadog Agent on the driver node of the cluster. This is a updated version of the [Datadog Init Script][4] Databricks notebook example. +Install the Datadog Agent on the driver node of the cluster. This is a updated version of the [Datadog Init Script][5] Databricks notebook example. 
-After creating the `datadog-install-driver-only.sh` script, add the init script path in the [cluster configuration page][5]. +After creating the `datadog-install-driver-only.sh` script, add the init script path in the [cluster configuration page][6]. ```shell script %python @@ -115,7 +115,7 @@ fi ##### Install the Datadog Agent on driver and worker nodes -After creating the `datadog-install-driver-workers.sh` script, add the init script path in the [cluster configuration page][5]. +After creating the `datadog-install-driver-workers.sh` script, add the init script path in the [cluster configuration page][6]. ```shell script %python @@ -197,7 +197,7 @@ chmod a+x /tmp/start_datadog.sh #### Job cluster -After creating the `datadog-install-job-driver-mode.sh` script, add the init script path in the [cluster configuration page][5]. +After creating the `datadog-install-job-driver-mode.sh` script, add the init script path in the [cluster configuration page][6]. **Note**: Job clusters are monitored in `spark_driver_mode` with the Spark UI port. @@ -279,18 +279,18 @@ fi ### Validation -[Run the Agent's status subcommand][6] and look for `spark` under the Checks section. +[Run the Agent's status subcommand][7] and look for `spark` under the Checks section. ## Data Collected ### Metrics -See the [Spark integration documentation][7] for a list of metrics collected. +See the [Spark integration documentation][8] for a list of metrics collected. ### Service Checks -See the [Spark integration documentation][8] for the list of service checks collected. +See the [Spark integration documentation][9] for the list of service checks collected. ### Events @@ -298,7 +298,7 @@ The Databricks integration does not include any events. ## Troubleshooting -Need help? Contact [Datadog support][9]. +Need help? Contact [Datadog support][10]. ## Further Reading @@ -307,9 +307,10 @@ Need help? Contact [Datadog support][9]. [1]: https://databricks.com/ [2]: https://docs.datadoghq.com/integrations/spark/?tab=host [3]: https://databricks.com/blog/2017/06/01/apache-spark-cluster-monitoring-with-databricks-and-datadog.html -[4]: https://docs.databricks.com/_static/notebooks/datadog-init-script.html -[5]: https://docs.databricks.com/clusters/init-scripts.html#configure-a-cluster-scoped-init-script-using-the-ui -[6]: https://docs.datadoghq.com/agent/guide/agent-commands/?#agent-status-and-information -[7]: https://docs.datadoghq.com/integrations/spark/#metrics -[8]: https://docs.datadoghq.com/integrations/spark/#service-checks -[9]: https://docs.datadoghq.com/help/ +[4]: https://app.datadoghq.com/account/settings#agent +[5]: https://docs.databricks.com/_static/notebooks/datadog-init-script.html +[6]: https://docs.databricks.com/clusters/init-scripts.html#configure-a-cluster-scoped-init-script-using-the-ui +[7]: https://docs.datadoghq.com/agent/guide/agent-commands/?#agent-status-and-information +[8]: https://docs.datadoghq.com/integrations/spark/#metrics +[9]: https://docs.datadoghq.com/integrations/spark/#service-checks +[10]: https://docs.datadoghq.com/help/ diff --git a/flink/README.md b/flink/README.md index 076a945af7072..e5878f2095e8b 100644 --- a/flink/README.md +++ b/flink/README.md @@ -118,6 +118,7 @@ Flink does not include any events. Need help? Contact [Datadog support][12]. 
+ [1]: https://flink.apache.org/ [2]: https://ci.apache.org/projects/flink/flink-docs-release-1.9/monitoring/metrics.html#datadog-orgapacheflinkmetricsdatadogdatadoghttpreporter [3]: https://docs.datadoghq.com/api/?lang=bash#api-reference @@ -130,4 +131,3 @@ Need help? Contact [Datadog support][12]. [10]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information [11]: https://github.com/DataDog/integrations-core/blob/master/flink/metadata.csv [12]: https://docs.datadoghq.com/help/ - diff --git a/glusterfs/README.md b/glusterfs/README.md index 29adf92b9058f..7772cd087f9dc 100644 --- a/glusterfs/README.md +++ b/glusterfs/README.md @@ -11,12 +11,12 @@ Follow the instructions below to install and configure this check for an Agent r ### Installation -The GlusterFS check is included in the [Datadog Agent][2] package. +The GlusterFS check is included in the [Datadog Agent][3] package. No additional installation is needed on your server. ### Configuration -1. Edit the `glusterfs.d/conf.yaml` file, in the `conf.d/` folder at the root of your Agent's configuration directory to start collecting your GlusterFS performance data. See the [sample glusterfs.d/conf.yaml][3] for all available configuration options. +1. Edit the `glusterfs.d/conf.yaml` file, in the `conf.d/` folder at the root of your Agent's configuration directory to start collecting your GlusterFS performance data. See the [sample glusterfs.d/conf.yaml][4] for all available configuration options. ```yaml init_config: @@ -41,7 +41,7 @@ No additional installation is needed on your server. min_collection_interval: 60 ``` - **NOTE**: By default, [`gstatus`][4] internally calls the `gluster` command which requires running as superuser. Add a line like the following to your `sudoers` file: + **NOTE**: By default, [`gstatus`][5] internally calls the `gluster` command which requires running as superuser. Add a line like the following to your `sudoers` file: ```text dd-agent ALL=(ALL) NOPASSWD:/path/to/your/gstatus @@ -49,7 +49,7 @@ No additional installation is needed on your server. If your GlusterFS environment does not require root, set `use_sudo` configuration option to `false`. -2. [Restart the Agent][5]. +2. [Restart the Agent][6]. #### Log collection @@ -73,21 +73,21 @@ No additional installation is needed on your server. ``` - Change the `path` parameter value based on your environment. See the [sample conf.yaml][3] for all available configuration options. + Change the `path` parameter value based on your environment. See the [sample conf.yaml][4] for all available configuration options. - 3. [Restart the Agent][5]. + 3. [Restart the Agent][6]. - See [Datadog's documentation][6] for additional information on how to configure the Agent for log collection in Kubernetes environments. + See [Datadog's documentation][7] for additional information on how to configure the Agent for log collection in Kubernetes environments. ### Validation -[Run the Agent's status subcommand][7] and look for `glusterfs` under the Checks section. +[Run the Agent's status subcommand][8] and look for `glusterfs` under the Checks section. ## Data Collected ### Metrics -See [metadata.csv][8] for a list of metrics provided by this check. +See [metadata.csv][9] for a list of metrics provided by this check. ### Events @@ -95,20 +95,21 @@ GlusterFS does not include any events. ### Service Checks -See [service_checks.json][9] for a list of service checks provided by this integration. 
+See [service_checks.json][10] for a list of service checks provided by this integration. ## Troubleshooting -Need help? Contact [Datadog support][10]. +Need help? Contact [Datadog support][11]. [1]: https://www.redhat.com/en/technologies/storage/gluster [2]: https://docs.datadoghq.com/agent/kubernetes/integrations/ -[3]: https://github.com/DataDog/integrations-core/blob/master/glusterfs/datadog_checks/glusterfs/data/conf.yaml.example -[4]: https://github.com/gluster/gstatus#install -[5]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent -[6]: https://docs.datadoghq.com/agent/kubernetes/log/ -[7]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information -[8]: https://github.com/DataDog/integrations-core/blob/master/glusterfs/metadata.csv -[9]: https://github.com/DataDog/integrations-core/blob/master/glusterfs/assets/service_checks.json -[10]: https://docs.datadoghq.com/help/ +[3]: https://app.datadoghq.com/account/settings#agent +[4]: https://github.com/DataDog/integrations-core/blob/master/glusterfs/datadog_checks/glusterfs/data/conf.yaml.example +[5]: https://github.com/gluster/gstatus#install +[6]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent +[7]: https://docs.datadoghq.com/agent/kubernetes/log/ +[8]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information +[9]: https://github.com/DataDog/integrations-core/blob/master/glusterfs/metadata.csv +[10]: https://github.com/DataDog/integrations-core/blob/master/glusterfs/assets/service_checks.json +[11]: https://docs.datadoghq.com/help/ diff --git a/gunicorn/README.md b/gunicorn/README.md index 1de74aeda566d..a4caa3f9b79d0 100644 --- a/gunicorn/README.md +++ b/gunicorn/README.md @@ -47,7 +47,7 @@ instances: - proc_name: ``` -2. [Restart the Agent][2] to begin sending Gunicorn metrics to Datadog. +2. [Restart the Agent][8] to begin sending Gunicorn metrics to Datadog. #### Log collection @@ -59,9 +59,9 @@ _Available for Agent versions >6.0_ logs_enabled: true ``` -2. Use the following command to configure the path of the access log file as explained in the [Gunicorn Documentation][8]: `--access-logfile ` +2. Use the following command to configure the path of the access log file as explained in the [Gunicorn Documentation][9]: `--access-logfile ` -3. Use the following command to configure the path of the error log file as explained in the [Gunicorn Documentation][9]: `--error-logfile FILE, --log-file ` +3. Use the following command to configure the path of the error log file as explained in the [Gunicorn Documentation][10]: `--error-logfile FILE, --log-file ` 4. Add this configuration block to your `gunicorn.d/conf.yaml` file to start collecting your Gunicorn logs: @@ -84,11 +84,11 @@ _Available for Agent versions >6.0_ Change the `service` and `path` parameter values and configure them for your environment. See the [sample gunicorn.yaml][5] for all available configuration options. -5. [Restart the Agent][2]. +5. [Restart the Agent][8]. ### Validation -[Run the Agent's status subcommand][10] and look for `gunicorn` under the Checks section. +[Run the Agent's status subcommand][11] and look for `gunicorn` under the Checks section. If the status is not `OK`, see the Troubleshooting section. @@ -103,7 +103,7 @@ udp 0 0 127.0.0.1:38374 127.0.0.1:8125 ESTABLISHED ### Metrics -See [metadata.csv][11] for a list of metrics provided by this integration. 
+See [metadata.csv][12] for a list of metrics provided by this integration. ### Events @@ -111,7 +111,7 @@ The Gunicorn check does not include any events. ### Service Checks -See [service_checks.json][12] for a list of service checks provided by this integration. +See [service_checks.json][13] for a list of service checks provided by this integration. ## Troubleshooting @@ -151,18 +151,19 @@ ubuntu 18463 18457 0 20:26 pts/0 00:00:00 gunicorn: worker [my_app] ## Further Reading -- [Monitor Gunicorn performance with Datadog][13] +- [Monitor Gunicorn performance with Datadog][14] [1]: https://raw.githubusercontent.com/DataDog/integrations-core/master/gunicorn/images/gunicorn-dash.png -[2]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent +[2]: https://app.datadoghq.com/account/settings#agent [3]: https://pypi.python.org/pypi/setproctitle [4]: https://docs.datadoghq.com/agent/guide/agent-configuration-files/#agent-configuration-directory [5]: https://github.com/DataDog/integrations-core/blob/master/gunicorn/datadog_checks/gunicorn/data/conf.yaml.example [6]: https://docs.gunicorn.org/en/stable/settings.html#statsd-host [7]: https://docs.datadoghq.com/guides/dogstatsd/ -[8]: https://docs.gunicorn.org/en/stable/settings.html#accesslog -[9]: https://docs.gunicorn.org/en/stable/settings.html#errorlog -[10]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information -[11]: https://github.com/DataDog/integrations-core/blob/master/gunicorn/metadata.csv -[12]: https://github.com/DataDog/integrations-core/blob/master/gunicorn/assets/service_checks.json -[13]: https://www.datadoghq.com/blog/monitor-gunicorn-performance +[8]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent +[9]: https://docs.gunicorn.org/en/stable/settings.html#accesslog +[10]: https://docs.gunicorn.org/en/stable/settings.html#errorlog +[11]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information +[12]: https://github.com/DataDog/integrations-core/blob/master/gunicorn/metadata.csv +[13]: https://github.com/DataDog/integrations-core/blob/master/gunicorn/assets/service_checks.json +[14]: https://www.datadoghq.com/blog/monitor-gunicorn-performance diff --git a/hazelcast/README.md b/hazelcast/README.md index 12e0331e2f09d..b9f9219a5d87c 100644 --- a/hazelcast/README.md +++ b/hazelcast/README.md @@ -168,7 +168,7 @@ Need help? Contact [Datadog support][5]. [1]: https://hazelcast.org -[2]: https://docs.datadoghq.com/agent/ +[2]: https://app.datadoghq.com/account/settings#agent [3]: https://github.com/DataDog/integrations-core/blob/master/hazelcast/datadog_checks/hazelcast/data/conf.yaml.example [4]: https://docs.datadoghq.com/integrations/java/ [5]: https://docs.datadoghq.com/help/ diff --git a/hive/README.md b/hive/README.md index cb35eb3959947..43d7fe4302b93 100644 --- a/hive/README.md +++ b/hive/README.md @@ -130,7 +130,7 @@ See [service_checks.json][13] for a list of service checks provided by this inte Need help? Contact [Datadog support][6]. 
[1]: https://cwiki.apache.org/confluence/display/Hive/Home -[2]: https://docs.datadoghq.com/agent/ +[2]: https://app.datadoghq.com/account/settings#agent [3]: https://cwiki.apache.org/confluence/display/Hive/Configuration+Properties#ConfigurationProperties-Metrics [4]: https://github.com/DataDog/integrations-core/blob/master/hive/datadog_checks/hive/data/conf.yaml.example [5]: https://docs.datadoghq.com/integrations/java/ diff --git a/hivemq/README.md b/hivemq/README.md index b05c0e5bce4f9..571dfe9db2e54 100644 --- a/hivemq/README.md +++ b/hivemq/README.md @@ -120,7 +120,7 @@ See [service_checks.json][11] for a list of service checks provided by this inte Need help? Contact [Datadog support][5]. [1]: https://www.hivemq.com/hivemq/ -[2]: https://docs.datadoghq.com/agent/ +[2]: https://app.datadoghq.com/account/settings#agent [3]: https://github.com/DataDog/integrations-core/blob/master/hivemq/datadog_checks/hivemq/data/conf.yaml.example [4]: https://docs.datadoghq.com/integrations/java [5]: https://docs.datadoghq.com/help diff --git a/hudi/README.md b/hudi/README.md index 075bfd676bac9..d601a9d112187 100644 --- a/hudi/README.md +++ b/hudi/README.md @@ -105,7 +105,7 @@ Need help? Contact [Datadog support][9]. [1]: https://hudi.apache.org/ [2]: https://github.com/apache/hudi/releases -[3]: https://docs.datadoghq.com/agent/ +[3]: https://app.datadoghq.com/account/settings#agent [4]: https://hudi.apache.org/docs/configurations#Metrics-Configurations [5]: https://hudi.apache.org/docs/metrics/#jmxmetricsreporter [6]: https://github.com/DataDog/integrations-core/blob/master/hudi/datadog_checks/hudi/data/conf.yaml.example diff --git a/hyperv/README.md b/hyperv/README.md index 3e9a7a1f37204..7e0549686cbc1 100644 --- a/hyperv/README.md +++ b/hyperv/README.md @@ -45,7 +45,7 @@ Additional helpful documentation, links, and articles: - [Monitor Microsoft Hyper-V with Datadog][8] [1]: https://docs.microsoft.com/en-us/windows-server/virtualization/hyper-v/hyper-v-on-windows-server -[2]: https://docs.datadoghq.com/agent/basic_agent_usage/windows/ +[2]: https://app.datadoghq.com/account/settings#agent [3]: https://github.com/DataDog/integrations-core/blob/master/hyperv/datadog_checks/hyperv/data/conf.yaml.example [4]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent [5]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information diff --git a/ibm_db2/README.md b/ibm_db2/README.md index 87b1424897cd0..587747fc32821 100644 --- a/ibm_db2/README.md +++ b/ibm_db2/README.md @@ -165,7 +165,7 @@ Additional helpful documentation, links, and articles: [1]: https://raw.githubusercontent.com/DataDog/integrations-core/master/ibm_db2/images/dashboard_overview.png [2]: https://www.ibm.com/analytics/us/en/db2 -[3]: https://docs.datadoghq.com/agent/ +[3]: https://app.datadoghq.com/account/settings#agent [4]: https://github.com/ibmdb/python-ibmdb/tree/master/IBM_DB/ibm_db [5]: https://github.com/DataDog/integrations-core/blob/master/ibm_db2/datadog_checks/ibm_db2/data/conf.yaml.example [6]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-restart-the-agent diff --git a/ibm_i/README.md b/ibm_i/README.md index 2edbca37b2049..cb8fe052d0347 100644 --- a/ibm_i/README.md +++ b/ibm_i/README.md @@ -10,14 +10,14 @@ Follow the instructions below to install and configure this check for an Agent r ### Installation -The IBM i check is included in the [Datadog Agent][2] package. +The IBM i check is included in the [Datadog Agent][3] package. 
No additional installation is needed on your server. #### ODBC driver The IBM i check uses the IBM i ODBC driver to connect remotely to the IBM i host. -Download the driver from the [IBM i Access - Client Solutions][3] page. Click on `Downloads for IBM i Access Client Solutions` and login to gain access to the downloads page. +Download the driver from the [IBM i Access - Client Solutions][4] page. Click on `Downloads for IBM i Access Client Solutions` and login to gain access to the downloads page. Choose the `ACS App Pkg` package for your platform, such as `ACS Linux App Pkg` for Linux hosts. Download the package and follow the installation instructions to install the driver. @@ -46,24 +46,24 @@ The name of the IBM i ODBC driver is needed to configure the IBM i check. #### IBM i check -1. Edit the `ibm_i.d/conf.yaml` file, in the `conf.d/` folder at the root of your Agent's configuration directory to start collecting your IBM i performance data. See the [sample ibm_i.d/conf.yaml][4] for all available configuration options. +1. Edit the `ibm_i.d/conf.yaml` file, in the `conf.d/` folder at the root of your Agent's configuration directory to start collecting your IBM i performance data. See the [sample ibm_i.d/conf.yaml][5] for all available configuration options. Use the driver name from the `obdcinst.ini` file. -2. [Restart the Agent][5]. +2. [Restart the Agent][6]. ### Validation -[Run the Agent's status subcommand][6] and look for `ibm_i` under the Checks section. +[Run the Agent's status subcommand][7] and look for `ibm_i` under the Checks section. ## Data Collected ### Metrics -See [metadata.csv][7] for a list of metrics provided by this check. +See [metadata.csv][8] for a list of metrics provided by this check. ### Service Checks -See [service_checks.json][8] for a list of service checks provided by this integration. +See [service_checks.json][9] for a list of service checks provided by this integration. ### Events @@ -71,14 +71,15 @@ The IBM i check does not include any events. ## Troubleshooting -Need help? Contact [Datadog support][9]. +Need help? Contact [Datadog support][10]. 
[1]: https://www.ibm.com/it-infrastructure/power/os/ibm-i [2]: https://docs.datadoghq.com/agent/kubernetes/integrations/ -[3]: https://www.ibm.com/support/pages/ibm-i-access-client-solutions -[4]: https://github.com/DataDog/integrations-core/blob/master/ibm_i/datadog_checks/ibm_i/data/conf.yaml.example -[5]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent -[6]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information -[7]: https://github.com/DataDog/integrations-core/blob/master/ibm_i/metadata.csv -[8]: https://github.com/DataDog/integrations-core/blob/master/ibm_i/datadog_checks/ibm_i/assets/service_checks.json -[9]: https://docs.datadoghq.com/help/ +[3]: https://app.datadoghq.com/account/settings#agent +[4]: https://www.ibm.com/support/pages/ibm-i-access-client-solutions +[5]: https://github.com/DataDog/integrations-core/blob/master/ibm_i/datadog_checks/ibm_i/data/conf.yaml.example +[6]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent +[7]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information +[8]: https://github.com/DataDog/integrations-core/blob/master/ibm_i/metadata.csv +[9]: https://github.com/DataDog/integrations-core/blob/master/ibm_i/datadog_checks/ibm_i/assets/service_checks.json +[10]: https://docs.datadoghq.com/help/ diff --git a/ignite/README.md b/ignite/README.md index b1b2cb03b5c7e..4897591ee829a 100644 --- a/ignite/README.md +++ b/ignite/README.md @@ -110,7 +110,7 @@ Need help? Contact [Datadog support][4]. [1]: https://ignite.apache.org/ -[2]: https://docs.datadoghq.com/agent/ +[2]: https://app.datadoghq.com/account/settings#agent [3]: https://apacheignite.readme.io/docs/logging#section-log4j [4]: https://github.com/DataDog/integrations-core/blob/master/ignite/datadog_checks/ignite/data/conf.yaml.example [5]: https://docs.datadoghq.com/integrations/java/ diff --git a/kube_apiserver_metrics/README.md b/kube_apiserver_metrics/README.md index 1d8dffb3b88e6..e041f0b8337b2 100644 --- a/kube_apiserver_metrics/README.md +++ b/kube_apiserver_metrics/README.md @@ -29,21 +29,21 @@ annotations: Then the Datadog Cluster Agent schedules the check(s) for each endpoint onto Datadog Agent(s). You can also run the check by configuring the endpoints directly in the `kube_apiserver_metrics.d/conf.yaml` file, in the `conf.d/` folder at the root of your [Agent's configuration directory][4]. -You must add `cluster_check: true` to your [configuration file][9] when using a static configuration file or ConfigMap to configure cluster checks. See the [sample kube_apiserver_metrics.d/conf.yaml][2] for all available configuration options. +You must add `cluster_check: true` to your [configuration file][10] when using a static configuration file or ConfigMap to configure cluster checks. See the [sample kube_apiserver_metrics.d/conf.yaml][5] for all available configuration options. By default the Agent running the check tries to get the service account bearer token to authenticate against the APIServer. If you are not using RBACs, set `bearer_token_auth` to `false`. -Finally, if you run the Datadog Agent on the master nodes, you can rely on [Autodiscovery][5] to schedule the check. It is automatic if you are running the official image `k8s.gcr.io/kube-apiserver`. +Finally, if you run the Datadog Agent on the master nodes, you can rely on [Autodiscovery][6] to schedule the check. It is automatic if you are running the official image `k8s.gcr.io/kube-apiserver`. 
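For reference, a static cluster-check configuration using the options mentioned above (`cluster_check` and `bearer_token_auth`) could look like the following sketch. The endpoint URL is a placeholder and should point at your API server's metrics endpoint; this is an illustration, not the sample file itself.

```yaml
# kube_apiserver_metrics.d/conf.yaml scheduled as a cluster check.
# The URL below is a placeholder for your API server endpoint.
cluster_check: true

init_config:

instances:
  - prometheus_url: https://10.0.0.1:443/metrics
    ## The Agent uses the service account bearer token by default;
    ## set this to false if you are not using RBACs, as noted above.
    bearer_token_auth: true
```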
### Validation -[Run the Agent's status subcommand][6] and look for `kube_apiserver_metrics` under the Checks section. +[Run the Agent's status subcommand][7] and look for `kube_apiserver_metrics` under the Checks section. ## Data Collected ### Metrics -See [metadata.csv][7] for a list of metrics provided by this integration. +See [metadata.csv][8] for a list of metrics provided by this integration. ### Service Checks @@ -55,14 +55,15 @@ Kube_apiserver_metrics does not include any events. ## Troubleshooting -Need help? Contact [Datadog support][8]. +Need help? Contact [Datadog support][9]. [1]: https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver -[2]: https://github.com/DataDog/integrations-core/blob/master/kube_apiserver_metrics/datadog_checks/kube_apiserver_metrics/data/conf.yaml.example +[2]: https://app.datadoghq.com/account/settings#agent [3]: https://docs.datadoghq.com/agent/cluster_agent/clusterchecks/ [4]: https://docs.datadoghq.com/agent/guide/agent-configuration-files/#agent-configuration-directory -[5]: https://docs.datadoghq.com/agent/kubernetes/integrations/ -[6]: https://docs.datadoghq.com/agent/faq/agent-commands/#agent-status-and-information -[7]: https://github.com/DataDog/integrations-core/blob/master/kube_apiserver_metrics/metadata.csv -[8]: https://docs.datadoghq.com/help/ -[9]: https://docs.datadoghq.com/agent/cluster_agent/clusterchecks/#set-up-cluster-checks +[5]: https://github.com/DataDog/integrations-core/blob/master/kube_apiserver_metrics/datadog_checks/kube_apiserver_metrics/data/conf.yaml.example +[6]: https://docs.datadoghq.com/agent/kubernetes/integrations/ +[7]: https://docs.datadoghq.com/agent/faq/agent-commands/#agent-status-and-information +[8]: https://github.com/DataDog/integrations-core/blob/master/kube_apiserver_metrics/metadata.csv +[9]: https://docs.datadoghq.com/help/ +[10]: https://docs.datadoghq.com/agent/cluster_agent/clusterchecks/#set-up-cluster-checks diff --git a/kube_controller_manager/README.md b/kube_controller_manager/README.md index 33674b5b64c7a..f1ac9c70d5c1f 100644 --- a/kube_controller_manager/README.md +++ b/kube_controller_manager/README.md @@ -18,19 +18,19 @@ need to install anything else on your server. This integration requires access to the controller manager's metric endpoint. It is usually not exposed in container-as-a-service clusters. -1. Edit the `kube_controller_manager.d/conf.yaml` file, in the `conf.d/` folder at the root of your Agent's configuration directory to start collecting your kube_controller_manager performance data. See the [sample kube_controller_manager.d/conf.yaml][2] for all available configuration options. +1. Edit the `kube_controller_manager.d/conf.yaml` file, in the `conf.d/` folder at the root of your Agent's configuration directory to start collecting your kube_controller_manager performance data. See the [sample kube_controller_manager.d/conf.yaml][3] for all available configuration options. -2. [Restart the Agent][3] +2. [Restart the Agent][4] ### Validation -[Run the Agent's `status` subcommand][4] and look for `kube_controller_manager` under the Checks section. +[Run the Agent's `status` subcommand][5] and look for `kube_controller_manager` under the Checks section. ## Data Collected ### Metrics -See [metadata.csv][5] for a list of metrics provided by this integration. +See [metadata.csv][6] for a list of metrics provided by this integration. ### Events @@ -38,17 +38,18 @@ The Kubernetes Controller Manager check does not include any events. 
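Tying back to the configuration step above, a minimal `kube_controller_manager.d/conf.yaml` might look like the sketch below. The endpoint and port are placeholders rather than values taken from the sample file; point them at your controller manager's metrics endpoint, which, as noted above, is usually not exposed in container-as-a-service clusters.

```yaml
# Minimal sketch of kube_controller_manager.d/conf.yaml.
# The endpoint below is a placeholder for your controller manager's metrics port.
init_config:

instances:
  - prometheus_url: http://localhost:10252/metrics
```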
### Service Checks -See [service_checks.json][6] for a list of service checks provided by this integration. +See [service_checks.json][7] for a list of service checks provided by this integration. ## Troubleshooting -Need help? Contact [Datadog Support][7]. +Need help? Contact [Datadog Support][8]. [1]: https://kubernetes.io/docs/reference/command-line-tools-reference/kube-controller-manager -[2]: https://github.com/DataDog/integrations-core/blob/master/kube_controller_manager/datadog_checks/kube_controller_manager/data/conf.yaml.example -[3]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent -[4]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information -[5]: https://github.com/DataDog/integrations-core/blob/master/kube_controller_manager/metadata.csv -[6]: https://github.com/DataDog/integrations-core/blob/master/kube_controller_manager/assets/service_checks.json -[7]: https://docs.datadoghq.com/help/ +[2]: https://app.datadoghq.com/account/settings#agent +[3]: https://github.com/DataDog/integrations-core/blob/master/kube_controller_manager/datadog_checks/kube_controller_manager/data/conf.yaml.example +[4]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent +[5]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information +[6]: https://github.com/DataDog/integrations-core/blob/master/kube_controller_manager/metadata.csv +[7]: https://github.com/DataDog/integrations-core/blob/master/kube_controller_manager/assets/service_checks.json +[8]: https://docs.datadoghq.com/help/ diff --git a/kube_metrics_server/README.md b/kube_metrics_server/README.md index 067db27cc5a8a..f512ad3ab594d 100644 --- a/kube_metrics_server/README.md +++ b/kube_metrics_server/README.md @@ -19,16 +19,16 @@ The Kube_metrics_server check is included in the [Datadog Agent][2] package. No To configure this check for an Agent running on a host: -1. Edit the `kube_metrics_server.d/conf.yaml` file, in the `conf.d/` folder at the root of your Agent's configuration directory to start collecting your kube_metrics_server performance data. See the [sample kube_metrics_server.d/conf.yaml][2] for all available configuration options. +1. Edit the `kube_metrics_server.d/conf.yaml` file, in the `conf.d/` folder at the root of your Agent's configuration directory to start collecting your kube_metrics_server performance data. See the [sample kube_metrics_server.d/conf.yaml][3] for all available configuration options. -2. [Restart the Agent][3]. +2. [Restart the Agent][4]. #### Containerized -For containerized environments, see the [Kubernetes Autodiscovery Integration Templates][4] for guidance on applying the parameters below. +For containerized environments, see the [Kubernetes Autodiscovery Integration Templates][5] for guidance on applying the parameters below. | Parameter | Value | | -------------------- | ---------------------------------------------------- | @@ -47,17 +47,17 @@ If your endpoint is secured, additional configuration is required: 2. Mount the related certificate file in the Agent pod. -3. Apply your SSL configuration. Refer to the [default configuration file][5] for more information. +3. Apply your SSL configuration. Refer to the [default configuration file][6] for more information. ### Validation -[Run the Agent's status subcommand][6] and look for `kube_metrics_server` under the Checks section. 
+[Run the Agent's status subcommand][7] and look for `kube_metrics_server` under the Checks section. ## Data Collected ### Metrics -See [metadata.csv][7] for a list of metrics provided by this integration. +See [metadata.csv][8] for a list of metrics provided by this integration. ### Events @@ -65,19 +65,20 @@ kube_metrics_server does not include any events. ### Service Checks -See [service_checks.json][8] for a list of service checks provided by this integration. +See [service_checks.json][9] for a list of service checks provided by this integration. ## Troubleshooting -Need help? Contact [Datadog support][9]. +Need help? Contact [Datadog support][10]. [1]: https://github.com/kubernetes-incubator/metrics-server -[2]: https://github.com/DataDog/integrations-core/blob/master/kube_metrics_server/datadog_checks/kube_metrics_server/data/conf.yaml.example -[3]: https://docs.datadoghq.com/agent/guide/agent-commands/#restart-the-agent -[4]: https://docs.datadoghq.com/agent/kubernetes/integrations/ -[5]: https://github.com/DataDog/integrations-core/blob/master/openmetrics/datadog_checks/openmetrics/data/conf.yaml.example -[6]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information -[7]: https://github.com/DataDog/integrations-core/blob/master/kube_metrics_server/metadata.csv -[8]: https://github.com/DataDog/integrations-core/blob/master/kube_metrics_server/assets/service_checks.json -[9]: https://docs.datadoghq.com/help/ +[2]: https://app.datadoghq.com/account/settings#agent +[3]: https://github.com/DataDog/integrations-core/blob/master/kube_metrics_server/datadog_checks/kube_metrics_server/data/conf.yaml.example +[4]: https://docs.datadoghq.com/agent/guide/agent-commands/#restart-the-agent +[5]: https://docs.datadoghq.com/agent/kubernetes/integrations/ +[6]: https://github.com/DataDog/integrations-core/blob/master/openmetrics/datadog_checks/openmetrics/data/conf.yaml.example +[7]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information +[8]: https://github.com/DataDog/integrations-core/blob/master/kube_metrics_server/metadata.csv +[9]: https://github.com/DataDog/integrations-core/blob/master/kube_metrics_server/assets/service_checks.json +[10]: https://docs.datadoghq.com/help/ diff --git a/kube_scheduler/README.md b/kube_scheduler/README.md index c8085b41f2ca7..78be883bb83c8 100644 --- a/kube_scheduler/README.md +++ b/kube_scheduler/README.md @@ -19,13 +19,13 @@ See the [Autodiscovery Integration Templates][3] for guidance on applying the pa #### Metric collection -1. Edit the `kube_scheduler.d/conf.yaml` file, in the `conf.d/` folder at the root of your Agent's configuration directory to start collecting your kube_scheduler performance data. See the [sample kube_scheduler.d/conf.yaml][2] for all available configuration options. +1. Edit the `kube_scheduler.d/conf.yaml` file, in the `conf.d/` folder at the root of your Agent's configuration directory to start collecting your kube_scheduler performance data. See the [sample kube_scheduler.d/conf.yaml][4] for all available configuration options. -2. [Restart the Agent][4]. +2. [Restart the Agent][5]. #### Log collection -Collecting logs is disabled by default in the Datadog Agent. To enable it, see [Kubernetes log collection documentation][5]. +Collecting logs is disabled by default in the Datadog Agent. To enable it, see [Kubernetes log collection documentation][6]. 
| Parameter | Value | |----------------|-------------------------------------------| @@ -33,13 +33,13 @@ Collecting logs is disabled by default in the Datadog Agent. To enable it, see [ ### Validation -[Run the Agent's status subcommand][6] and look for `kube_scheduler` under the Checks section. +[Run the Agent's status subcommand][7] and look for `kube_scheduler` under the Checks section. ## Data Collected ### Metrics -See [metadata.csv][7] for a list of metrics provided by this integration. +See [metadata.csv][8] for a list of metrics provided by this integration. ### Events @@ -47,19 +47,20 @@ Kube Scheduler does not include any events. ### Service Checks -See [service_checks.json][8] for a list of service checks provided by this integration. +See [service_checks.json][9] for a list of service checks provided by this integration. ## Troubleshooting -Need help? Contact [Datadog support][9]. +Need help? Contact [Datadog support][10]. [1]: https://kubernetes.io/docs/reference/command-line-tools-reference/kube-scheduler -[2]: https://github.com/DataDog/integrations-core/blob/master/kube_scheduler/datadog_checks/kube_scheduler/data/conf.yaml.example +[2]: https://app.datadoghq.com/account/settings#agent [3]: https://docs.datadoghq.com/agent/kubernetes/integrations/ -[4]: https://docs.datadoghq.com/agent/guide/agent-commands/#restart-the-agent -[5]: https://docs.datadoghq.com/agent/kubernetes/log/ -[6]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information -[7]: https://github.com/DataDog/integrations-core/blob/master/kube_scheduler/metadata.csv -[8]: https://github.com/DataDog/integrations-core/blob/master/kube_scheduler/assets/service_checks.json -[9]: https://docs.datadoghq.com/help/ +[4]: https://github.com/DataDog/integrations-core/blob/master/kube_scheduler/datadog_checks/kube_scheduler/data/conf.yaml.example +[5]: https://docs.datadoghq.com/agent/guide/agent-commands/#restart-the-agent +[6]: https://docs.datadoghq.com/agent/kubernetes/log/ +[7]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information +[8]: https://github.com/DataDog/integrations-core/blob/master/kube_scheduler/metadata.csv +[9]: https://github.com/DataDog/integrations-core/blob/master/kube_scheduler/assets/service_checks.json +[10]: https://docs.datadoghq.com/help/ diff --git a/marklogic/README.md b/marklogic/README.md index 89fa4e4f13138..71926515fed8a 100644 --- a/marklogic/README.md +++ b/marklogic/README.md @@ -10,13 +10,13 @@ Follow the instructions below to install and configure this check for an Agent r ### Installation -The MarkLogic check is included in the [Datadog Agent][2] package. +The MarkLogic check is included in the [Datadog Agent][3] package. No additional installation is needed on your server. #### Prepare MarkLogic -Using the API or the Admin interface, create a user for the Datadog Agent with the [`manage-user`][3] role permissions at minimum. -If you plan to use the `enable_health_service_checks` configuration, give the Datadog MarkLogic user at least the [`manage-admin`][4] role. +Using the API or the Admin interface, create a user for the Datadog Agent with the [`manage-user`][4] role permissions at minimum. +If you plan to use the `enable_health_service_checks` configuration, give the Datadog MarkLogic user at least the [`manage-admin`][5] role. 
##### API @@ -25,7 +25,7 @@ If you plan to use the `enable_health_service_checks` configuration, give the Da curl -X POST --anyauth --user : -i -H "Content-Type: application/json" -d '{"user-name": "", "password": "", "roles": {"role": "manage-user"}}' http://:8002/manage/v2/users ``` Use the correct `` and ``, and replace `` and `` with the username and password that the Datadog Agent uses. - For more information about the endpoint, see the [MarkLogic documentation][5]. + For more information about the endpoint, see the [MarkLogic documentation][6]. 2. To verify the user was created with enough permissions: ```shell @@ -53,7 +53,7 @@ If you plan to use the `enable_health_service_checks` configuration, give the Da ("http://marklogic.com/dev_modules")) ``` - For more information about the query, see the [MarkLogic documentation][6]. + For more information about the query, see the [MarkLogic documentation][7]. 4. To verify that the user was created with enough permissions, use `` and `` to authenticate at `http://:8002` (default port). @@ -61,9 +61,9 @@ If you plan to use the `enable_health_service_checks` configuration, give the Da #### Host -1. Edit the `marklogic.d/conf.yaml` file, in the `conf.d/` folder at the root of your Agent's configuration directory to start collecting your MarkLogic performance data. See the [sample `marklogic.d/conf.yaml` file][7] for all available configuration options. For user-related settings in the config file, use the Datadog Agent user you created. +1. Edit the `marklogic.d/conf.yaml` file, in the `conf.d/` folder at the root of your Agent's configuration directory to start collecting your MarkLogic performance data. See the [sample `marklogic.d/conf.yaml` file][8] for all available configuration options. For user-related settings in the config file, use the Datadog Agent user you created. -2. [Restart the Agent][8]. +2. [Restart the Agent][9]. #### Log collection @@ -87,19 +87,19 @@ _Available for Agent versions >6.0_ source: marklogic ``` - Change the `path` value and configure it for your environment. See the [sample `marklogic.d/conf.yaml` file][7] for all available configuration options. + Change the `path` value and configure it for your environment. See the [sample `marklogic.d/conf.yaml` file][8] for all available configuration options. -3. [Restart the Agent][8]. +3. [Restart the Agent][9]. ### Validation -[Run the Agent's status subcommand][9] and look for `marklogic` under the Checks section. +[Run the Agent's status subcommand][10] and look for `marklogic` under the Checks section. ## Data Collected ### Metrics -See [metadata.csv][10] for a list of metrics provided by this check. +See [metadata.csv][11] for a list of metrics provided by this check. ### Events @@ -107,22 +107,23 @@ MarkLogic does not include any events. ### Service Checks -See [service_checks.json][11] for a list of service checks provided by this integration. +See [service_checks.json][12] for a list of service checks provided by this integration. ## Troubleshooting -Need help? Contact [Datadog support][12]. +Need help? Contact [Datadog support][13]. 
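As a companion to the host configuration step above, a minimal `marklogic.d/conf.yaml` might look like the sketch below. The `url`, `username`, and `password` option names are assumptions based on the sample file rather than a verbatim copy; the credentials are the Datadog user created in the preparation step, and `enable_health_service_checks` is the option discussed above.

```yaml
# Hypothetical sketch of marklogic.d/conf.yaml; option names other than
# enable_health_service_checks are assumptions, see the sample file above.
init_config:

instances:
  - url: http://localhost:8002
    username: datadog
    password: "<PASSWORD>"
    ## Only enable if the Datadog user has at least the manage-admin role,
    ## as noted in the preparation step above.
    enable_health_service_checks: false
```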
[1]: https://www.marklogic.com [2]: https://docs.datadoghq.com/agent/kubernetes/integrations -[3]: https://docs.marklogic.com/guide/admin/pre_def_roles#id_64197 -[4]: https://docs.marklogic.com/guide/admin/pre_def_roles#id_28243 -[5]: https://docs.marklogic.com/REST/POST/manage/v2/users -[6]: https://docs.marklogic.com/sec:create-user -[7]: https://github.com/DataDog/integrations-core/blob/master/marklogic/datadog_checks/marklogic/data/conf.yaml.example -[8]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent -[9]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information -[10]: https://github.com/DataDog/integrations-core/blob/master/marklogic/metadata.csv -[11]: https://github.com/DataDog/integrations-core/blob/master/marklogic/assets/service_checks.json -[12]: https://docs.datadoghq.com/help +[3]: https://app.datadoghq.com/account/settings#agent +[4]: https://docs.marklogic.com/guide/admin/pre_def_roles#id_64197 +[5]: https://docs.marklogic.com/guide/admin/pre_def_roles#id_28243 +[6]: https://docs.marklogic.com/REST/POST/manage/v2/users +[7]: https://docs.marklogic.com/sec:create-user +[8]: https://github.com/DataDog/integrations-core/blob/master/marklogic/datadog_checks/marklogic/data/conf.yaml.example +[9]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent +[10]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information +[11]: https://github.com/DataDog/integrations-core/blob/master/marklogic/metadata.csv +[12]: https://github.com/DataDog/integrations-core/blob/master/marklogic/assets/service_checks.json +[13]: https://docs.datadoghq.com/help diff --git a/mesos_master/README.md b/mesos_master/README.md index 1301799d598d4..b1322ec33303f 100644 --- a/mesos_master/README.md +++ b/mesos_master/README.md @@ -1,6 +1,6 @@ # Mesos_master Check -This check collects metrics for Mesos masters. If you are looking for the the metrics for Mesos slave, see the [Mesos Slave Integration documentation][1]. +This check collects metrics for Mesos masters. If you are looking for the metrics for Mesos slave, see the [Mesos Slave Integration documentation][1]. ![Mesos master Dashboard][2] diff --git a/nginx_ingress_controller/README.md b/nginx_ingress_controller/README.md index 4d843e83a0c3e..fe08599044298 100644 --- a/nginx_ingress_controller/README.md +++ b/nginx_ingress_controller/README.md @@ -17,14 +17,14 @@ The `nginx-ingress-controller` check is included in the [Datadog Agent][2] packa #### Host -If your Agent is running on a host, edit the `nginx_ingress_controller.d/conf.yaml` file, in the `conf.d/` folder at the root of your Agent's configuration directory to start collecting your NGINX ingress controller metrics. See the [sample nginx_ingress_controller.d/conf.yaml][2] for all available configuration options. Then [Restart the Agent][3]. +If your Agent is running on a host, edit the `nginx_ingress_controller.d/conf.yaml` file, in the `conf.d/` folder at the root of your Agent's configuration directory to start collecting your NGINX ingress controller metrics. See the [sample nginx_ingress_controller.d/conf.yaml][3] for all available configuration options. Then [Restart the Agent][4]. #### Containerized -For containerized environments, see the [Autodiscovery Integration Templates][4] for guidance on applying the parameters below. +For containerized environments, see the [Autodiscovery Integration Templates][5] for guidance on applying the parameters below. 
@@ -43,7 +43,7 @@ For example these annotations, enable both the `nginx` and `nginx-ingress-contro | `` | `[{},{}]` | | `` | `[{"nginx_status_url": "http://%%host%%:18080/nginx_status"},{"prometheus_url": "http://%%host%%:10254/metrics"}]` | -See the [sample nginx_ingress_controller.d/conf.yaml][2] for all available configuration options. +See the [sample nginx_ingress_controller.d/conf.yaml][3] for all available configuration options. **Note**: For `nginx-ingress-controller` 0.23.0+ versions, the `nginx` server listening in port `18080` was removed, it can be restored by adding the following `http-snippet` to the configuration configmap: @@ -67,7 +67,7 @@ See the [sample nginx_ingress_controller.d/conf.yaml][2] for all available confi _Available for Agent versions >6.0_ -Collecting logs is disabled by default in the Datadog Agent. To enable it, see [Kubernetes log collection][5]. +Collecting logs is disabled by default in the Datadog Agent. To enable it, see [Kubernetes log collection][6]. | Parameter | Value | | -------------- | ------------------------------------------------------------------ | @@ -75,13 +75,13 @@ Collecting logs is disabled by default in the Datadog Agent. To enable it, see [ ### Validation -[Run the Agent's status subcommand][6] and look for `nginx_ingress_controller` under the Checks section. +[Run the Agent's status subcommand][7] and look for `nginx_ingress_controller` under the Checks section. ## Data Collected ### Metrics -See [metadata.csv][7] for a list of metrics provided by this integration. +See [metadata.csv][8] for a list of metrics provided by this integration. ### Events @@ -93,13 +93,14 @@ The NGINX Ingress Controller does not include any service checks. ## Troubleshooting -Need help? Contact [Datadog support][8]. +Need help? Contact [Datadog support][9]. [1]: https://kubernetes.github.io/ingress-nginx -[2]: https://github.com/DataDog/integrations-core/blob/master/nginx_ingress_controller/datadog_checks/nginx_ingress_controller/data/conf.yaml.example -[3]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent -[4]: https://docs.datadoghq.com/agent/kubernetes/integrations/ -[5]: https://docs.datadoghq.com/agent/kubernetes/log/ -[6]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information -[7]: https://github.com/DataDog/integrations-core/blob/master/nginx_ingress_controller/metadata.csv -[8]: https://docs.datadoghq.com/help/ +[2]: https://app.datadoghq.com/account/settings#agent +[3]: https://github.com/DataDog/integrations-core/blob/master/nginx_ingress_controller/datadog_checks/nginx_ingress_controller/data/conf.yaml.example +[4]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent +[5]: https://docs.datadoghq.com/agent/kubernetes/integrations/ +[6]: https://docs.datadoghq.com/agent/kubernetes/log/ +[7]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information +[8]: https://github.com/DataDog/integrations-core/blob/master/nginx_ingress_controller/metadata.csv +[9]: https://docs.datadoghq.com/help/ diff --git a/oom_kill/README.md b/oom_kill/README.md index 216e640018af9..fc51dc4a3847e 100644 --- a/oom_kill/README.md +++ b/oom_kill/README.md @@ -65,7 +65,7 @@ The OOM Kill check submits an event for each OOM Kill that includes the killed p Need help? Contact [Datadog support][7]. 
-[1]: https://docs.datadoghq.com/agent/guide/ +[1]: https://app.datadoghq.com/account/settings#agent [2]: https://github.com/DataDog/datadog-agent/blob/master/cmd/agent/dist/conf.d/oom_kill.d/conf.yaml.example [3]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent [4]: https://github.com/helm/charts/tree/master/stable/datadog diff --git a/openstack/README.md b/openstack/README.md index 8b5f88a4862b9..18dd9fb1eafb6 100644 --- a/openstack/README.md +++ b/openstack/README.md @@ -4,7 +4,7 @@ ## Overview -**Note**: This integration only applies to OpenStack v12 and below (non-containerized OpenStack). If you are looking to collect metrics from OpenStack v13+ (containerized OpenStack), use the [OpenStack Controller integration][13]. +**Note**: This integration only applies to OpenStack v12 and below (non-containerized OpenStack). If you are looking to collect metrics from OpenStack v13+ (containerized OpenStack), use the [OpenStack Controller integration][2]. Get metrics from OpenStack service in real time to: @@ -15,7 +15,7 @@ Get metrics from OpenStack service in real time to: ### Installation -To capture your OpenStack metrics, [install the Agent][2] on your hosts running hypervisors. +To capture your OpenStack metrics, [install the Agent][3] on your hosts running hypervisors. ### Configuration @@ -78,7 +78,7 @@ You may need to restart your Keystone, Neutron, and Nova API services to ensure #### Agent configuration -1. Configure the Datadog Agent to connect to your Keystone server, and specify individual projects to monitor. Edit the `openstack.d/conf.yaml` file in the `conf.d/` folder at the root of your [Agent's configuration directory][3] with the configuration below. See the [sample openstack.d/conf.yaml][4] for all available configuration options: +1. Configure the Datadog Agent to connect to your Keystone server, and specify individual projects to monitor. Edit the `openstack.d/conf.yaml` file in the `conf.d/` folder at the root of your [Agent's configuration directory][4] with the configuration below. See the [sample openstack.d/conf.yaml][5] for all available configuration options: ```yaml init_config: @@ -111,7 +111,7 @@ You may need to restart your Keystone, Neutron, and Nova API services to ensure id: "" ``` -2. [Restart the Agent][5]. +2. [Restart the Agent][6]. ##### Log collection @@ -130,18 +130,18 @@ You may need to restart your Keystone, Neutron, and Nova API services to ensure source: openstack ``` - Change the `path` parameter value and configure them for your environment. See the [sample openstack.d/conf.yaml][4] for all available configuration options. + Change the `path` parameter value and configure them for your environment. See the [sample openstack.d/conf.yaml][5] for all available configuration options. ### Validation -[Run the Agent's `status` subcommand][6] and look for `openstack` under the Checks section. +[Run the Agent's `status` subcommand][7] and look for `openstack` under the Checks section. ## Data Collected ### Metrics -See [metadata.csv][7] for a list of metrics provided by this integration. +See [metadata.csv][8] for a list of metrics provided by this integration. ### Events @@ -149,31 +149,31 @@ The OpenStack check does not include any events. ### Service Checks -See [service_checks.json][8] for a list of service checks provided by this integration. +See [service_checks.json][9] for a list of service checks provided by this integration. ## Troubleshooting -Need help? Contact [Datadog support][9]. +Need help? 
Contact [Datadog support][10]. ## Further Reading -To get a better idea of how (or why) to integrate your Nova OpenStack compute module with Datadog, check out Datadog's [series of blog posts][10] about it. +To get a better idea of how (or why) to integrate your Nova OpenStack compute module with Datadog, check out Datadog's [series of blog posts][11] about it. See also these other Datadog blog posts: -- [Install OpenStack in two commands for dev and test][11] -- [OpenStack: host aggregates, flavors, and availability zones][12] +- [Install OpenStack in two commands for dev and test][12] +- [OpenStack: host aggregates, flavors, and availability zones][13] [1]: https://raw.githubusercontent.com/DataDog/integrations-core/master/openstack/images/openstack_dashboard.png -[2]: https://app.datadoghq.com/account/settings#agent -[3]: https://docs.datadoghq.com/agent/guide/agent-configuration-files/#agent-configuration-directory -[4]: https://github.com/DataDog/integrations-core/blob/master/openstack/datadog_checks/openstack/data/conf.yaml.example -[5]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent -[6]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information -[7]: https://github.com/DataDog/integrations-core/blob/master/openstack/metadata.csv -[8]: https://github.com/DataDog/integrations-core/blob/master/openstack/assets/service_checks.json -[9]: https://docs.datadoghq.com/help/ -[10]: https://www.datadoghq.com/blog/openstack-monitoring-nova -[11]: https://www.datadoghq.com/blog/install-openstack-in-two-commands -[12]: https://www.datadoghq.com/blog/openstack-host-aggregates-flavors-availability-zones -[13]: https://docs.datadoghq.com/integrations/openstack_controller +[2]: https://docs.datadoghq.com/integrations/openstack_controller +[3]: https://app.datadoghq.com/account/settings#agent +[4]: https://docs.datadoghq.com/agent/guide/agent-configuration-files/#agent-configuration-directory +[5]: https://github.com/DataDog/integrations-core/blob/master/openstack/datadog_checks/openstack/data/conf.yaml.example +[6]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent +[7]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information +[8]: https://github.com/DataDog/integrations-core/blob/master/openstack/metadata.csv +[9]: https://github.com/DataDog/integrations-core/blob/master/openstack/assets/service_checks.json +[10]: https://docs.datadoghq.com/help/ +[11]: https://www.datadoghq.com/blog/openstack-monitoring-nova +[12]: https://www.datadoghq.com/blog/install-openstack-in-two-commands +[13]: https://www.datadoghq.com/blog/openstack-host-aggregates-flavors-availability-zones diff --git a/openstack_controller/README.md b/openstack_controller/README.md index 136a73aaa2b66..2c0926b1abbd3 100644 --- a/openstack_controller/README.md +++ b/openstack_controller/README.md @@ -2,15 +2,15 @@ ## Overview -**Note**: This integration only applies to OpenStack v13+ (containerized OpenStack). If you are looking to collect metrics from OpenStack v12 and below (non-containerized OpenStack), use the [OpenStack integration][8]. +**Note**: This integration only applies to OpenStack v13+ (containerized OpenStack). If you are looking to collect metrics from OpenStack v12 and below (non-containerized OpenStack), use the [OpenStack integration][1]. -This check monitors [OpenStack][1] from the controller node. +This check monitors [OpenStack][2] from the controller node. 
## Setup ### Installation -The OpenStack Controller check is included in the [Datadog Agent][2] package, so you do not need to install anything else on your server. +The OpenStack Controller check is included in the [Datadog Agent][3] package, so you do not need to install anything else on your server. ### Configuration @@ -22,7 +22,7 @@ Create a `datadog` user that is used in your `openstack_controller.d/conf.yaml` #### Agent configuration -1. Edit the `openstack_controller.d/conf.yaml` file, in the `conf.d/` folder at the root of your Agent's configuration directory to start collecting your OpenStack Controller performance data. See the [sample openstack_controller.d/conf.yaml][2] for all available configuration options: +1. Edit the `openstack_controller.d/conf.yaml` file, in the `conf.d/` folder at the root of your Agent's configuration directory to start collecting your OpenStack Controller performance data. See the [sample openstack_controller.d/conf.yaml][4] for all available configuration options: ```yaml init_config: @@ -48,7 +48,7 @@ Create a `datadog` user that is used in your `openstack_controller.d/conf.yaml` id: "" ``` -2. [Restart the Agent][3] +2. [Restart the Agent][5] ##### Log collection @@ -67,18 +67,18 @@ Create a `datadog` user that is used in your `openstack_controller.d/conf.yaml` source: openstack ``` - Change the `path` parameter value and configure them for your environment. See the [sample openstack_controller.d/conf.yaml][2] for all available configuration options. + Change the `path` parameter value and configure them for your environment. See the [sample openstack_controller.d/conf.yaml][4] for all available configuration options. ### Validation -[Run the Agent's `status` subcommand][4] and look for `openstack_controller` under the Checks section. +[Run the Agent's `status` subcommand][6] and look for `openstack_controller` under the Checks section. ## Data Collected ### Metrics -See [metadata.csv][5] for a list of metrics provided by this integration. +See [metadata.csv][7] for a list of metrics provided by this integration. ### Events @@ -86,18 +86,19 @@ OpenStack Controller does not include any events. ### Service Checks -See [service_checks.json][6] for a list of service checks provided by this integration. +See [service_checks.json][8] for a list of service checks provided by this integration. ## Troubleshooting -Need help? Contact [Datadog support][7]. +Need help? Contact [Datadog support][9]. 
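One note on the log collection steps above: file log collection also has to be switched on at the Agent level before the `logs` section in `openstack_controller.d/conf.yaml` takes effect. A minimal `datadog.yaml` fragment for that is shown below; this is the standard Agent toggle rather than anything specific to this check.

```yaml
# datadog.yaml: enable log collection at the Agent level so that the
# logs configuration above is picked up.
logs_enabled: true
```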
-[1]: https://www.openstack.org -[2]: https://github.com/DataDog/integrations-core/blob/master/openstack_controller/datadog_checks/openstack_controller/data/conf.yaml.example -[3]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent -[4]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information -[5]: https://github.com/DataDog/integrations-core/blob/master/openstack_controller/metadata.csv -[6]: https://github.com/DataDog/integrations-core/blob/master/openstack_controller/assets/service_checks.json -[7]: https://docs.datadoghq.com/help/ -[8]: https://docs.datadoghq.com/integrations/openstack/ +[1]: https://docs.datadoghq.com/integrations/openstack/ +[2]: https://www.openstack.org +[3]: https://app.datadoghq.com/account/settings#agent +[4]: https://github.com/DataDog/integrations-core/blob/master/openstack_controller/datadog_checks/openstack_controller/data/conf.yaml.example +[5]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent +[6]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information +[7]: https://github.com/DataDog/integrations-core/blob/master/openstack_controller/metadata.csv +[8]: https://github.com/DataDog/integrations-core/blob/master/openstack_controller/assets/service_checks.json +[9]: https://docs.datadoghq.com/help/ diff --git a/otel/README.md b/otel/README.md index ed607bad32082..007ca3df77128 100644 --- a/otel/README.md +++ b/otel/README.md @@ -88,10 +88,10 @@ The OpenTelemetry Collector does not include any events. Need help? Contact [Datadog support][6]. + [1]: https://opentelemetry.io/docs/collector/getting-started/ [2]: https://app.datadoghq.com/organization-settings/api-keys [3]: https://docs.datadoghq.com/tracing/setup_overview/open_standards/#opentelemetry-collector-datadog-exporter [4]: https://github.com/open-telemetry/opentelemetry-collector/tree/master/receiver/hostmetricsreceiver [5]: https://github.com/DataDog/integrations-core/blob/master/opentelemetry/metadata.csv [6]: https://docs.datadoghq.com/help/ - diff --git a/process/README.md b/process/README.md index 12609937e37a8..8a8d223781d6f 100644 --- a/process/README.md +++ b/process/README.md @@ -11,14 +11,14 @@ The Process Check lets you: ### Installation -The Process Check is included in the [Datadog Agent][1] package, so you don't need to install anything else on your server. +The Process Check is included in the [Datadog Agent][2] package, so you don't need to install anything else on your server. ### Configuration Unlike many checks, the Process Check doesn't monitor anything useful by default. You must configure which processes you want to monitor, and how. -1. While there's no standard default check configuration, here's an example `process.d/conf.yaml` that monitors SSH/SSHD processes. See the [sample process.d/conf.yaml][2] for all available configuration options: +1. While there's no standard default check configuration, here's an example `process.d/conf.yaml` that monitors SSH/SSHD processes. See the [sample process.d/conf.yaml][3] for all available configuration options: ```yaml init_config: @@ -48,17 +48,17 @@ Unlike many checks, the Process Check doesn't monitor anything useful by default dd-agent ALL=NOPASSWD: /bin/ls /proc/*/fd/ ``` -2. [Restart the Agent][3]. +2. [Restart the Agent][4]. ### Validation -[Run the Agent's `status` subcommand][4] and look for `process` under the Checks section. 
+[Run the Agent's `status` subcommand][5] and look for `process` under the Checks section. ### Metrics notes **Note**: Some metrics are not available on Linux or OSX: -- Process I/O metrics are **not** available on Linux or OSX since the files that the Agent reads (`/proc//io`) are only readable by the process's owner. For more information, [read the Agent FAQ][5] +- Process I/O metrics are **not** available on Linux or OSX since the files that the Agent reads (`/proc//io`) are only readable by the process's owner. For more information, [read the Agent FAQ][6] - `system.cpu.iowait` is not available on Windows. All metrics are per `instance` configured in process.yaml, and are tagged `process_name:`. @@ -72,7 +72,7 @@ For the full list of metrics, see the [Metrics section](#metrics). ### Metrics -See [metadata.csv][6] for a list of metrics provided by this check. +See [metadata.csv][7] for a list of metrics provided by this check. ### Events @@ -80,22 +80,23 @@ The Process Check does not include any events. ### Service Checks -See [service_checks.json][7] for a list of service checks provided by this integration. +See [service_checks.json][8] for a list of service checks provided by this integration. ## Troubleshooting -Need help? Contact [Datadog support][8]. +Need help? Contact [Datadog support][9]. ## Further Reading -To get a better idea of how (or why) to monitor process resource consumption with Datadog, check out this [series of blog posts][9] about it. +To get a better idea of how (or why) to monitor process resource consumption with Datadog, check out this [series of blog posts][10] about it. [1]: https://docs.datadoghq.com/monitors/create/types/process_check/?tab=checkalert -[2]: https://github.com/DataDog/integrations-core/blob/master/process/datadog_checks/process/data/conf.yaml.example -[3]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent -[4]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information -[5]: https://docs.datadoghq.com/agent/faq/why-don-t-i-see-the-system-processes-open-file-descriptors-metric/ -[6]: https://github.com/DataDog/integrations-core/blob/master/process/metadata.csv -[7]: https://github.com/DataDog/integrations-core/blob/master/process/assets/service_checks.json -[8]: https://docs.datadoghq.com/help/ -[9]: https://www.datadoghq.com/blog/process-check-monitoring +[2]: https://app.datadoghq.com/account/settings#agent +[3]: https://github.com/DataDog/integrations-core/blob/master/process/datadog_checks/process/data/conf.yaml.example +[4]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent +[5]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information +[6]: https://docs.datadoghq.com/agent/faq/why-don-t-i-see-the-system-processes-open-file-descriptors-metric/ +[7]: https://github.com/DataDog/integrations-core/blob/master/process/metadata.csv +[8]: https://github.com/DataDog/integrations-core/blob/master/process/assets/service_checks.json +[9]: https://docs.datadoghq.com/help/ +[10]: https://www.datadoghq.com/blog/process-check-monitoring diff --git a/prometheus/README.md b/prometheus/README.md index 6457cd6703e8b..b6461405c7b2c 100644 --- a/prometheus/README.md +++ b/prometheus/README.md @@ -20,7 +20,7 @@ Follow the instructions below to install and configure this check for an Agent r ### Installation -The Prometheus check is packaged with the Agent starting version 6.1.0. 
+The Prometheus check is packaged with the [Datadog Agent][4] starting version 6.1.0. ### Configuration @@ -36,7 +36,7 @@ Each instance is at least composed of: When listing metrics, it's possible to use the wildcard `*` like this `- *` to retrieve all matching metrics. **Note:** use wildcards with caution as it can potentially send a lot of custom metrics. -More advanced settings (ssl, labels joining, custom tags,...) are documented in the [sample prometheus.d/conf.yaml][4] +More advanced settings (ssl, labels joining, custom tags,...) are documented in the [sample prometheus.d/conf.yaml][5] Due to the nature of this integration, it's possible to submit a high number of custom metrics to Datadog. To provide users control over the maximum number of metrics sent in the case of configuration errors or input changes, the check has a default limit of 2000 metrics. If needed, this limit can be increased by setting the option `max_returned_metrics` in the `prometheus.d/conf.yaml` file. @@ -44,7 +44,7 @@ If `send_monotonic_counter: True`, the Agent sends the deltas of the values in q ### Validation -[Run the Agent's `status` subcommand][5] and look for `prometheus` under the Checks section. +[Run the Agent's `status` subcommand][6] and look for `prometheus` under the Checks section. ## Data Collected @@ -90,20 +90,21 @@ sudo systemctl restart prometheus.service alertmanager.service ## Troubleshooting -Need help? Contact [Datadog support][6]. +Need help? Contact [Datadog support][7]. ## Further Reading -- [Introducing Prometheus support for Datadog Agent 6][7] -- [Configuring a Prometheus Check][8] -- [Writing a custom Prometheus Check][9] +- [Introducing Prometheus support for Datadog Agent 6][8] +- [Configuring a Prometheus Check][9] +- [Writing a custom Prometheus Check][10] [1]: https://docs.datadoghq.com/integrations/openmetrics/ [2]: https://docs.datadoghq.com/getting_started/integrations/prometheus/ [3]: https://docs.datadoghq.com/getting_started/integrations/prometheus?tab=docker#configuration -[4]: https://github.com/DataDog/integrations-core/blob/master/prometheus/datadog_checks/prometheus/data/conf.yaml.example -[5]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information -[6]: https://docs.datadoghq.com/help/ -[7]: https://www.datadoghq.com/blog/monitor-prometheus-metrics -[8]: https://docs.datadoghq.com/agent/prometheus/ -[9]: https://docs.datadoghq.com/developers/prometheus/ +[4]: https://app.datadoghq.com/account/settings#agent +[5]: https://github.com/DataDog/integrations-core/blob/master/prometheus/datadog_checks/prometheus/data/conf.yaml.example +[6]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information +[7]: https://docs.datadoghq.com/help/ +[8]: https://www.datadoghq.com/blog/monitor-prometheus-metrics +[9]: https://docs.datadoghq.com/agent/prometheus/ +[10]: https://docs.datadoghq.com/developers/prometheus/ diff --git a/proxysql/README.md b/proxysql/README.md index 7cff0630462c6..dac0032d03c76 100644 --- a/proxysql/README.md +++ b/proxysql/README.md @@ -119,7 +119,7 @@ Need help? Contact [Datadog support][11]. 
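Circling back to the Prometheus check configuration described earlier in this patch, a minimal `prometheus.d/conf.yaml` instance might look like the sketch below. The target URL, namespace, and metric name are placeholders, and `max_returned_metrics` is only needed if you expect to exceed the default limit of 2000 metrics mentioned above.

```yaml
# Illustrative sketch of prometheus.d/conf.yaml; URL, namespace, and metric
# name are placeholders, not values from a real deployment.
init_config:

instances:
  - prometheus_url: http://localhost:9090/metrics
    ## Namespace prepended to all metrics submitted by this instance.
    namespace: my_service
    ## Metrics to retrieve; wildcards are supported but can send a large
    ## number of custom metrics, as cautioned above.
    metrics:
      - my_metric*
    ## Optional: raise the default 2000-metric limit if needed.
    max_returned_metrics: 4000
```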
[1]: https://proxysql.com/ [2]: https://docs.datadoghq.com/agent/kubernetes/integrations/ -[3]: https://docs.datadoghq.com/agent/ +[3]: https://app.datadoghq.com/account/settings#agent [4]: https://docs.datadoghq.com/agent/guide/agent-configuration-files/#agent-configuration-directory [5]: https://github.com/DataDog/integrations-core/blob/master/proxysql/datadog_checks/proxysql/data/conf.yaml.example [6]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent diff --git a/rethinkdb/README.md b/rethinkdb/README.md index 53b8b95053d47..68ec0baf4b325 100644 --- a/rethinkdb/README.md +++ b/rethinkdb/README.md @@ -103,7 +103,7 @@ Need help? Contact [Datadog support][13]. [1]: https://rethinkdb.com [2]: https://docs.datadoghq.com/agent/kubernetes/integrations/ -[3]: https://docs.datadoghq.com/agent/ +[3]: https://app.datadoghq.com/account/settings#agent [4]: https://rethinkdb.com/docs/permissions-and-accounts/ [5]: https://rethinkdb.com/docs/security/#the-admin-account [6]: https://docs.datadoghq.com/agent/guide/agent-configuration-files/#agent-configuration-directory diff --git a/sap_hana/README.md b/sap_hana/README.md index 30bc98f343250..5e71257b15bbc 100644 --- a/sap_hana/README.md +++ b/sap_hana/README.md @@ -102,7 +102,7 @@ Need help? Contact [Datadog support][9]. [1]: https://www.sap.com/products/hana.html -[2]: https://docs.datadoghq.com/agent/ +[2]: https://app.datadoghq.com/account/settings#agent [3]: https://help.sap.com/viewer/0eec0d68141541d1b07893a39944924e/2.0.02/en-US/d12c86af7cb442d1b9f8520e2aba7758.html [4]: https://github.com/DataDog/integrations-core/blob/master/sap_hana/datadog_checks/sap_hana/data/conf.yaml.example [5]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-restart-the-agent diff --git a/scylla/README.md b/scylla/README.md index e436f28d20a86..7154c52304563 100644 --- a/scylla/README.md +++ b/scylla/README.md @@ -73,7 +73,7 @@ Need help? Contact [Datadog support][10]. [1]: https://scylladb.com -[2]: https://docs.datadoghq.com/agent/ +[2]: https://app.datadoghq.com/account/settings#agent [3]: https://github.com/DataDog/integrations-core/blob/master/scylla/datadog_checks/scylla/data/conf.yaml.example [4]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent [5]: https://docs.scylladb.com/getting-started/logging/ diff --git a/sidekiq/README.md b/sidekiq/README.md index 06c80d12fae99..c831d3db715f0 100644 --- a/sidekiq/README.md +++ b/sidekiq/README.md @@ -70,15 +70,15 @@ No additional installation is needed on your server. worker: "$1" ``` -4. [Restart the Agent][4]. +4. [Restart the Agent][8]. ## Data Collected ### Metrics -See [metadata.csv][8] for a list of metrics provided by this integration. +See [metadata.csv][9] for a list of metrics provided by this integration. -The Sidekiq integration also allows custom metrics, see the [Sidekiq documentation][9] for custom metric inspiration. +The Sidekiq integration also allows custom metrics, see the [Sidekiq documentation][10] for custom metric inspiration. ### Log collection @@ -98,9 +98,9 @@ The Sidekiq integration also allows custom metrics, see the [Sidekiq documentati service: ``` - Change the `path` and `service` parameter values and configure them for your environment. If you cannot find your logs, [see the Sidekiq documentation on more details about logs][10]. + Change the `path` and `service` parameter values and configure them for your environment. 
If you cannot find your logs, [see the Sidekiq documentation on more details about logs][11]. -3. [Restart the Agent][4]. +3. [Restart the Agent][8]. ### Service Checks @@ -112,16 +112,17 @@ Sidekiq does not include any events. ## Troubleshooting -Need help? Contact [Datadog support][11]. +Need help? Contact [Datadog support][12]. [1]: https://sidekiq.org/ [2]: https://docs.datadoghq.com/developers/dogstatsd/ [3]: https://github.com/DataDog/dogstatsd-ruby -[4]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent +[4]: https://app.datadoghq.com/account/settings#agent [5]: https://github.com/mperham/sidekiq/wiki/Pro-Metrics [6]: https://github.com/mperham/sidekiq/wiki/Ent-Historical-Metrics [7]: https://docs.datadoghq.com/agent/guide/agent-configuration-files/ -[8]: https://github.com/DataDog/integrations-core/blob/master/sidekiq/metadata.csv -[9]: https://github.com/mperham/sidekiq/wiki/Ent-Historical-Metrics#custom -[10]: https://github.com/mperham/sidekiq/wiki/Logging#log-file -[11]: https://docs.datadoghq.com/help/ +[8]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent +[9]: https://github.com/DataDog/integrations-core/blob/master/sidekiq/metadata.csv +[10]: https://github.com/mperham/sidekiq/wiki/Ent-Historical-Metrics#custom +[11]: https://github.com/mperham/sidekiq/wiki/Logging#log-file +[12]: https://docs.datadoghq.com/help/ diff --git a/singlestore/README.md b/singlestore/README.md index e47d0e75e7b73..a25c37a4f79f4 100644 --- a/singlestore/README.md +++ b/singlestore/README.md @@ -111,8 +111,8 @@ Need help? Contact [Datadog support][10]. [1]: https://www.singlestore.com/ -[2]: https://docs.datadoghq.com/agent/kubernetes/integrations/ -[3]: https://docs.datadoghq.com/agent/ +[2]: https://docs.datadoghq.com/getting_started/agent/autodiscovery#integration-templates +[3]: https://app.datadoghq.com/account/settings#agent [4]: https://github.com/DataDog/integrations-core/blob/master/singlestore/datadog_checks/singlestore/data/conf.yaml.example [5]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent [6]: https://docs.datadoghq.com/agent/kubernetes/log/ diff --git a/snowflake/README.md b/snowflake/README.md index 16aacba8c4fa8..8fa7dd970de4c 100644 --- a/snowflake/README.md +++ b/snowflake/README.md @@ -220,7 +220,7 @@ Need help? Contact [Datadog support][15]. [1]: https://www.snowflake.com/ -[2]: https://docs.datadoghq.com/agent/kubernetes/integrations/ +[2]: https://app.datadoghq.com/account/settings#agent [3]: https://docs.datadoghq.com/agent/guide/agent-v6-python-3/?tab=hostagent [4]: https://docs.snowflake.com/en/sql-reference/account-usage.html#enabling-account-usage-for-other-roles [5]: https://github.com/DataDog/integrations-core/blob/master/snowflake/datadog_checks/snowflake/data/conf.yaml.example diff --git a/sonarqube/README.md b/sonarqube/README.md index 6b4dd91d1e639..aab744a737d77 100644 --- a/sonarqube/README.md +++ b/sonarqube/README.md @@ -252,7 +252,7 @@ Need help? Contact [Datadog support][8]. 
[1]: https://www.sonarqube.org -[2]: https://docs.datadoghq.com/agent/ +[2]: https://app.datadoghq.com/account/settings#agent [3]: https://github.com/DataDog/integrations-core/blob/master/sonarqube/datadog_checks/sonarqube/data/metrics.yaml [4]: https://docs.sonarqube.org/latest/instance-administration/monitoring/ [5]: https://docs.sonarqube.org/latest/instance-administration/monitoring/#header-4 diff --git a/twistlock/README.md b/twistlock/README.md index e23de408686cb..681d25f611a25 100644 --- a/twistlock/README.md +++ b/twistlock/README.md @@ -21,16 +21,16 @@ To configure this check for an Agent running on a host: ##### Metric collection -1. Edit the `twistlock.d/conf.yaml` file, in the `conf.d/` folder at the root of your Agent's configuration directory to start collecting your twistlock performance data. See the [sample twistlock.d/conf.yaml][2] for all available configuration options. +1. Edit the `twistlock.d/conf.yaml` file, in the `conf.d/` folder at the root of your Agent's configuration directory to start collecting your twistlock performance data. See the [sample twistlock.d/conf.yaml][3] for all available configuration options. -2. [Restart the Agent][3]. +2. [Restart the Agent][4]. #### Containerized -For containerized environments, see the [Autodiscovery Integration Templates][4] for guidance on applying the parameters below. +For containerized environments, see the [Autodiscovery Integration Templates][5] for guidance on applying the parameters below. ##### Metric collection @@ -72,7 +72,7 @@ spec: _Available for Agent versions >6.0_ -Collecting logs is disabled by default in the Datadog Agent. To enable it, see [Kubernetes log collection documentation][5]. +Collecting logs is disabled by default in the Datadog Agent. To enable it, see [Kubernetes log collection documentation][6]. | Parameter | Value | | -------------- | ------------------------------------------------- | @@ -80,7 +80,7 @@ Collecting logs is disabled by default in the Datadog Agent. To enable it, see [ ###### Kubernetes -1. Collecting logs is disabled by default in the Datadog Agent. Enable it in your [DaemonSet configuration][6]: +1. Collecting logs is disabled by default in the Datadog Agent. Enable it in your [DaemonSet configuration][7]: ```yaml #(...) @@ -93,7 +93,7 @@ Collecting logs is disabled by default in the Datadog Agent. To enable it, see [ #(...) ``` -2. Make sure that the Docker socket is mounted to the Datadog Agent as done in [this manifest][7]. +2. Make sure that the Docker socket is mounted to the Datadog Agent as done in [this manifest][8]. 3. Make sure the log section is included in the Pod annotation for the defender, where the container name can be found just below in the pod spec: @@ -101,7 +101,7 @@ Collecting logs is disabled by default in the Datadog Agent. To enable it, see [ ad.datadoghq.com/.logs: '[{"source": "twistlock", "service": "twistlock"}]' ``` -4. [Restart the Agent][3]. +4. [Restart the Agent][4]. ###### Docker @@ -117,22 +117,22 @@ Collecting logs is disabled by default in the Datadog Agent. To enable it, see [ ad.datadoghq.com/.logs: '[{"source": "twistlock", "service": "twistlock"}]' ``` -3. Make sure that the Docker socket is mounted to the Datadog Agent. More information about the required configuration to collect logs with the Datadog Agent available in the [Docker documentation][8]. +3. Make sure that the Docker socket is mounted to the Datadog Agent. 
More information about the required configuration to collect logs with the Datadog Agent available in the [Docker documentation][9]. -4. [Restart the Agent][3]. +4. [Restart the Agent][4]. ### Validation -[Run the Agent's status subcommand][9] and look for `twistlock` under the Checks section. +[Run the Agent's status subcommand][10] and look for `twistlock` under the Checks section. ## Data Collected ### Metrics -See [metadata.csv][10] for a list of metrics provided by this check. +See [metadata.csv][11] for a list of metrics provided by this check. ### Events @@ -140,22 +140,23 @@ Prisma Cloud Compute Edition sends an event when a new CVE is found. ### Service Checks -See [service_checks.json][11] for a list of service checks provided by this integration. +See [service_checks.json][12] for a list of service checks provided by this integration. ## Troubleshooting -Need help? Contact [Datadog support][12]. +Need help? Contact [Datadog support][13]. [1]: https://www.paloaltonetworks.com/prisma/cloud -[2]: https://github.com/DataDog/integrations-core/blob/master/twistlock/datadog_checks/twistlock/data/conf.yaml.example -[3]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent -[4]: https://docs.datadoghq.com/agent/kubernetes/integrations/ -[5]: https://docs.datadoghq.com/agent/kubernetes/log/?tab=containerinstallation#setup -[6]: https://docs.datadoghq.com/agent/kubernetes/daemonset_setup/#log-collection -[7]: https://docs.datadoghq.com/agent/kubernetes/daemonset_setup/#create-manifest -[8]: https://docs.datadoghq.com/agent/docker/log/?tab=containerinstallation -[9]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information -[10]: https://github.com/DataDog/integrations-core/blob/master/twistlock/metadata.csv -[11]: https://github.com/DataDog/integrations-core/blob/master/twistlock/assets/service_checks.json -[12]: https://docs.datadoghq.com/help/ +[2]: https://app.datadoghq.com/account/settings#agent +[3]: https://github.com/DataDog/integrations-core/blob/master/twistlock/datadog_checks/twistlock/data/conf.yaml.example +[4]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent +[5]: https://docs.datadoghq.com/agent/kubernetes/integrations/ +[6]: https://docs.datadoghq.com/agent/kubernetes/log/?tab=containerinstallation#setup +[7]: https://docs.datadoghq.com/agent/kubernetes/daemonset_setup/#log-collection +[8]: https://docs.datadoghq.com/agent/kubernetes/daemonset_setup/#create-manifest +[9]: https://docs.datadoghq.com/agent/docker/log/?tab=containerinstallation +[10]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information +[11]: https://github.com/DataDog/integrations-core/blob/master/twistlock/metadata.csv +[12]: https://github.com/DataDog/integrations-core/blob/master/twistlock/assets/service_checks.json +[13]: https://docs.datadoghq.com/help/ diff --git a/vertica/README.md b/vertica/README.md index d6e4bc1c1c947..6ef19e692cf43 100644 --- a/vertica/README.md +++ b/vertica/README.md @@ -84,7 +84,7 @@ Need help? Contact [Datadog support][12]. 
[1]: https://www.vertica.com -[2]: https://docs.datadoghq.com/agent/ +[2]: https://app.datadoghq.com/account/settings#agent [3]: https://github.com/DataDog/integrations-core/blob/master/vertica/datadog_checks/vertica/data/conf.yaml.example [4]: https://www.vertica.com/docs/9.2.x/HTML/Content/Authoring/Glossary/vsql.htm [5]: https://www.vertica.com/docs/9.2.x/HTML/Content/Authoring/AdministratorsGuide/DBUsersAndPrivileges/Roles/SYSMONITORROLE.htm diff --git a/voltdb/README.md b/voltdb/README.md index 0522b997b2e96..f9e5bd6e3bc36 100644 --- a/voltdb/README.md +++ b/voltdb/README.md @@ -12,7 +12,7 @@ Follow the instructions below to install and configure this check for an Agent r ### Installation -The VoltDB check is included in the [Datadog Agent][2] package. +The VoltDB check is included in the [Datadog Agent][3] package. No additional installation is needed on your server. ### Configuration @@ -26,7 +26,7 @@ No additional installation is needed on your server. ``` -2. Edit the `voltdb.d/conf.yaml` file, in the `conf.d/` folder at the root of your Agent's configuration directory to start collecting your VoltDB performance data. See the [sample voltdb.d/conf.yaml][3] for all available configuration options. +2. Edit the `voltdb.d/conf.yaml` file, in the `conf.d/` folder at the root of your Agent's configuration directory to start collecting your VoltDB performance data. See the [sample voltdb.d/conf.yaml][4] for all available configuration options. ```yaml init_config: @@ -37,11 +37,11 @@ No additional installation is needed on your server. password: "" ``` -3. [Restart the Agent][4]. +3. [Restart the Agent][5]. #### TLS support -If [TLS/SSL][5] is enabled on the client HTTP port: +If [TLS/SSL][6] is enabled on the client HTTP port: 1. Export your certificate CA file in PEM format: @@ -76,7 +76,7 @@ If [TLS/SSL][5] is enabled on the client HTTP port: tls_ca_cert: /path/to/voltdb-ca.pem ``` -3. [Restart the Agent][4]. +3. [Restart the Agent][5]. #### Log collection @@ -95,21 +95,21 @@ If [TLS/SSL][5] is enabled on the client HTTP port: source: voltdb ``` - Change the `path` value based on your environment. See the [sample `voltdb.d/conf.yaml` file][3] for all available configuration options. + Change the `path` value based on your environment. See the [sample `voltdb.d/conf.yaml` file][4] for all available configuration options. - 3. [Restart the Agent][4]. + 3. [Restart the Agent][5]. - See [Datadog's documentation][6] for additional information on how to configure the Agent for log collection in Kubernetes environments. + See [Datadog's documentation][7] for additional information on how to configure the Agent for log collection in Kubernetes environments. ### Validation -[Run the Agent's status subcommand][7] and look for `voltdb` under the Checks section. +[Run the Agent's status subcommand][8] and look for `voltdb` under the Checks section. ## Data Collected ### Metrics -See [metadata.csv][8] for a list of metrics provided by this check. +See [metadata.csv][9] for a list of metrics provided by this check. ### Events @@ -117,11 +117,11 @@ This check does not include any events. ### Service Checks -See [service_checks.json][9] for a list of service checks provided by this integration. +See [service_checks.json][10] for a list of service checks provided by this integration. ## Troubleshooting -Need help? Contact [Datadog support][10]. +Need help? Contact [Datadog support][11]. ## Further Reading @@ -129,11 +129,12 @@ Need help? Contact [Datadog support][10]. 
[1]: https://voltdb.com [2]: https://docs.datadoghq.com/agent/kubernetes/integrations/ -[3]: https://github.com/DataDog/integrations-core/blob/master/voltdb/datadog_checks/voltdb/data/conf.yaml.example -[4]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent -[5]: https://docs.voltdb.com/UsingVoltDB/SecuritySSL.php -[6]: https://docs.datadoghq.com/agent/kubernetes/log/ -[7]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information -[8]: https://github.com/DataDog/integrations-core/blob/master/voltdb/metadata.csv -[9]: https://github.com/DataDog/integrations-core/blob/master/voltdb/assets/service_checks.json -[10]: https://docs.datadoghq.com/help/ +[3]: https://app.datadoghq.com/account/settings#agent +[4]: https://github.com/DataDog/integrations-core/blob/master/voltdb/datadog_checks/voltdb/data/conf.yaml.example +[5]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent +[6]: https://docs.voltdb.com/UsingVoltDB/SecuritySSL.php +[7]: https://docs.datadoghq.com/agent/kubernetes/log/ +[8]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information +[9]: https://github.com/DataDog/integrations-core/blob/master/voltdb/metadata.csv +[10]: https://github.com/DataDog/integrations-core/blob/master/voltdb/assets/service_checks.json +[11]: https://docs.datadoghq.com/help/ From 163707dad6fd1a08faece0a7d13a564f108b950c Mon Sep 17 00:00:00 2001 From: Fanny Jiang Date: Mon, 8 Nov 2021 16:57:27 -0500 Subject: [PATCH 18/19] update mergefreeze process (#10579) --- docs/developer/process/agent-release/pre-release.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/developer/process/agent-release/pre-release.md b/docs/developer/process/agent-release/pre-release.md index a3e8e66eedb55..dd13fbb164596 100644 --- a/docs/developer/process/agent-release/pre-release.md +++ b/docs/developer/process/agent-release/pre-release.md @@ -40,6 +40,8 @@ Ensure that you have configured the following: At midnight (EDT/EST) on the Friday before QA week we freeze, at which point the release manager will release all integrations with pending changes then branch off. If no new PRs are active at the end of business hours (EDT/EST), try to make the initial [release](#release) then, so that the QA process can start on the next Monday morning. +The release manager will initiate code freeze of the `integrations-core` repo using the [mergefreeze](https://datadoghq.atlassian.net/wiki/spaces/agent/pages/2255421890/mergefreeze) Github App by clicking the `Freeze now` button. The mergefreeze app will add a failing `mergefreeze` CI check on all PRs targeting the protected branch, preventing them from being merged by non-admin users. + ### Release 1. 
Make a pull request to release [any new integrations](../integration-release.md#new-integrations), then merge it and pull `master` From 006e9e340041dd2f07852ab28e70dfc63b89ca11 Mon Sep 17 00:00:00 2001 From: Ofek Lev Date: Mon, 8 Nov 2021 21:55:45 -0500 Subject: [PATCH 19/19] Add runtime configuration validation (#8953) * Sync config models * re-sync * re-sync * Update tox.ini --- .../marklogic/config_models/__init__.py | 18 ++ .../marklogic/config_models/defaults.py | 180 ++++++++++++++++++ .../marklogic/config_models/instance.py | 99 ++++++++++ .../marklogic/config_models/shared.py | 54 ++++++ .../marklogic/config_models/validators.py | 3 + marklogic/tox.ini | 2 + 6 files changed, 356 insertions(+) create mode 100644 marklogic/datadog_checks/marklogic/config_models/__init__.py create mode 100644 marklogic/datadog_checks/marklogic/config_models/defaults.py create mode 100644 marklogic/datadog_checks/marklogic/config_models/instance.py create mode 100644 marklogic/datadog_checks/marklogic/config_models/shared.py create mode 100644 marklogic/datadog_checks/marklogic/config_models/validators.py diff --git a/marklogic/datadog_checks/marklogic/config_models/__init__.py b/marklogic/datadog_checks/marklogic/config_models/__init__.py new file mode 100644 index 0000000000000..ba42dbdc7ffb0 --- /dev/null +++ b/marklogic/datadog_checks/marklogic/config_models/__init__.py @@ -0,0 +1,18 @@ +# (C) Datadog, Inc. 2021-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +from .instance import InstanceConfig +from .shared import SharedConfig + + +class ConfigMixin: + _config_model_instance: InstanceConfig + _config_model_shared: SharedConfig + + @property + def config(self) -> InstanceConfig: + return self._config_model_instance + + @property + def shared_config(self) -> SharedConfig: + return self._config_model_shared diff --git a/marklogic/datadog_checks/marklogic/config_models/defaults.py b/marklogic/datadog_checks/marklogic/config_models/defaults.py new file mode 100644 index 0000000000000..cde59a2320b52 --- /dev/null +++ b/marklogic/datadog_checks/marklogic/config_models/defaults.py @@ -0,0 +1,180 @@ +# (C) Datadog, Inc. 
2021-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +from datadog_checks.base.utils.models.fields import get_default_field_value + + +def shared_proxy(field, value): + return get_default_field_value(field, value) + + +def shared_service(field, value): + return get_default_field_value(field, value) + + +def shared_skip_proxy(field, value): + return False + + +def shared_timeout(field, value): + return 10 + + +def instance_allow_redirects(field, value): + return True + + +def instance_auth_token(field, value): + return get_default_field_value(field, value) + + +def instance_auth_type(field, value): + return 'basic' + + +def instance_aws_host(field, value): + return get_default_field_value(field, value) + + +def instance_aws_region(field, value): + return get_default_field_value(field, value) + + +def instance_aws_service(field, value): + return get_default_field_value(field, value) + + +def instance_connect_timeout(field, value): + return get_default_field_value(field, value) + + +def instance_disable_generic_tags(field, value): + return False + + +def instance_empty_default_hostname(field, value): + return False + + +def instance_enable_health_service_checks(field, value): + return False + + +def instance_extra_headers(field, value): + return get_default_field_value(field, value) + + +def instance_headers(field, value): + return get_default_field_value(field, value) + + +def instance_kerberos_auth(field, value): + return 'disabled' + + +def instance_kerberos_cache(field, value): + return get_default_field_value(field, value) + + +def instance_kerberos_delegate(field, value): + return False + + +def instance_kerberos_force_initiate(field, value): + return False + + +def instance_kerberos_hostname(field, value): + return get_default_field_value(field, value) + + +def instance_kerberos_keytab(field, value): + return get_default_field_value(field, value) + + +def instance_kerberos_principal(field, value): + return get_default_field_value(field, value) + + +def instance_log_requests(field, value): + return False + + +def instance_min_collection_interval(field, value): + return 15 + + +def instance_ntlm_domain(field, value): + return get_default_field_value(field, value) + + +def instance_password(field, value): + return get_default_field_value(field, value) + + +def instance_persist_connections(field, value): + return False + + +def instance_proxy(field, value): + return get_default_field_value(field, value) + + +def instance_read_timeout(field, value): + return get_default_field_value(field, value) + + +def instance_request_size(field, value): + return 16 + + +def instance_resource_filters(field, value): + return get_default_field_value(field, value) + + +def instance_service(field, value): + return get_default_field_value(field, value) + + +def instance_skip_proxy(field, value): + return False + + +def instance_tags(field, value): + return get_default_field_value(field, value) + + +def instance_timeout(field, value): + return 10 + + +def instance_tls_ca_cert(field, value): + return get_default_field_value(field, value) + + +def instance_tls_cert(field, value): + return get_default_field_value(field, value) + + +def instance_tls_ignore_warning(field, value): + return False + + +def instance_tls_private_key(field, value): + return get_default_field_value(field, value) + + +def instance_tls_use_host_header(field, value): + return False + + +def instance_tls_verify(field, value): + return True + + +def instance_use_legacy_auth_encoding(field, value): + 
return True + + +def instance_username(field, value): + return get_default_field_value(field, value) diff --git a/marklogic/datadog_checks/marklogic/config_models/instance.py b/marklogic/datadog_checks/marklogic/config_models/instance.py new file mode 100644 index 0000000000000..b311a8c1e8071 --- /dev/null +++ b/marklogic/datadog_checks/marklogic/config_models/instance.py @@ -0,0 +1,99 @@ +# (C) Datadog, Inc. 2021-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +from __future__ import annotations + +from typing import Any, Mapping, Optional, Sequence + +from pydantic import BaseModel, root_validator, validator + +from datadog_checks.base.utils.functions import identity +from datadog_checks.base.utils.models import validation + +from . import defaults, validators + + +class AuthToken(BaseModel): + class Config: + allow_mutation = False + + reader: Optional[Mapping[str, Any]] + writer: Optional[Mapping[str, Any]] + + +class Proxy(BaseModel): + class Config: + allow_mutation = False + + http: Optional[str] + https: Optional[str] + no_proxy: Optional[Sequence[str]] + + +class InstanceConfig(BaseModel): + class Config: + allow_mutation = False + + allow_redirects: Optional[bool] + auth_token: Optional[AuthToken] + auth_type: Optional[str] + aws_host: Optional[str] + aws_region: Optional[str] + aws_service: Optional[str] + connect_timeout: Optional[float] + disable_generic_tags: Optional[bool] + empty_default_hostname: Optional[bool] + enable_health_service_checks: Optional[bool] + extra_headers: Optional[Mapping[str, Any]] + headers: Optional[Mapping[str, Any]] + kerberos_auth: Optional[str] + kerberos_cache: Optional[str] + kerberos_delegate: Optional[bool] + kerberos_force_initiate: Optional[bool] + kerberos_hostname: Optional[str] + kerberos_keytab: Optional[str] + kerberos_principal: Optional[str] + log_requests: Optional[bool] + min_collection_interval: Optional[float] + ntlm_domain: Optional[str] + password: Optional[str] + persist_connections: Optional[bool] + proxy: Optional[Proxy] + read_timeout: Optional[float] + request_size: Optional[float] + resource_filters: Optional[Sequence[Mapping[str, Any]]] + service: Optional[str] + skip_proxy: Optional[bool] + tags: Optional[Sequence[str]] + timeout: Optional[float] + tls_ca_cert: Optional[str] + tls_cert: Optional[str] + tls_ignore_warning: Optional[bool] + tls_private_key: Optional[str] + tls_use_host_header: Optional[bool] + tls_verify: Optional[bool] + url: str + use_legacy_auth_encoding: Optional[bool] + username: Optional[str] + + @root_validator(pre=True) + def _initial_validation(cls, values): + return validation.core.initialize_config(getattr(validators, 'initialize_instance', identity)(values)) + + @validator('*', pre=True, always=True) + def _ensure_defaults(cls, v, field): + if v is not None or field.required: + return v + + return getattr(defaults, f'instance_{field.name}')(field, v) + + @validator('*') + def _run_validations(cls, v, field): + if not v: + return v + + return getattr(validators, f'instance_{field.name}', identity)(v, field=field) + + @root_validator(pre=False) + def _final_validation(cls, values): + return validation.core.finalize_config(getattr(validators, 'finalize_instance', identity)(values)) diff --git a/marklogic/datadog_checks/marklogic/config_models/shared.py b/marklogic/datadog_checks/marklogic/config_models/shared.py new file mode 100644 index 0000000000000..4fc6216ab6c2f --- /dev/null +++ b/marklogic/datadog_checks/marklogic/config_models/shared.py @@ -0,0 
+1,54 @@ +# (C) Datadog, Inc. 2021-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +from __future__ import annotations + +from typing import Optional, Sequence + +from pydantic import BaseModel, root_validator, validator + +from datadog_checks.base.utils.functions import identity +from datadog_checks.base.utils.models import validation + +from . import defaults, validators + + +class Proxy(BaseModel): + class Config: + allow_mutation = False + + http: Optional[str] + https: Optional[str] + no_proxy: Optional[Sequence[str]] + + +class SharedConfig(BaseModel): + class Config: + allow_mutation = False + + proxy: Optional[Proxy] + service: Optional[str] + skip_proxy: Optional[bool] + timeout: Optional[float] + + @root_validator(pre=True) + def _initial_validation(cls, values): + return validation.core.initialize_config(getattr(validators, 'initialize_shared', identity)(values)) + + @validator('*', pre=True, always=True) + def _ensure_defaults(cls, v, field): + if v is not None or field.required: + return v + + return getattr(defaults, f'shared_{field.name}')(field, v) + + @validator('*') + def _run_validations(cls, v, field): + if not v: + return v + + return getattr(validators, f'shared_{field.name}', identity)(v, field=field) + + @root_validator(pre=False) + def _final_validation(cls, values): + return validation.core.finalize_config(getattr(validators, 'finalize_shared', identity)(values)) diff --git a/marklogic/datadog_checks/marklogic/config_models/validators.py b/marklogic/datadog_checks/marklogic/config_models/validators.py new file mode 100644 index 0000000000000..9d0b0155542cb --- /dev/null +++ b/marklogic/datadog_checks/marklogic/config_models/validators.py @@ -0,0 +1,3 @@ +# (C) Datadog, Inc. 2021-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) diff --git a/marklogic/tox.ini b/marklogic/tox.ini index 752fe178f93c7..fd9462c102400 100644 --- a/marklogic/tox.ini +++ b/marklogic/tox.ini @@ -14,6 +14,8 @@ dd_mypy_args = --non-interactive datadog_checks/ tests/ + --exclude + '.*/config_models/.*\.py$' dd_mypy_deps = types-mock==0.1.5 description =
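
Editor's note (not part of the patch series above): the generated `config_models` package introduced in PATCH 19/19 is consumed by the check class itself. As a minimal sketch — the check class name and the metric emitted are illustrative assumptions, not taken from this patch — a check opts into runtime configuration validation by inheriting `ConfigMixin` alongside `AgentCheck`, after which validated settings are read as typed attributes instead of raw dictionary lookups:

```python
# Minimal sketch, assuming the marklogic/config_models package added in this
# patch. The class name and the metric name below are illustrative only.
from datadog_checks.base import AgentCheck

from .config_models import ConfigMixin


class MarkLogicCheck(AgentCheck, ConfigMixin):
    __NAMESPACE__ = 'marklogic'

    def check(self, _):
        # `self.config` resolves to the validated InstanceConfig (see the
        # ConfigMixin properties above); it is populated by the base check at
        # runtime when config models are present, so fields such as `url` and
        # `tags` are typed attributes rather than dict lookups.
        tags = list(self.config.tags or [])
        tags.append('url:{}'.format(self.config.url))
        self.gauge('can_connect', 1, tags=tags)
```

The otherwise empty `validators.py` added by the patch is the hook point for custom logic: `InstanceConfig` looks up optional `initialize_instance`, `instance_<field>`, and `finalize_instance` functions there (falling back to `identity`), so per-field validation can be added later without touching the generated models.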