Add e2e support (#2995)
zippolyte authored Jan 22, 2019
1 parent 2c03330 commit eb3c659
Showing 3 changed files with 129 additions and 104 deletions.
94 changes: 94 additions & 0 deletions sqlserver/tests/common.py
@@ -0,0 +1,94 @@
# (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)

import os
import sys

from datadog_checks.sqlserver import SQLServer
from datadog_checks.dev import get_docker_hostname


def lib_tds_path():
"""
This is definitely ugly but should do the trick most of the time. On OSX
we can point unixODBC directly to the FreeTDS client library; on Linux we
instead need to define the 'FreeTDS' driver in odbcinst.ini
"""
if sys.platform == 'darwin':
return '/usr/local/lib/libtdsodbc.so'
return 'FreeTDS'


HOST = get_docker_hostname()
PORT = 1433
HERE = os.path.dirname(os.path.abspath(__file__))
CHECK_NAME = "sqlserver"

EXPECTED_METRICS = [m[0] for m in SQLServer.METRICS]

INSTANCE_DOCKER = {
'host': '{},1433'.format(HOST),
'connector': 'odbc',
'driver': lib_tds_path(),
'username': 'sa',
'password': 'Password123',
'tags': ['optional:tag1'],
}

INSTANCE_SQL2008 = {
'host': r'(local)\SQL2008R2SP2',
'username': 'sa',
'password': 'Password12!',
}

INIT_CONFIG = {
'custom_metrics': [
{
'name': 'sqlserver.clr.execution',
'type': 'gauge',
'counter_name': 'CLR Execution',
},
{
'name': 'sqlserver.exec.in_progress',
'type': 'gauge',
'counter_name': 'OLEDB calls',
'instance_name': 'Cumulative execution time (ms) per second',
},
{
'name': 'sqlserver.db.commit_table_entries',
'type': 'gauge',
'counter_name': 'Log Flushes/sec',
'instance_name': 'ALL',
'tag_by': 'db',
},
],
}

INIT_CONFIG_OBJECT_NAME = {
'custom_metrics': [
{
'name': 'sqlserver.cache.hit_ratio',
'counter_name': 'Cache Hit Ratio',
'instance_name': 'SQL Plans',
'object_name': 'SQLServer:Plan Cache',
'tags': [
'optional_tag:tag1'
]
},
{
'name': 'sqlserver.active_requests',
'counter_name': 'Active requests',
'instance_name': 'default',
'object_name': 'SQLServer:Workload Group Stats',
'tags': [
'optional_tag:tag1'
]
}
]
}

FULL_CONFIG = {
"init_config": INIT_CONFIG,
"instances": [INSTANCE_DOCKER]
}
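
Note: the string lib_tds_path() returns on Linux is only useful if a 'FreeTDS' entry is actually registered in odbcinst.ini, as its docstring says. A small sanity-check sketch, not part of this commit, using pyodbc's drivers() listing:

import sys

import pyodbc

# Mirrors lib_tds_path() above: full library path on OSX, registered driver name on Linux.
driver = '/usr/local/lib/libtdsodbc.so' if sys.platform == 'darwin' else 'FreeTDS'

if sys.platform != 'darwin' and driver not in pyodbc.drivers():
    # On Linux the 'FreeTDS' name must appear in odbcinst.ini, pointing at
    # libtdsodbc.so; on OSX the absolute path needs no registration.
    raise RuntimeError("FreeTDS driver is not registered in odbcinst.ini")

print('ODBC driver in use: {}'.format(driver))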
97 changes: 23 additions & 74 deletions sqlserver/tests/conftest.py
@@ -1,105 +1,54 @@
# (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)

import os
import time
import sys
from copy import deepcopy

import pytest
try:
import pyodbc
except ImportError:
pyodbc = None

from datadog_checks.dev import docker_run, get_docker_hostname, RetryError

from datadog_checks.dev import docker_run, WaitFor

HOST = get_docker_hostname()
PORT = 1433
HERE = os.path.dirname(os.path.abspath(__file__))
from .common import (
INIT_CONFIG, INIT_CONFIG_OBJECT_NAME, INSTANCE_SQL2008, INSTANCE_DOCKER, HOST, PORT, HERE, FULL_CONFIG, lib_tds_path
)


def lib_tds_path():
"""
This is definitely ugly but should do the trick most of the times. On OSX
we can point unixODBC directly to the FreeTDS client library. On linux instead
we need to define the 'FreeTDS' driver in odbcinst.ini
"""
if sys.platform == 'darwin':
return '/usr/local/lib/libtdsodbc.so'
return 'FreeTDS'
@pytest.fixture
def init_config():
return deepcopy(INIT_CONFIG)


@pytest.fixture
def init_config():
return {
'custom_metrics': [
{
'name': 'sqlserver.clr.execution',
'type': 'gauge',
'counter_name': 'CLR Execution',
},
{
'name': 'sqlserver.exec.in_progress',
'type': 'gauge',
'counter_name': 'OLEDB calls',
'instance_name': 'Cumulative execution time (ms) per second',
},
{
'name': 'sqlserver.db.commit_table_entries',
'type': 'gauge',
'counter_name': 'Log Flushes/sec',
'instance_name': 'ALL',
'tag_by': 'db',
},
],
}
def init_config_object_name():
return deepcopy(INIT_CONFIG_OBJECT_NAME)


@pytest.fixture
def instance_sql2008():
return {
'host': r'(local)\SQL2008R2SP2',
'username': 'sa',
'password': 'Password12!',
}
return deepcopy(INSTANCE_SQL2008)


@pytest.fixture
def instance_docker():
return {
'host': '{},1433'.format(HOST),
'connector': 'odbc',
'driver': lib_tds_path(),
'username': 'sa',
'password': 'Password123',
'tags': ['optional:tag1'],
}
return deepcopy(INSTANCE_DOCKER)


@pytest.fixture(scope='session')
def sqlserver():
def dd_environment():
if pyodbc is None:
raise Exception("pyodbc is not installed!")

compose_file = os.path.join(HERE, 'compose', 'docker-compose.yaml')
conn = 'DRIVER={};Server={},{};Database=master;UID=sa;PWD=Password123;'
conn = conn.format(lib_tds_path(), HOST, PORT)

def condition():
sys.stderr.write("Waiting for SQLServer to boot...\n")
booted = False
for _ in xrange(10):
try:
pyodbc.connect(conn, timeout=30)
booted = True
except pyodbc.Error as e:
sys.stderr.write(str(e)+'\n')
time.sleep(3)

if not booted:
raise RetryError("SQLServer failed to boot!")
sys.stderr.write("SQLServer boot complete.\n")

with docker_run(compose_file=compose_file, conditions=[condition]):
yield
def sqlserver():
conn = 'DRIVER={};Server={},{};Database=master;UID=sa;PWD=Password123;'.format(lib_tds_path(), HOST, PORT)
pyodbc.connect(conn, timeout=30)

with docker_run(
compose_file=os.path.join(HERE, 'compose', 'docker-compose.yaml'),
conditions=[WaitFor(sqlserver, wait=3, attempts=10)]
):
yield FULL_CONFIG
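
With dd_environment now yielding FULL_CONFIG, the same compose environment can also back end-to-end tests run inside a real Agent. A minimal sketch of such a test, assuming the dd_agent_check fixture and e2e marker provided by datadog_checks.dev (this test is not part of the commit):

import pytest

from .common import EXPECTED_METRICS, INSTANCE_DOCKER


@pytest.mark.e2e
def test_e2e(dd_agent_check):
    # dd_agent_check runs the sqlserver check inside the Agent against the
    # environment started from dd_environment and returns a stub aggregator.
    aggregator = dd_agent_check(INSTANCE_DOCKER)

    for metric in EXPECTED_METRICS:
        # at_least=0: a freshly booted server may not emit every metric.
        aggregator.assert_metric(metric, at_least=0)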
42 changes: 12 additions & 30 deletions sqlserver/tests/test_sqlserver.py
@@ -6,56 +6,38 @@
from datadog_checks.sqlserver import SQLServer
from datadog_checks.sqlserver.sqlserver import SQLConnectionError


CHECK_NAME = 'sqlserver'
EXPECTED_METRICS = [m[0] for m in SQLServer.METRICS]
from .common import CHECK_NAME, EXPECTED_METRICS


@pytest.mark.docker
def test_check_invalid_password(aggregator, init_config, instance_docker, sqlserver):
@pytest.mark.usefixtures("dd_environment")
def test_check_invalid_password(aggregator, init_config, instance_docker):
instance_docker['password'] = 'FOO'

sqlserver_check = SQLServer(CHECK_NAME, init_config, {}, [instance_docker])

with pytest.raises(SQLConnectionError) as excinfo:
sqlserver_check.check(instance_docker)
assert excinfo.value.args[0] == 'Unable to connect to SQL Server'
aggregator.assert_service_check('sqlserver.can_connect', status=sqlserver_check.CRITICAL,
tags=['host:localhost,1433', 'db:master', 'optional:tag1'])
aggregator.assert_service_check(
'sqlserver.can_connect',
status=sqlserver_check.CRITICAL,
tags=['host:localhost,1433', 'db:master', 'optional:tag1']
)


@pytest.mark.docker
def test_check_docker(aggregator, init_config, instance_docker, sqlserver):
@pytest.mark.usefixtures("dd_environment")
def test_check_docker(aggregator, init_config, instance_docker):
sqlserver_check = SQLServer(CHECK_NAME, init_config, {}, [instance_docker])
sqlserver_check.check(instance_docker)
expected_tags = instance_docker.get('tags', []) + ['host:{}'.format(instance_docker.get('host')), 'db:master']
_assert_metrics(aggregator, expected_tags)


@pytest.mark.docker
def test_object_name(aggregator, instance_docker, sqlserver):
init_config_object_name = {
'custom_metrics': [
{
'name': 'sqlserver.cache.hit_ratio',
'counter_name': 'Cache Hit Ratio',
'instance_name': 'SQL Plans',
'object_name': 'SQLServer:Plan Cache',
'tags': [
'optional_tag:tag1'
]
},
{
'name': 'sqlserver.active_requests',
'counter_name': 'Active requests',
'instance_name': 'default',
'object_name': 'SQLServer:Workload Group Stats',
'tags': [
'optional_tag:tag1'
]
}
]
}
@pytest.mark.usefixtures("dd_environment")
def test_object_name(aggregator, init_config_object_name, instance_docker):

sqlserver_check = SQLServer(CHECK_NAME, init_config_object_name, {}, [instance_docker])
sqlserver_check.check(instance_docker)
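
The _assert_metrics helper used above is unchanged by this commit, so it does not appear in the diff. A rough sketch of what it presumably does, based on EXPECTED_METRICS from common.py (the exact implementation is an assumption):

def _assert_metrics(aggregator, expected_tags):
    """Assert that every expected metric was collected with the expected tags."""
    for metric in EXPECTED_METRICS:
        aggregator.assert_metric(metric, tags=expected_tags)
    aggregator.assert_all_metrics_covered()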
