Xgb datasets adding #60

Merged: 39 commits, merged Apr 26, 2021.

Changes from 1 commit are shown below. All 39 commits:
62f87c3  Applied mypy + flake8 for all files (Mar 22, 2021)
132d73f  Sorted imports with ISort (Mar 22, 2021)
4aa4898  Moved env change to runner (Mar 22, 2021)
5a8db33  fixed all mypy errors and added mypy check to CI (Mar 22, 2021)
5594efd  Yet another mypy fixes (Mar 22, 2021)
35b55b8  Small runner refactoring (Mar 23, 2021)
56de8f7  First attempt of adding nvidia datasets (Mar 29, 2021)
0ee5f05  Merge branch 'master' into mypy-applying (Mar 29, 2021)
04e7a64  removed E265 ignoring for flake8 job (Mar 29, 2021)
8268747  Merge remote-tracking branch 'my/mypy-applying' into xgb-nvidia-datasets (Mar 30, 2021)
b6a7eb0  NVidia benchmarks are working now (Mar 30, 2021)
7e780bb  Added higgs, msrank and airline fetching (Mar 30, 2021)
670c289  small fixes of env (Mar 30, 2021)
dc0e9c9  Applying comments (Apr 1, 2021)
f64ae68  Merge branch 'mypy-applying' into xgb-nvidia-datasets (Apr 1, 2021)
873754b  Split dataset loading to different files (Apr 1, 2021)
93ea32d  Merge remote-tracking branch 'origin/master' into xgb-nvidia-datasets (Apr 1, 2021)
dcfc5b9  Why doesnt mypy work? (Apr 1, 2021)
340402e  Added abalone + letters, updated all GB configs (Apr 15, 2021)
6e47423  Added links and descriptions for new datasets (Apr 15, 2021)
340a628  Merge remote-tracking branch 'origin/master' into xgb-nvidia-datasets (Apr 15, 2021)
4be3720  handling mypy (Apr 15, 2021)
8184016  Handled skex fake message throwing (Apr 15, 2021)
cf5ee76  Trying to handle mypy, at. 3 (Apr 15, 2021)
9db3177  Trying to handle mypy, at. 4 (Apr 15, 2021)
5e76a0b  Trying to handle mypy, at. 5 (Apr 15, 2021)
13fcd20  Changed configs readme and made small fixes in GB testing configs (Apr 20, 2021)
0873f97  Merge branch 'master' of https://github.com/IntelPython/scikit-learn_… (Apr 20, 2021)
877e0fd  Applying more comments, updating readme's (Apr 20, 2021)
8bdc7f2  Applying comments: renamed configs (Apr 20, 2021)
f9cf09b  Changed all datasets to npy, applied Kirill's comments (Apr 23, 2021)
41e003f  Merge branch 'master' of https://github.com/IntelPython/scikit-learn_… (Apr 23, 2021)
523df30  Cleanup after someone's commit (Apr 23, 2021)
59303fa  Applying mypy (Apr 23, 2021)
b56e42c  Applied Ekaterina's suggestions (Apr 23, 2021)
ad176e5  Applied other Ekaterina's comments (Apr 23, 2021)
b92a27f  Merge branch 'xgb-nvidia-datasets' of https://github.com/RukhovichIV/… (Apr 23, 2021)
11a8ffc  Final commits applying (Apr 26, 2021)
37d5461  Alexander's final comments (Apr 26, 2021)

Showing commit 35b55b8a99c5c0b669d36573b6e4b02fcbf4de5a: Small runner refactoring
Igor Rukhovich committed Mar 23, 2021

datasets/load_datasets.py (4 changes: 2 additions & 2 deletions)

@@ -37,7 +37,7 @@


 def try_load_dataset(dataset_name, output_directory):
-    if dataset_name in dataset_loaders.keys():
+    if dataset_name in dataset_loaders:
         try:
             return dataset_loaders[dataset_name](output_directory)
         except BaseException:
@@ -60,7 +60,7 @@ def try_load_dataset(dataset_name, output_directory):
     args = parser.parse_args()

     if args.list:
-        for key in dataset_loaders.keys():
+        for key in dataset_loaders:
             print(key)
         sys.exit(0)
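
Note: membership tests on a dict already consult its keys, so dropping .keys() is behavior-preserving and skips building a view object. A minimal sketch with a hypothetical loader registry (the entry is illustrative, not from this repo):

# Hypothetical registry mapping dataset names to loader callables.
dataset_loaders = {'abalone': lambda output_directory: True}

# 'in' on a dict checks keys, so both forms are equivalent;
# the bare form is the idiomatic one.
assert ('abalone' in dataset_loaders) == ('abalone' in dataset_loaders.keys())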

runner.py (68 changes: 25 additions & 43 deletions)

@@ -18,13 +18,11 @@
 import json
 import logging
 import os
-import pathlib
 import socket
 import sys

 import datasets.make_datasets as make_datasets
 import utils
-from datasets.load_datasets import try_load_dataset


 def generate_cases(params):
@@ -54,7 +52,7 @@ def generate_cases(params):
                         default='configs/config_example.json',
                         help='Path to configuration files')
     parser.add_argument('--dummy-run', default=False, action='store_true',
-                        help='Run configuration parser and datasets generation'
+                        help='Run configuration parser and datasets generation '
                              'without benchmarks running')
     parser.add_argument('--no-intel-optimized', default=False, action='store_true',
                         help='Use no intel optimized version. '
@@ -69,7 +67,6 @@
                         help='Create an Excel report based on benchmarks results. '
                              'Need "openpyxl" library')
     args = parser.parse_args()
-    env = os.environ.copy()

     logging.basicConfig(
         stream=sys.stdout, format='%(levelname)s: %(message)s', level=args.verbose)
@@ -90,8 +87,6 @@
         with open(config_name, 'r') as config_file:
             config = json.load(config_file)

-        if 'omp_env' not in config.keys():
-            config['omp_env'] = []
         # get parameters that are common for all cases
         common_params = config['common']
         for params_set in config['cases']:

@@ -107,34 +102,21 @@

             for dataset in params_set['dataset']:
                 if dataset['source'] in ['csv', 'npy']:
-                    train_data = dataset["training"]
-                    file_train_data_x = train_data["x"]
-                    paths = f'--file-X-train {file_train_data_x}'
-                    if 'y' in dataset['training'].keys():
-                        file_train_data_y = train_data["y"]
-                        paths += f' --file-y-train {file_train_data_y}'
-                    if 'testing' in dataset.keys():
-                        test_data = dataset["testing"]
-                        file_test_data_x = test_data["x"]
-                        paths += f' --file-X-test {file_test_data_x}'
-                        if 'y' in dataset['testing'].keys():
-                            file_test_data_y = test_data["y"]
-                            paths += f' --file-y-test {file_test_data_y}'
-                    if 'name' in dataset.keys():
-                        dataset_name = dataset['name']
-                    else:
-                        dataset_name = 'unknown'
-
-                    if not utils.is_exists_files([file_train_data_x]):
-                        directory_dataset = pathlib.Path(file_train_data_x).parent
-                        if not try_load_dataset(dataset_name=dataset_name,
-                                                output_directory=directory_dataset):
-                            logging.warning(f'Dataset {dataset_name} '
-                                            'could not be loaded. \n'
-                                            'Check the correct name or expand '
-                                            'the download in the folder dataset.')
-                            continue
-
+                    dataset_name = dataset['name'] if 'name' in dataset else 'unknown'
+                    if 'training' not in dataset or not utils.find_the_dataset(
+                            dataset_name, dataset['training']["x"]):
+                        logging.warning(
+                            f'Dataset {dataset_name} could not be loaded. \n'
+                            'Check the correct name or expand the download in '
+                            'the folder dataset.')
+                        continue
+                    paths = '--file-X-train ' + dataset['training']["x"]
+                    if 'y' in dataset['training']:
+                        paths += ' --file-y-train ' + dataset['training']["y"]
+                    if 'testing' in dataset:
+                        paths += ' --file-X-test ' + dataset["testing"]["x"]
+                        if 'y' in dataset['testing']:
+                            paths += ' --file-y-test ' + dataset["testing"]["y"]
                 elif dataset['source'] == 'synthetic':
                     class GenerationArgs:
                         classes: int
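
Note: the rewritten csv/npy branch expects each dataset entry to carry a 'source', an optional 'name', and 'training'/'testing' sections whose 'x' (and optional 'y') values are file paths. A hedged sketch of such an entry and the argument assembly; the paths are illustrative, not taken from this PR:

# Hypothetical dataset entry; the keys mirror what the new code reads.
dataset = {
    'source': 'npy',
    'name': 'higgs',
    'training': {'x': 'data/higgs_x_train.npy', 'y': 'data/higgs_y_train.npy'},
    'testing': {'x': 'data/higgs_x_test.npy', 'y': 'data/higgs_y_test.npy'},
}

# The runner then assembles the benchmark CLI arguments roughly like this:
paths = '--file-X-train ' + dataset['training']['x']
if 'y' in dataset['training']:
    paths += ' --file-y-train ' + dataset['training']['y']
if 'testing' in dataset:
    paths += ' --file-X-test ' + dataset['testing']['x']
    if 'y' in dataset['testing']:
        paths += ' --file-y-test ' + dataset['testing']['y']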

@@ -151,7 +133,7 @@ class GenerationArgs:
                     gen_args = GenerationArgs()
                     paths = ''

-                    if 'seed' in params_set.keys():
+                    if 'seed' in params_set:
                         gen_args.seed = params_set['seed']
                     else:
                         gen_args.seed = 777
@@ -161,10 +143,10 @@
                     gen_args.type = dataset['type']
                     gen_args.samples = dataset['training']['n_samples']
                     gen_args.features = dataset['n_features']
-                    if 'n_classes' in dataset.keys():
+                    if 'n_classes' in dataset:
                         gen_args.classes = dataset['n_classes']
                         cls_num_for_file = f'-{dataset["n_classes"]}'
-                    elif 'n_clusters' in dataset.keys():
+                    elif 'n_clusters' in dataset:
                         gen_args.clusters = dataset['n_clusters']
                         cls_num_for_file = f'-{dataset["n_clusters"]}'
                     else:
@@ -179,7 +161,7 @@
                     gen_args.filey = f'{file_prefix}y-train{file_postfix}'
                     paths += f' --file-y-train {gen_args.filey}'

-                    if 'testing' in dataset.keys():
+                    if 'testing' in dataset:
                         gen_args.test_samples = dataset['testing']['n_samples']
                         gen_args.filextest = f'{file_prefix}X-test{file_postfix}'
                         paths += f' --file-X-test {gen_args.filextest}'
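
Note: the synthetic branch keeps deriving output file names from a prefix/postfix pair plus the class or cluster count. The prefix construction itself sits outside the visible hunks, so the following is only a hypothetical reconstruction of the naming scheme these lines feed:

# Assumed naming sketch; only cls_num_for_file mirrors the diff above,
# the prefix/postfix values are illustrative.
dataset = {'type': 'classification', 'n_classes': 2}
cls_num_for_file = f'-{dataset["n_classes"]}' if 'n_classes' in dataset else ''
file_prefix = f'data/synthetic-{dataset["type"]}{cls_num_for_file}-'
file_postfix = '.npy'
filey = f'{file_prefix}y-train{file_postfix}'
assert filey == 'data/synthetic-classification-2-y-train.npy'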

@@ -204,21 +186,21 @@
                     logging.warning('Unknown dataset source. Only synthetics datasets '
                                     'and csv/npy files are supported now')

-                omp_env = utils.get_omp_env()
                 no_intel_optimize = \
                     '--no-intel-optimized ' if args.no_intel_optimized else ''
                 for lib in libs:
-                    if lib == 'xgboost':
+                    env = os.environ.copy()
+                    if lib == 'xgboost' and 'omp_env' in config:
+                        omp_env = utils.get_omp_env()
                         for var in config['omp_env']:
-                            env[var] = omp_env[var]
+                            if var in omp_env:
+                                env[var] = omp_env[var]
                     for i, case in enumerate(cases):
                         command = f'python {lib}_bench/{algorithm}.py ' \
                             + no_intel_optimize \
                             + f'--arch {hostname} {case} {paths} ' \
                             + f'--dataset-name {dataset_name}'
-                        while '  ' in command:
-                            command = command.replace('  ', ' ')
+                        command = ' '.join(command.split())
                         logging.info(command)
                         if not args.dummy_run:
                             case = f'{lib},{algorithm} ' + case
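
Note: ' '.join(command.split()) collapses any run of whitespace to a single space in one pass, which is what the old while/replace loop achieved for spaces. A quick check:

# split() with no argument splits on any whitespace run and drops empty
# pieces, so re-joining with single spaces normalizes the command string.
command = 'python  xgboost_bench/gbt.py   --arch host1  --dataset-name higgs'
assert ' '.join(command.split()) == \
    'python xgboost_bench/gbt.py --arch host1 --dataset-name higgs'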

utils.py (17 changes: 9 additions & 8 deletions)

@@ -18,19 +18,20 @@
 import logging
 import multiprocessing
 import os
+import pathlib
 import platform
 import subprocess
 import sys

+from datasets.load_datasets import try_load_dataset
+

 def filter_stderr(text):
     # delete 'Intel(R) DAAL usage in sklearn' messages
     fake_error_message = 'Intel(R) oneAPI Data Analytics Library solvers ' + \
                          'for sklearn enabled: ' + \
                          'https://intelpython.github.io/daal4py/sklearn.html'
-    while fake_error_message in text:
-        text = text.replace(fake_error_message, '')
-    return text
+    return ''.join(text.split(fake_error_message))


 def filter_stdout(text):
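
Note: the split/join form strips every occurrence of the fake error message in one pass, matching the old loop's result for these messages:

# str.split(sep) cuts the text at every occurrence of sep, so joining the
# pieces back with '' removes all copies at once.
fake = 'fake message'
text = 'real error\n' + fake + 'another error\n' + fake
assert ''.join(text.split(fake)) == 'real error\nanother error\n'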

@@ -51,9 +52,10 @@ def filter_stdout(text):
     return filtered, extra


-def is_exists_files(files):
-    for f in files:
-        if not os.path.isfile(f):
+def find_the_dataset(name: str, fullpath: str) -> bool:
+    if not os.path.isfile(fullpath):
+        if not try_load_dataset(dataset_name=name,
+                                output_directory=pathlib.Path(fullpath).parent):
             return False
     return True

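Note: a hedged usage sketch of the new helper, assuming the repository root is on sys.path so utils imports cleanly; the dataset name and path are illustrative:

# find_the_dataset() returns True if the file already exists; otherwise it
# tries to download the named dataset into the file's parent directory.
from utils import find_the_dataset

if not find_the_dataset('higgs', 'data/higgs_x_train.npy'):
    print('dataset missing and could not be fetched')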

@@ -89,11 +91,10 @@ def get_omp_env():
     cpu_count = multiprocessing.cpu_count()
     omp_num_threads = str(cpu_count // 2) if _is_ht_enabled() else str(cpu_count)

-    omp_env = {
+    return {
         'OMP_PLACES': f'{{0}}:{cpu_count}:1',
         'OMP_NUM_THREADS': omp_num_threads
     }
-    return omp_env


 def get_hw_parameters():
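
Note: with get_omp_env() returning the dict directly, the per-library merge (see the runner.py hunk above) can be sketched as follows; the values in the comment are illustrative for a 16-logical-core machine with hyper-threading:

import os

import utils

# Copy the parent environment for each library run, then overlay only the
# OMP variables that get_omp_env() actually provides,
# e.g. {'OMP_PLACES': '{0}:16:1', 'OMP_NUM_THREADS': '8'}.
env = os.environ.copy()
omp_env = utils.get_omp_env()
for var in ('OMP_PLACES', 'OMP_NUM_THREADS'):
    if var in omp_env:
        env[var] = omp_env[var]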

xgboost_bench/gbt.py (3 changes: 1 addition & 2 deletions)

@@ -34,8 +34,7 @@ def convert_xgb_predictions(y_pred, objective):
     return y_pred


-parser = argparse.ArgumentParser(description='xgboost gradient boosted trees '
-                                             'benchmark')
+parser = argparse.ArgumentParser(description='xgboost gradient boosted trees benchmark')

 parser.add_argument('--n-estimators', type=int, default=100,
                     help='Number of gradient boosted trees')