Xgb datasets adding #60

Merged: 39 commits (Apr 26, 2021)
Changes from 1 commit

Commits (39):
62f87c3  Applied mypy + flake8 for all files (Mar 22, 2021)
132d73f  Sorted imports with ISort (Mar 22, 2021)
4aa4898  Moved env change to runner (Mar 22, 2021)
5a8db33  fixed all mypy errors and added mypy check to CI (Mar 22, 2021)
5594efd  Yet another mypy fixes (Mar 22, 2021)
35b55b8  Small runner refactoring (Mar 23, 2021)
56de8f7  First attempt of adding nvidia datasets (Mar 29, 2021)
0ee5f05  Merge branch 'master' into mypy-applying (Mar 29, 2021)
04e7a64  removed E265 ignoring for flake8 job (Mar 29, 2021)
8268747  Merge remote-tracking branch 'my/mypy-applying' into xgb-nvidia-datasets (Mar 30, 2021)
b6a7eb0  NVidia benchmarks are working now (Mar 30, 2021)
7e780bb  Added higgs, msrank and airline fetching (Mar 30, 2021)
670c289  small fixes of env (Mar 30, 2021)
dc0e9c9  Applying comments (Apr 1, 2021)
f64ae68  Merge branch 'mypy-applying' into xgb-nvidia-datasets (Apr 1, 2021)
873754b  Split dataset loading to different files (Apr 1, 2021)
93ea32d  Merge remote-tracking branch 'origin/master' into xgb-nvidia-datasets (Apr 1, 2021)
dcfc5b9  Why doesnt mypy work? (Apr 1, 2021)
340402e  Added abalone + letters, updated all GB configs (Apr 15, 2021)
6e47423  Added links and descriptions for new datasets (Apr 15, 2021)
340a628  Merge remote-tracking branch 'origin/master' into xgb-nvidia-datasets (Apr 15, 2021)
4be3720  handling mypy (Apr 15, 2021)
8184016  Handled skex fake message throwing (Apr 15, 2021)
cf5ee76  Trying to handle mypy, at. 3 (Apr 15, 2021)
9db3177  Trying to handle mypy, at. 4 (Apr 15, 2021)
5e76a0b  Trying to handle mypy, at. 5 (Apr 15, 2021)
13fcd20  Changed configs readme and made small fixes in GB testing configs (Apr 20, 2021)
0873f97  Merge branch 'master' of https://github.com/IntelPython/scikit-learn_… (Apr 20, 2021)
877e0fd  Applying more comments, updating readme's (Apr 20, 2021)
8bdc7f2  Applying comments: renamed configs (Apr 20, 2021)
f9cf09b  Changed all datasets to npy, applied Kirill's comments (Apr 23, 2021)
41e003f  Merge branch 'master' of https://github.com/IntelPython/scikit-learn_… (Apr 23, 2021)
523df30  Cleanup after someone's commit (Apr 23, 2021)
59303fa  Applying mypy (Apr 23, 2021)
b56e42c  Applied Ekaterina's suggestions (Apr 23, 2021)
ad176e5  Applied other Ekaterina's comments (Apr 23, 2021)
b92a27f  Merge branch 'xgb-nvidia-datasets' of https://github.com/RukhovichIV/… (Apr 23, 2021)
11a8ffc  Final commits applying (Apr 26, 2021)
37d5461  Alexander's final comments (Apr 26, 2021)
Added higgs, msrank and airline fetching
Igor Rukhovich committed Mar 30, 2021
commit 7e780bbd4d4fe6239d5972a732926e8b6b2bec7b
33 changes: 24 additions & 9 deletions configs/xgb_cpu_config.json
@@ -79,12 +79,17 @@
         {
             "dataset": [
                 {
-                    "source": "csv",
+                    "source": "npy",
                     "name": "airline-ohe",
                     "training":
                     {
-                        "x": "data/airline-ohe_x_train.csv",
-                        "y": "data/airline-ohe_y_train.csv"
+                        "x": "data/airline-ohe_x_train.npy",
+                        "y": "data/airline-ohe_y_train.npy"
+                    },
+                    "testing":
+                    {
+                        "x": "data/airline-ohe_x_test.npy",
+                        "y": "data/airline-ohe_y_test.npy"
                     }
                 }
             ],
@@ -103,12 +108,17 @@
         {
             "dataset": [
                 {
-                    "source": "csv",
+                    "source": "npy",
                     "name": "higgs1m",
                     "training":
                     {
-                        "x": "data/higgs1m_x_train.csv",
-                        "y": "data/higgs1m_y_train.csv"
+                        "x": "data/higgs1m_x_train.npy",
+                        "y": "data/higgs1m_y_train.npy"
+                    },
+                    "testing":
+                    {
+                        "x": "data/higgs1m_x_test.npy",
+                        "y": "data/higgs1m_y_test.npy"
                     }
                 }
             ],
@@ -129,12 +139,17 @@
         {
             "dataset": [
                 {
-                    "source": "csv",
+                    "source": "npy",
                     "name": "msrank",
                     "training":
                     {
-                        "x": "data/mlsr_x_train.csv",
-                        "y": "data/mlsr_y_train.csv"
+                        "x": "data/msrank_x_train.npy",
+                        "y": "data/msrank_y_train.npy"
+                    },
+                    "testing":
+                    {
+                        "x": "data/msrank_x_test.npy",
+                        "y": "data/msrank_y_test.npy"
                    }
                 }
             ],
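Each entry now carries an explicit testing split alongside training. A minimal sketch of reading one of these pairs; assuming np.load is how the runner consumes "source": "npy" datasets (illustrative only, paths taken from the msrank entry above):

import numpy as np

# Paths come from the msrank config entry; np.load for "npy" sources is an
# assumption for illustration, not the runner's verified code path.
x_train = np.load('data/msrank_x_train.npy')
y_train = np.load('data/msrank_y_train.npy')
x_test = np.load('data/msrank_x_test.npy')
y_test = np.load('data/msrank_y_test.npy')
print(x_train.shape, y_train.shape, x_test.shape, y_test.shape)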
173 changes: 170 additions & 3 deletions datasets/loader.py
@@ -16,6 +16,8 @@

 import logging
 import os
+import re
+import tarfile
 from pathlib import Path
 from typing import Any
 from urllib.request import urlretrieve
@@ -46,6 +48,42 @@ def _retrieve(url: str, filename: str) -> None:
     urlretrieve(url, filename, reporthook=_show_progress)


+def _read_libsvm_msrank(file_obj, n_samples, n_features, dtype):
+    X = np.zeros((n_samples, n_features))
+    y = np.zeros((n_samples,))
+
+    counter = 0
+
+    # Rewrite every 'name:value' pair (including 'qid:N') to its bare value
+    regexp = re.compile(r'[A-Za-z0-9]+:(-?\d*\.?\d+)')
+
+    for line in file_obj:
+        line = str(line).replace("\\n'", "")
+        line = regexp.sub(r'\g<1>', line)
+        line = line.rstrip(" \n\r").split(' ')
+
+        y[counter] = int(line[0])
+        X[counter] = [float(i) for i in line[1:]]
+
+        counter += 1
+        if counter == n_samples:
+            break
+
+    return np.array(X, dtype=dtype), np.array(y, dtype=dtype)
+
+
+def _make_gen(reader):
+    # Yield fixed-size (1 MB) chunks until the reader is exhausted
+    b = reader(1024 * 1024)
+    while b:
+        yield b
+        b = reader(1024 * 1024)
+
+
+def _count_lines(filename):
+    # Count newline bytes while streaming the file in binary chunks
+    with open(filename, 'rb') as f:
+        f_gen = _make_gen(f.read)
+        return sum(buf.count(b'\n') for buf in f_gen)
+
+
 def a_nine_a(dataset_dir: Path) -> bool:
     """
     Author: Ronny Kohavi","Barry Becker
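For reference, `_read_libsvm_msrank` strips the `name:` prefix from every pair, so the `qid` value becomes an ordinary feature column; that is how the 137-feature count arises from MSLR's 136 features. A quick check on a made-up two-row sample (the private helper is imported here just for illustration):

import io

import numpy as np

from datasets.loader import _read_libsvm_msrank

sample = io.StringIO("2 qid:1 1:0.5 2:3\n"
                     "0 qid:1 1:0.1 2:7\n")
X, y = _read_libsvm_msrank(sample, 2, 3, np.float32)
print(X)  # [[1.  0.5 3. ] [1.  0.1 7. ]] - qid ends up as the first column
print(y)  # [2. 0.]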
@@ -136,7 +174,56 @@ def airline(dataset_dir: Path) -> bool:


 def airline_ohe(dataset_dir: Path) -> bool:
-    return False
+    """
+    Dataset from szilard benchmarks: https://github.com/szilard/GBM-perf
+
+    TaskType:binclass
+    NumberOfFeatures:700
+    NumberOfInstances:10100000
+    """
+    dataset_name = 'airline-ohe'
+    os.makedirs(dataset_dir, exist_ok=True)
+
+    url_train = 'https://s3.amazonaws.com/benchm-ml--main/train-10m.csv'
+    url_test = 'https://s3.amazonaws.com/benchm-ml--main/test.csv'
+    local_url_train = os.path.join(dataset_dir, os.path.basename(url_train))
+    local_url_test = os.path.join(dataset_dir, os.path.basename(url_test))
+    if not os.path.isfile(local_url_train):
+        logging.info(f'Started loading {dataset_name}')
+        _retrieve(url_train, local_url_train)
+    if not os.path.isfile(local_url_test):
+        logging.info(f'Started loading {dataset_name}')
+        _retrieve(url_test, local_url_test)
+    logging.info(f'{dataset_name} is loaded, started parsing...')
+
+    sets = []
+    labels = []
+
+    categorical_names = ["Month", "DayofMonth",
+                         "DayOfWeek", "UniqueCarrier", "Origin", "Dest"]
+
+    for local_url in [local_url_train, local_url_test]:
+        # Cap the 10M-row training file at 1M rows; read the test file fully
+        nrows = 1000000 if local_url.endswith('train-10m.csv') else None
+        df = pd.read_csv(local_url, nrows=nrows)
+        X = df.drop('dep_delayed_15min', axis=1)
+        y = df["dep_delayed_15min"]
+
+        y_num = np.where(y == "Y", 1, 0)
+
+        sets.append(X)
+        labels.append(y_num)
+
+    n_samples_train = sets[0].shape[0]
+
+    # One-hot encode train and test together so both splits share one column set
+    X_final: Any = pd.concat(sets)
+    X_final = pd.get_dummies(X_final, columns=categorical_names)
+    sets = [X_final[:n_samples_train], X_final[n_samples_train:]]
+
+    for data, name in zip((sets[0], sets[1], labels[0], labels[1]),
+                          ('x_train', 'x_test', 'y_train', 'y_test')):
+        filename = f'{dataset_name}_{name}.npy'
+        np.save(os.path.join(dataset_dir, filename), data)
+    logging.info(f'dataset {dataset_name} is ready.')
+    return True


 def bosch(dataset_dir: Path) -> bool:
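One detail worth noting in `airline_ohe`: train and test are concatenated before `pd.get_dummies`, because encoding the splits separately can produce mismatched column sets when a category appears in only one split. A tiny self-contained illustration (made-up values):

import pandas as pd

train = pd.DataFrame({'Origin': ['SFO', 'JFK']})
test = pd.DataFrame({'Origin': ['SFO', 'ORD']})  # 'ORD' unseen in train

# Encoding separately yields different column sets
print(pd.get_dummies(train).columns.tolist())  # ['Origin_JFK', 'Origin_SFO']
print(pd.get_dummies(test).columns.tolist())   # ['Origin_ORD', 'Origin_SFO']

# Encoding the concatenation keeps the columns aligned across splits
both = pd.get_dummies(pd.concat([train, test]))
print(both.columns.tolist())  # ['Origin_JFK', 'Origin_ORD', 'Origin_SFO']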
@@ -454,7 +541,43 @@ def higgs(dataset_dir: Path) -> bool:


 def higgs_one_m(dataset_dir: Path) -> bool:
-    return False
+    """
+    Higgs dataset from the UCI machine learning repository:
+    https://archive.ics.uci.edu/ml/datasets/HIGGS
+
+    TaskType:binclass
+    NumberOfFeatures:28
+    NumberOfInstances:11M
+    """
+    dataset_name = 'higgs1m'
+    os.makedirs(dataset_dir, exist_ok=True)
+
+    url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00280/HIGGS.csv.gz'
+    local_url = os.path.join(dataset_dir, os.path.basename(url))
+    if not os.path.isfile(local_url):
+        logging.info(f'Started loading {dataset_name}')
+        _retrieve(url, local_url)
+    logging.info(f'{dataset_name} is loaded, started parsing...')
+
+    nrows_train, nrows_test, dtype = 1000000, 500000, np.float32
+    data: Any = pd.read_csv(local_url, delimiter=",", header=None,
+                            compression="gzip", dtype=dtype,
+                            nrows=nrows_train + nrows_test)
+
+    # Move the label (first column in the raw CSV) to the end, features first
+    data = data[list(data.columns[1:]) + list(data.columns[0:1])]
+    n_features = data.shape[1] - 1
+    train_data = np.ascontiguousarray(data.values[:nrows_train, :n_features], dtype=dtype)
+    train_label = np.ascontiguousarray(data.values[:nrows_train, n_features], dtype=dtype)
+    test_data = np.ascontiguousarray(
+        data.values[nrows_train: nrows_train + nrows_test, :n_features],
+        dtype=dtype)
+    test_label = np.ascontiguousarray(
+        data.values[nrows_train: nrows_train + nrows_test, n_features],
+        dtype=dtype)
+    for data, name in zip((train_data, test_data, train_label, test_label),
+                          ('x_train', 'x_test', 'y_train', 'y_test')):
+        filename = f'{dataset_name}_{name}.npy'
+        np.save(os.path.join(dataset_dir, filename), data)
+    logging.info(f'dataset {dataset_name} is ready.')
+    return True


 def ijcnn(dataset_dir: Path) -> bool:
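The column shuffle in `higgs_one_m` exists so that `values[:, :n_features]` slices features and `values[:, n_features]` the labels. In miniature (made-up values):

import pandas as pd

# Label-first frame, as HIGGS.csv ships it: col 0 = label, cols 1-2 = features
df = pd.DataFrame([[1.0, 0.5, 0.7],
                   [0.0, 0.2, 0.9]])

df = df[list(df.columns[1:]) + list(df.columns[0:1])]
print(df.columns.tolist())  # [1, 2, 0] - the label column is now last
print(df.values[:, :2])     # features only
print(df.values[:, 2])      # labels: [1. 0.]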
@@ -576,7 +699,51 @@ def mortgage_first_q(dataset_dir: Path) -> bool:


 def msrank(dataset_dir: Path) -> bool:
-    return False
+    """
+    MSRank dataset (Microsoft Learning to Rank):
+    https://www.microsoft.com/en-us/research/project/mslr/
+
+    TaskType:multiclass
+    NumberOfFeatures:137
+    NumberOfInstances:~1.2M
+    """
+    dataset_name = 'msrank'
+    os.makedirs(dataset_dir, exist_ok=True)
+    url = "https://storage.mds.yandex.net/get-devtools-opensource/471749/msrank.tar.gz"
+    local_url = os.path.join(dataset_dir, os.path.basename(url))
+    if not os.path.isfile(local_url):
+        logging.info(f'Started loading {dataset_name}')
+        _retrieve(url, local_url)
+    logging.info(f'{dataset_name} is loaded, unzipping...')
+    with tarfile.open(local_url, "r:gz") as tar:
+        tar.extractall(dataset_dir)
+    logging.info(f'{dataset_name} is unzipped, started parsing...')
+
+    sets = []
+    labels = []
+    n_features = 137
+
+    for set_name in ['train.txt', 'vali.txt', 'test.txt']:
+        file_name = os.path.join(dataset_dir, 'MSRank', set_name)
+
+        n_samples = _count_lines(file_name)
+        with open(file_name, 'r') as file_obj:
+            X, y = _read_libsvm_msrank(file_obj, n_samples, n_features, np.float32)
+
+        sets.append(X)
+        labels.append(y)
+
+    # Fold the validation part into training; keep test separate
+    sets[0] = np.vstack((sets[0], sets[1]))
+    labels[0] = np.hstack((labels[0], labels[1]))
+
+    sets = [np.ascontiguousarray(sets[i]) for i in [0, 2]]
+    labels = [np.ascontiguousarray(labels[i]) for i in [0, 2]]
+
+    for data, name in zip((sets[0], sets[1], labels[0], labels[1]),
+                          ('x_train', 'x_test', 'y_train', 'y_test')):
+        filename = f'{dataset_name}_{name}.npy'
+        np.save(os.path.join(dataset_dir, filename), data)
+    logging.info(f'dataset {dataset_name} is ready.')
+    return True


 def plasticc(dataset_dir: Path) -> bool:
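Taken together, the new loaders download each raw archive only when it is missing and write <name>_{x,y}_{train,test}.npy into the dataset directory on every run. A short end-to-end sketch; the import path follows this PR's datasets/loader.py, and the whole snippet is illustrative rather than part of the benchmark runner:

from pathlib import Path

import numpy as np

from datasets.loader import airline_ohe, higgs_one_m, msrank

data_dir = Path('data')
for fetch in (airline_ohe, higgs_one_m, msrank):
    fetch(data_dir)  # downloads are skipped when the raw files already exist

x_train = np.load(data_dir / 'higgs1m_x_train.npy')
print(x_train.shape)  # expected (1000000, 28) for higgs1m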