Commit
[CI] Add integration tests (#5450)
* add test_score.py for IT

* caffe

* update
mli authored Mar 17, 2017
1 parent 2faac0e commit 430ea7b
Showing 8 changed files with 82 additions and 44 deletions.
28 changes: 27 additions & 1 deletion Jenkinsfile
@@ -156,7 +156,7 @@ stage('Unit Test') {
}
},
'Python2/3: MKLML': {
node {
node('GPU') {
ws('workspace/ut-python-mklml') {
init_git()
unpack_lib('mklml')
@@ -178,3 +178,29 @@ stage('Unit Test') {
}
}
}


stage('Integration Test') {
parallel 'Python': {
node('GPU') {
ws('workspace/it-python-gpu') {
init_git()
unpack_lib('gpu')
timeout(time: max_time, unit: 'MINUTES') {
sh "${docker_run} gpu PYTHONPATH=./python/ python example/image-classification/test_score.py"
}
}
}
},
'Caffe': {
node('GPU') {
ws('workspace/it-caffe') {
init_git()
unpack_lib('gpu')
timeout(time: max_time, unit: 'MINUTES') {
sh "${docker_run} caffe_gpu PYTHONPATH=/caffe/python:./python python tools/caffe_converter/test_converter.py"
}
}
}
}
}
4 changes: 3 additions & 1 deletion example/image-classification/score.py
@@ -6,7 +6,7 @@
import logging

def score(model, data_val, metrics, gpus, batch_size, rgb_mean=None, mean_img=None,
image_shape='3,224,224', data_nthreads=4, label_name='softmax_label'):
image_shape='3,224,224', data_nthreads=4, label_name='softmax_label', max_num_examples=None):
# create data iterator
data_shape = tuple([int(i) for i in image_shape.split(',')])
if mean_img is not None:
@@ -59,6 +59,8 @@ def score(model, data_val, metrics, gpus, batch_size, rgb_mean=None, mean_img=No
for m in metrics:
mod.update_metric(m, batch.label)
num += batch_size
if max_num_examples is not None and num > max_num_examples:
break
return (num / (time.time() - tic), )


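A minimal usage sketch of the new max_num_examples argument, mirroring how test_score.py (next file) drives score(). The model name, GPU id, batch size, and data path below are illustrative assumptions, not values fixed by this commit.

import mxnet as mx
from score import score

# Illustrative values (assumed): one GPU, a small batch size, the validation
# set downloaded by test_score.py, and a model-zoo name accepted by score().
acc = mx.metric.create('acc')
(speed,) = score(model='imagenet1k-resnet-50',
                 data_val='data/val-5k-256.rec',
                 rgb_mean='0,0,0',
                 metrics=acc,
                 gpus='0',
                 batch_size=16,
                 max_num_examples=500)  # stop scoring after ~500 images
print('acc = %f, speed = %f img/sec' % (acc.get()[1], speed))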
31 changes: 13 additions & 18 deletions example/image-classification/test_score.py
@@ -2,49 +2,44 @@
test pretrained models
"""
from __future__ import print_function
import os
import mxnet as mx
from common import find_mxnet, modelzoo
from common.util import download_file, get_gpus
from score import score

VAL_DATA='data/val-5k-256.rec'
def download_data():
if not os.path.isdir('data'):
os.mkdir('data')
return download_file('http://data.mxnet.io/data/val-5k-256.rec', 'data/val-5k-256.rec')

return mx.test_utils.download(
'http://data.mxnet.io/data/val-5k-256.rec', VAL_DATA)

def test_imagenet1k_resnet(**kwargs):
models = ['imagenet1k-resnet-34',
'imagenet1k-resnet-50',
'imagenet1k-resnet-101',
'imagenet1k-resnet-152']
accs = [.72, .75, .765, .76]
models = ['imagenet1k-resnet-50', 'imagenet1k-resnet-152']
accs = [.77, .78]
for (m, g) in zip(models, accs):
acc = mx.metric.create('acc')
(speed,) = score(model=m, data_val='data/val-5k-256.rec',
(speed,) = score(model=m, data_val=VAL_DATA,
rgb_mean='0,0,0', metrics=acc, **kwargs)
r = acc.get()[1]
print('testing %s, acc = %f, speed = %f img/sec' % (m, r, speed))
print('Tested %s, acc = %f, speed = %f img/sec' % (m, r, speed))
assert r > g and r < g + .1

def test_imagenet1k_inception_bn(**kwargs):
acc = mx.metric.create('acc')
m = 'imagenet1k-inception-bn'
g = 0.72
g = 0.75
(speed,) = score(model=m,
data_val='data/val-5k-256.rec',
data_val=VAL_DATA,
rgb_mean='123.68,116.779,103.939', metrics=acc, **kwargs)
r = acc.get()[1]
print('Tested %s acc = %f, speed = %f img/sec' % (m, r, speed))
assert r > g and r < g + .1

if __name__ == '__main__':
gpus = get_gpus()
gpus = mx.test_utils.list_gpus()
assert len(gpus) > 0
batch_size = 16 * len(gpus)
gpus = ','.join([str(i) for i in gpus])

kwargs = {'gpus':gpus, 'batch_size':batch_size, 'max_num_examples':500}
download_data()
test_imagenet1k_resnet(gpus=gpus, batch_size=batch_size)
test_imagenet1k_inception_bn(gpus=gpus, batch_size=batch_size)
test_imagenet1k_resnet(**kwargs)
test_imagenet1k_inception_bn(**kwargs)
24 changes: 24 additions & 0 deletions tests/ci_build/Dockerfile.caffe_gpu
@@ -0,0 +1,24 @@
FROM nvidia/cuda:7.5-cudnn5-devel

COPY install/ubuntu_*.sh /install/

RUN /install/ubuntu_install_core.sh
RUN /install/ubuntu_install_python.sh

RUN apt-get install -y libprotobuf-dev libleveldb-dev \
libsnappy-dev libopencv-dev libhdf5-serial-dev protobuf-compiler \
libatlas-base-dev python-dev libgflags-dev libgoogle-glog-dev liblmdb-dev \
python-numpy

RUN apt-get install -y --no-install-recommends libboost-all-dev

RUN cd /; git clone http://github.com/BVLC/caffe.git; cd caffe; \
cp Makefile.config.example Makefile.config

RUN echo "CPU_ONLY := 1" >> /caffe/Makefile.config

RUN cd caffe; make all pycaffe -j$(nproc)

RUN cd caffe/python; for req in $(cat requirements.txt); do pip2 install $req; done

ENV PYTHONPATH=${PYTHONPATH}:/caffe/python
7 changes: 5 additions & 2 deletions tests/ci_build/README.md
@@ -10,8 +10,11 @@ To run locally, we need to first install
[docker](https://docs.docker.com/engine/installation/) and
[nvidia-docker](https://github.com/NVIDIA/nvidia-docker/wiki).

Then we can run the tasks defined in the [Jenkinsfile](../../Jenkinsfile) by using (`ci_build.sh`)[./ci_build.sh]. For
example
We may use the AWS EC2 AMI `ami-d73bb4b7`, available in US West (Oregon), which
has both pre-installed.

Then we can run the tasks defined in the [Jenkinsfile](../../Jenkinsfile) by
using [`ci_build.sh`](./ci_build.sh). For example:

- lint the Python code

27 changes: 7 additions & 20 deletions tools/caffe_converter/convert_caffe_modelzoo.py
@@ -37,14 +37,16 @@
},
'vgg-16' : {
'prototxt' : 'https://gist.githubusercontent.com/ksimonyan/211839e770f7b538e2d8/raw/c3ba00e272d9f48594acef1f67e5fd12aff7a806/VGG_ILSVRC_16_layers_deploy.prototxt',
'caffemodel' : 'http://www.robots.ox.ac.uk/~vgg/software/very_deep/caffe/VGG_ILSVRC_16_layers.caffemodel' ,
# 'caffemodel' : 'http://www.robots.ox.ac.uk/~vgg/software/very_deep/caffe/VGG_ILSVRC_16_layers.caffemodel',
'caffemodel' : 'http://data.mxnet.io/models/imagenet/test/caffe/VGG_ILSVRC_16_layers.caffemodel',
'mean': (123.68,116.779,103.939),
'top-1-acc' : 0.734,
'top-5-acc' : 0.914
},
'vgg-19' : {
'prototxt' : 'https://gist.githubusercontent.com/ksimonyan/3785162f95cd2d5fee77/raw/bb2b4fe0a9bb0669211cf3d0bc949dfdda173e9e/VGG_ILSVRC_19_layers_deploy.prototxt',
'caffemodel' : 'http://www.robots.ox.ac.uk/~vgg/software/very_deep/caffe/VGG_ILSVRC_19_layers.caffemodel',
# 'caffemodel' : 'http://www.robots.ox.ac.uk/~vgg/software/very_deep/caffe/VGG_ILSVRC_19_layers.caffemodel',
'caffemodel' : 'http://data.mxnet.io/models/imagenet/test/caffe/VGG_ILSVRC_19_layers.caffemodel',
'mean' : (123.68,116.779,103.939),
'top-1-acc' : 0.731,
'top-5-acc' : 0.913
@@ -76,34 +78,19 @@ def get_model_meta_info(model_name):
"""returns a dict with model information"""
return dict(dict(model_meta_info)[model_name])

def _download_file(url, local_fname=None, force_write=False):
"""download a file by using the given URL"""
if local_fname is None:
local_fname = url.split('/')[-1]
if not force_write and os.path.exists(local_fname):
return local_fname

r = requests.get(url, stream=True)
assert r.status_code == 200, "failed to open %s" % url
with open(local_fname, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
return local_fname

def _download_caffe_model(model_name, meta_info, dst_dir='./model'):
"""Download caffe model into disk by the given meta info """
if not os.path.isdir(dst_dir):
os.mkdir(dst_dir)
model_name = os.path.join(dst_dir, model_name)
assert 'prototxt' in meta_info, "missing prototxt url"
prototxt = _download_file(meta_info['prototxt'], model_name+'_deploy.prototxt')
prototxt = mx.test_utils.download(meta_info['prototxt'], model_name+'_deploy.prototxt')
assert 'caffemodel' in meta_info, "missing caffemodel url"
caffemodel = _download_file(meta_info['caffemodel'], model_name+'.caffemodel')
caffemodel = mx.test_utils.download(meta_info['caffemodel'], model_name+'.caffemodel')
assert 'mean' in meta_info, 'no mean info'
mean = meta_info['mean']
if isinstance(mean, str):
mean = _download_file(mean, model_name+'_mean.binaryproto')
mean = mx.test_utils.download(mean, model_name+'_mean.binaryproto')
return (prototxt, caffemodel, mean)

def convert_caffe_model(model_name, meta_info, dst_dir='./model'):
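For reference, a minimal sketch of the helper swap above: mx.test_utils.download takes a URL and a target file name and returns the local path, matching the call shape of the lines changed in this file. Whether it skips a file that already exists, as the removed _download_file did with force_write=False, is assumed here rather than verified.

import mxnet as mx

# Assumed-equivalent replacement for the removed _download_file helper;
# URL and target name echo the vgg-16 entry above.
local_path = mx.test_utils.download(
    'http://data.mxnet.io/models/imagenet/test/caffe/VGG_ILSVRC_16_layers.caffemodel',
    './model/vgg-16.caffemodel')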
2 changes: 1 addition & 1 deletion tools/caffe_converter/convert_symbol.py
@@ -12,7 +12,7 @@ def _get_input(proto):
elif len(proto.input_shape) > 0:
input_dim = proto.input_shape[0].dim
elif layer[0].type == "Input":
input_dim = layer[0].input_param.shape._values[0].dim
input_dim = layer[0].input_param.shape[0].dim
layer.pop(0)
else:
raise ValueError('Cannot find input size')
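A short note on the one-line fix above: an Input layer's dimensions live in input_param.shape, a repeated protobuf field, so they can be read with plain indexing instead of the protobuf-internal _values attribute. A sketch, assuming the message follows Caffe's standard proto definition:

# layer[0] is assumed to be the parsed "Input" layer from the deploy prototxt,
# as in _get_input above.
input_layer = layer[0]
input_dim = list(input_layer.input_param.shape[0].dim)  # e.g. [1, 3, 224, 224]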
3 changes: 2 additions & 1 deletion tools/caffe_converter/test_converter.py
@@ -29,6 +29,7 @@ def test_imagenet_model(model_name, val_data, gpus, batch_size):
metrics=acc,
gpus=gpus,
batch_size=batch_size,
max_num_examples=500,
**mean_args)
logging.info('speed : %f image/sec', speed)
for a in acc:
@@ -41,7 +42,7 @@ def test_imagenet_model(model_name, val_data, gpus, batch_size):
assert len(gpus) > 0
batch_size = 32 * len(gpus)

models = ['bvlc_googlenet', 'vgg-16', 'vgg-19', 'resnet-50']
models = ['bvlc_googlenet', 'vgg-16', 'resnet-50']

val = download_data()
for m in models:
