Skip to content

Commit 75aa144

Browse files
robertgshaw2-redhatRobert Shaw
and
Robert Shaw
authored
[ CI/Build ] LM Eval Harness Based CI Testing (#5838)
Co-authored-by: Robert Shaw <rshaw@neuralmagic>
1 parent 99397da commit 75aa144

11 files changed

+274
-0
lines changed
Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,11 @@
1+
# Baseline GSM8k scores for meta-llama/Meta-Llama-3-70B-Instruct, measured
# offline with HF transformers. Regenerate with:
# bash .buildkite/lm-eval-harness/run-lm-eval-gsm-hf-baseline.sh -m meta-llama/Meta-Llama-3-70B-Instruct -b 32 -l 250 -f 5
model_name: "meta-llama/Meta-Llama-3-70B-Instruct"
tasks:
- name: "gsm8k"
  metrics:
  - name: "exact_match,strict-match"
    value: 0.892
  - name: "exact_match,flexible-extract"
    value: 0.892
# Number of eval samples and fewshot examples; must match the baseline run.
limit: 250
num_fewshot: 5
Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,11 @@
1+
# Baseline GSM8k scores for neuralmagic/Meta-Llama-3-8B-Instruct-FP8.
# FP8 is not supported by HF transformers, so this baseline was produced
# with the vllm pathway. Regenerate with:
# bash .buildkite/lm-eval-harness/run-lm-eval-gsm-hf-baseline.sh -m neuralmagic/Meta-Llama-3-8B-Instruct-FP8 -b 32 -l 250 -f 5 -t 1
model_name: "neuralmagic/Meta-Llama-3-8B-Instruct-FP8"
tasks:
- name: "gsm8k"
  metrics:
  - name: "exact_match,strict-match"
    value: 0.756
  - name: "exact_match,flexible-extract"
    value: 0.752
# Number of eval samples and fewshot examples; must match the baseline run.
limit: 250
num_fewshot: 5
Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,11 @@
1+
# Baseline GSM8k scores for meta-llama/Meta-Llama-3-8B-Instruct, measured
# offline with HF transformers. Regenerate with:
# bash .buildkite/lm-eval-harness/run-lm-eval-gsm-hf-baseline.sh -m meta-llama/Meta-Llama-3-8B-Instruct -b 32 -l 250 -f 5 -t 1
model_name: "meta-llama/Meta-Llama-3-8B-Instruct"
tasks:
- name: "gsm8k"
  metrics:
  # NOTE(review): these values are identical to the FP8 variant's config —
  # presumably coincidental; confirm against a fresh baseline run.
  - name: "exact_match,strict-match"
    value: 0.756
  - name: "exact_match,flexible-extract"
    value: 0.752
# Number of eval samples and fewshot examples; must match the baseline run.
limit: 250
num_fewshot: 5
Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,11 @@
1+
# Baseline GSM8k scores for mistralai/Mixtral-8x7B-Instruct-v0.1, measured
# offline with HF transformers. Regenerate with:
# bash .buildkite/lm-eval-harness/run-lm-eval-gsm-hf-baseline.sh -m mistralai/Mixtral-8x7B-Instruct-v0.1 -b 32 -l 250 -f 5 -t 4
model_name: "mistralai/Mixtral-8x7B-Instruct-v0.1"
tasks:
- name: "gsm8k"
  metrics:
  - name: "exact_match,strict-match"
    value: 0.616
  - name: "exact_match,flexible-extract"
    value: 0.632
# Number of eval samples and fewshot examples; must match the baseline run.
limit: 250
num_fewshot: 5
Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,2 @@
1+
Meta-Llama-3-70B-Instruct.yaml
Mixtral-8x7B-Instruct-v0.1.yaml
Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,2 @@
1+
Meta-Llama-3-8B-Instruct.yaml
Meta-Llama-3-8B-Instruct-FP8.yaml
Lines changed: 46 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,46 @@
1+
#!/bin/bash
# We can use this script to compute baseline accuracy on GSM for transformers.
#
# Make sure you have lm-eval-harness installed:
# pip install git+https://github.com/EleutherAI/lm-evaluation-harness.git@9516087b81a61d0e220b22cc1b75be76de23bc10

# Print usage information to stdout.
usage() {
    # Fix: original had `echo``` — the stray backticks spawned an empty
    # command substitution instead of printing a blank line.
    echo
    echo "Runs lm eval harness on GSM8k using huggingface transformers."
    echo "This pathway is intended to be used to create baselines for "
    echo "our automated nm-test-accuracy workflow"
    echo
    echo "usage: ${0} <options>"
    echo
    echo "  -m  - huggingface stub or local directory of the model"
    echo "  -b  - batch size to run the evaluation at"
    echo "  -l  - limit number of samples to run"
    echo "  -f  - number of fewshot samples to use"
    echo
}

while getopts "m:b:l:f:" OPT; do
  case ${OPT} in
    m )
      MODEL="$OPTARG"
      ;;
    b )
      BATCH_SIZE="$OPTARG"
      ;;
    l )
      LIMIT="$OPTARG"
      ;;
    f )
      FEWSHOT="$OPTARG"
      ;;
    \? )
      usage
      exit 1
      ;;
  esac
done

# Quote all expansions so a model path containing spaces does not word-split,
# and the model_args value reaches lm_eval as a single argument.
lm_eval --model hf \
  --model_args "pretrained=$MODEL,parallelize=True" \
  --tasks gsm8k --num_fewshot "$FEWSHOT" --limit "$LIMIT" \
  --batch_size "$BATCH_SIZE"
Lines changed: 51 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,51 @@
1+
#!/bin/bash
# We can use this script to compute baseline accuracy on GSM for vllm.
# We use this for fp8, which HF does not support.
#
# Make sure you have lm-eval-harness installed:
# pip install lm-eval==0.4.2

# Print usage information to stdout.
usage() {
    # Fix: original had `echo``` — the stray backticks spawned an empty
    # command substitution instead of printing a blank line.
    echo
    # Fix: usage text said "huggingface transformers" (copy-paste from the
    # HF baseline script); this script evaluates with vllm.
    echo "Runs lm eval harness on GSM8k using vllm."
    echo "This pathway is intended to be used to create baselines for "
    echo "our automated nm-test-accuracy workflow"
    echo
    echo "usage: ${0} <options>"
    echo
    echo "  -m  - huggingface stub or local directory of the model"
    echo "  -b  - batch size to run the evaluation at"
    echo "  -l  - limit number of samples to run"
    echo "  -f  - number of fewshot samples to use"
    echo "  -t  - tensor parallel size to run at"
    echo
}

while getopts "m:b:l:f:t:" OPT; do
  case ${OPT} in
    m )
      MODEL="$OPTARG"
      ;;
    b )
      BATCH_SIZE="$OPTARG"
      ;;
    l )
      LIMIT="$OPTARG"
      ;;
    f )
      FEWSHOT="$OPTARG"
      ;;
    t )
      TP_SIZE="$OPTARG"
      ;;
    \? )
      usage
      exit 1
      ;;
  esac
done

# Quote all expansions so a model path containing spaces does not word-split,
# and the model_args value reaches lm_eval as a single argument.
lm_eval --model vllm \
  --model_args "pretrained=$MODEL,tensor_parallel_size=$TP_SIZE" \
  --tasks gsm8k --num_fewshot "$FEWSHOT" --limit "$LIMIT" \
  --batch_size "$BATCH_SIZE"
Lines changed: 59 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,59 @@
1+
#!/bin/bash
# Runs the lm-eval correctness test (test_lm_eval_correctness.py) once per
# model config listed in the file passed via -c, and exits non-zero if any
# model fails.

# Print usage information to stdout.
usage() {
    # Fix: original had `echo``` — the stray backticks spawned an empty
    # command substitution instead of printing a blank line.
    echo
    echo "Runs lm eval harness on GSM8k using vllm and compares to "
    echo "precomputed baseline (measured by HF transformers.)"
    echo
    echo "usage: ${0} <options>"
    echo
    echo "  -c  - path to the test data config (e.g. configs/small-models.txt)"
    echo "  -t  - tensor parallel size"
    echo
}

# Accumulates the per-model pytest exit codes; stays 0 only if all pass.
SUCCESS=0

while getopts "c:t:" OPT; do
  case ${OPT} in
    c )
      CONFIG="$OPTARG"
      ;;
    t )
      TP_SIZE="$OPTARG"
      ;;
    \? )
      usage
      exit 1
      ;;
  esac
done

# Parse list of configs: one yaml filename per line.
# Fix: quote the redirect source; an unquoted $CONFIG would word-split or
# trigger an "ambiguous redirect" on paths with spaces.
IFS=$'\n' read -d '' -r -a MODEL_CONFIGS < "$CONFIG"

for MODEL_CONFIG in "${MODEL_CONFIGS[@]}"
do
  LOCAL_SUCCESS=0

  # Fix: added missing space before the trailing "===".
  echo "=== RUNNING MODEL: $MODEL_CONFIG WITH TP SIZE: $TP_SIZE ==="

  # test_lm_eval_correctness.py reads these two environment variables.
  export LM_EVAL_TEST_DATA_FILE="$PWD/configs/${MODEL_CONFIG}"
  export LM_EVAL_TP_SIZE="$TP_SIZE"
  # Capture the failure code without aborting, so remaining models still run.
  pytest -s test_lm_eval_correctness.py || LOCAL_SUCCESS=$?

  if [[ $LOCAL_SUCCESS == 0 ]]; then
    echo "=== PASSED MODEL: ${MODEL_CONFIG} ==="
  else
    echo "=== FAILED MODEL: ${MODEL_CONFIG} ==="
  fi

  SUCCESS=$((SUCCESS + LOCAL_SUCCESS))
done

if [ "${SUCCESS}" -eq "0" ]; then
  exit 0
else
  exit 1
fi
Lines changed: 54 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,54 @@
1+
"""
Run lm-eval-harness on a model served by vllm and compare its scores to a
HF-transformers baseline computed offline.

Baseline configs are found in configs/$MODEL.yaml.

* export LM_EVAL_TEST_DATA_FILE=configs/Meta-Llama-3-70B-Instruct.yaml
* export LM_EVAL_TP_SIZE=4
* pytest -s test_lm_eval_correctness.py
"""

import os
from pathlib import Path

import lm_eval
import numpy
import yaml

# Relative tolerance when comparing a measured metric to its baseline value.
RTOL = 0.02
TEST_DATA_FILE = os.environ.get(
    "LM_EVAL_TEST_DATA_FILE",
    ".buildkite/lm-eval-harness/configs/Meta-Llama-3-8B-Instruct.yaml")

TP_SIZE = os.environ.get("LM_EVAL_TP_SIZE", 1)


def launch_lm_eval(eval_config):
    # Assemble the vllm model_args string from the config plus the TP size.
    model_args = (f"pretrained={eval_config['model_name']},"
                  f"tensor_parallel_size={TP_SIZE}")
    task_names = [task["name"] for task in eval_config["tasks"]]

    return lm_eval.simple_evaluate(model="vllm",
                                   model_args=model_args,
                                   tasks=task_names,
                                   num_fewshot=eval_config["num_fewshot"],
                                   limit=eval_config["limit"],
                                   batch_size="auto")


def test_lm_eval_correctness():
    config_text = Path(TEST_DATA_FILE).read_text(encoding="utf-8")
    eval_config = yaml.safe_load(config_text)

    # Launch eval requests.
    results = launch_lm_eval(eval_config)

    # Confirm scores match ground truth.
    for task in eval_config["tasks"]:
        task_name = task["name"]
        for metric in task["metrics"]:
            metric_name = metric["name"]
            ground_truth = metric["value"]
            measured_value = results["results"][task_name][metric_name]
            print(f'{task_name} | {metric_name}: '
                  f'ground_truth={ground_truth} | measured={measured_value}')
            assert numpy.isclose(ground_truth, measured_value, rtol=RTOL)

.buildkite/test-pipeline.yaml

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -197,6 +197,22 @@ steps:
197197
- pip install aiohttp
198198
- bash run-benchmarks.sh
199199

200+
# CI steps running the lm-eval correctness harness. The spawn worker method
# is exported because the harness drives vllm in-process; see
# .buildkite/lm-eval-harness/run-tests.sh for what the -c/-t flags select.
- label: LM Eval Small Models
  working_dir: "/vllm-workspace/.buildkite/lm-eval-harness"
  commands:
  - pip install lm-eval
  - export VLLM_WORKER_MULTIPROC_METHOD=spawn
  - bash ./run-tests.sh -c configs/models-small.txt -t 1

- label: LM Eval Large Models
  # NOTE(review): presumably requires 4 GPUs for tensor-parallel size 4 —
  # confirm the a100/num_gpus keys against the pipeline schema.
  gpu: a100
  num_gpus: 4
  working_dir: "/vllm-workspace/.buildkite/lm-eval-harness"
  commands:
  - pip install lm-eval
  - export VLLM_WORKER_MULTIPROC_METHOD=spawn
  - bash ./run-tests.sh -c configs/models-large.txt -t 4
215+
200216
- label: Documentation Build
201217
working_dir: "/vllm-workspace/test_docs/docs"
202218
no_gpu: True

0 commit comments

Comments
 (0)