Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 3 additions & 3 deletions .github/workflows/test-mlperf-inference-dlrm.yml
Original file line number Diff line number Diff line change
Expand Up @@ -9,14 +9,14 @@ on:

jobs:
build_reference:
if: github.repository_owner == 'gateoverflow'
if: github.repository_owner == 'gateoverflow_off'
runs-on: [ self-hosted, GO-spr, linux, x64 ]
strategy:
fail-fast: false
matrix:
python-version: [ "3.12" ]
backend: [ "pytorch" ]
device: [ "cpu", "cuda" ]
device: [ "cpu" ]

steps:
- name: Test MLPerf Inference DLRM-v2 reference implementation
Expand All @@ -29,7 +29,7 @@ jobs:
cm run script --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --model=dlrm-v2-99 --implementation=reference --batch_size=1 --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --docker --quiet --test_query_count=1 --target_qps=1 --docker_it=no --docker_cm_repo=gateoverflow@cm4mlops --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean

build_intel:
if: github.repository_owner == 'gateoverflow'
if: github.repository_owner == 'gateoverflow_off'
runs-on: [ self-hosted, GO-spr, linux, x64 ]
strategy:
fail-fast: false
Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/test-mlperf-inference-sdxl.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -22,5 +22,5 @@ jobs:
export CM_REPOS=$HOME/GH_CM
python3 -m pip install cm4mlops
cm pull repo
cm run script --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --docker --model=sdxl --backend=${{ matrix.backend }} --device=cuda --scenario=Offline --test_query_count=1 --precision=${{ matrix.precision }} --target_qps=1 --quiet --docker_it=no --docker_cm_repo=gateoverflow@cm4mlops --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --custom_system_nvidia=yes --clean
cm run script --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --docker --model=sdxl --backend=${{ matrix.backend }} --device=cuda --scenario=Offline --test_query_count=1 --precision=${{ matrix.precision }} --target_qps=1 --quiet --docker_it=no --docker_cm_repo=gateoverflow@cm4mlops --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --env.CM_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --clean
cm run script --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/gateoverflow/cm4mlperf-inference --repo_branch=mlperf-inference-results-scc24 --commit_message="Results from self hosted Github actions - NVIDIARTX4090" --quiet --submission_dir=$HOME/gh_action_submissions
7 changes: 7 additions & 0 deletions script/app-mlperf-inference-nvidia/_cm.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -242,6 +242,13 @@ deps:
names:
- nvidia-inference-common-code

- tags: pull,git,repo
env:
CM_GIT_CHECKOUT_PATH: '<<<CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH>>>'
enable_if_env:
CM_MLPERF_INFERENCE_PULL_CODE_CHANGES:
'yes'

# Creates user conf for given SUT
- tags: generate,user-conf,mlperf,inference
names:
Expand Down
2 changes: 1 addition & 1 deletion script/process-mlperf-accuracy/customize.py
Original file line number Diff line number Diff line change
Expand Up @@ -101,7 +101,7 @@ def preprocess(i):
if env.get('CM_SDXL_COMPLIANCE_IMAGES_PATH', '') != '':
extra_options += f" --compliance-images-path '{env['CM_SDXL_COMPLIANCE_IMAGES_PATH']}' "
else:
extra_options += f" --compliance-images-path {os.path.join(result_dir, 'images')} "
            extra_options += f""" --compliance-images-path '{os.path.join(result_dir, "images")}' """

if env.get('CM_SDXL_ACCURACY_RUN_DEVICE', '') != '':
extra_options += f" --device '{env['CM_SDXL_ACCURACY_RUN_DEVICE']}' "
Expand Down
4 changes: 2 additions & 2 deletions script/pull-git-repo/run.sh
Original file line number Diff line number Diff line change
Expand Up @@ -7,10 +7,10 @@ path=${CM_GIT_CHECKOUT_PATH}
echo "cd $path"

cd $path
test $? -eq 0 || exit 1
test $? -eq 0 || exit $?

echo ${CM_GIT_PULL_CMD}
eval ${CM_GIT_PULL_CMD}
test $? -eq 0 || exit 1
test $? -eq 0 || exit $?

cd $CUR_DIR
2 changes: 2 additions & 0 deletions script/run-mlperf-inference-app/_cm.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -67,6 +67,7 @@ input_mapping:
model: CM_MLPERF_MODEL
multistream_target_latency: CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY
network: CM_NETWORK_LOADGEN
nvidia_system_name: CM_NVIDIA_SYSTEM_NAME
offline_target_qps: CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS
output_dir: OUTPUT_BASE_DIR
output_summary: MLPERF_INFERENCE_SUBMISSION_SUMMARY
Expand All @@ -76,6 +77,7 @@ input_mapping:
precision: CM_MLPERF_MODEL_PRECISION
preprocess_submission: CM_RUN_MLPERF_SUBMISSION_PREPROCESSOR
push_to_github: CM_MLPERF_RESULT_PUSH_TO_GITHUB
pull_changes: CM_MLPERF_INFERENCE_PULL_CODE_CHANGES
readme: CM_MLPERF_README
regenerate_accuracy_file: CM_MLPERF_REGENERATE_ACCURACY_FILE
regenerate_files: CM_REGENERATE_MEASURE_FILES
Expand Down