Skip to content
Merged
Original file line number Diff line number Diff line change
Expand Up @@ -22,5 +22,5 @@ jobs:
export CM_REPOS=$HOME/GH_CM
pip install --upgrade cm4mlops
pip install tabulate
cm run script --tags=run-mlperf,inference,_all-scenarios,_full,_r4.1-dev --execution_mode=valid --pull_changes=yes --pull_inference_changes=yes --model=${{ matrix.model }} --submitter="MLCommons" --hw_name=IntelSPR.24c --implementation=amd --backend=pytorch --category=datacenter --division=open --scenario=Offline --docker_dt=yes --docker_it=no --docker_cm_repo=gateoverflow@cm4mlops --adr.compiler.tags=gcc --device=rocm --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean --docker --quiet
cm run script --tags=run-mlperf,inference,_all-scenarios,_full,_r4.1-dev --execution_mode=valid --pull_changes=yes --pull_inference_changes=yes --model=${{ matrix.model }} --submitter="MLCommons" --hw_name=IntelSPR.24c --implementation=amd --backend=pytorch --category=datacenter --division=open --scenario=Offline --docker_dt=yes --docker_it=no --docker_cm_repo=gateoverflow@cm4mlops --adr.compiler.tags=gcc --device=rocm --use_dataset_from_host=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean --docker --quiet
# cm run script --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/gateoverflow/mlperf_inference_unofficial_submissions_v5.0 --repo_branch=main --commit_message="Results from GH action on SPR.24c" --quiet --submission_dir=$HOME/gh_action_submissions --hw_name=IntelSPR.24c
Original file line number Diff line number Diff line change
Expand Up @@ -22,5 +22,5 @@ jobs:
export CM_REPOS=$HOME/GH_CM
pip install --upgrade cm4mlops
pip install tabulate
cm run script --tags=run-mlperf,inference,_all-scenarios,_submission,_full,_r4.1-dev --preprocess_submission=yes --execution_mode=valid --pull_changes=yes --pull_inference_changes=yes --model=${{ matrix.model }} --submitter="MLCommons" --hw_name=IntelSPR.24c --implementation=intel --backend=pytorch --category=datacenter --division=open --scenario=Offline --docker_dt=yes --docker_it=no --docker_cm_repo=gateoverflow@cm4mlops --adr.compiler.tags=gcc --device=cpu --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean --docker --quiet
cm run script --tags=run-mlperf,inference,_all-scenarios,_submission,_full,_r4.1-dev --preprocess_submission=yes --execution_mode=valid --pull_changes=yes --pull_inference_changes=yes --model=${{ matrix.model }} --submitter="MLCommons" --hw_name=IntelSPR.24c --implementation=intel --backend=pytorch --category=datacenter --division=open --scenario=Offline --docker_dt=yes --docker_it=no --docker_cm_repo=gateoverflow@cm4mlops --adr.compiler.tags=gcc --device=cpu --use_dataset_from_host=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean --docker --quiet
cm run script --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/gateoverflow/mlperf_inference_unofficial_submissions_v5.0 --repo_branch=main --commit_message="Results from GH action on SPR.24c" --quiet --submission_dir=$HOME/gh_action_submissions --hw_name=IntelSPR.24c
2 changes: 1 addition & 1 deletion .github/workflows/test-mlperf-inference-llama2.yml
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ name: MLPerf inference LLAMA 2 70B

on:
schedule:
- cron: "14 14 * * *"
- cron: "14 14 * * 5"

jobs:
build_reference:
Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/test-mlperf-inference-mixtral.yml
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ name: MLPerf inference MIXTRAL-8x7B

on:
schedule:
- cron: "45 14 * * *" # 30th minute and 20th hour => 20:30 UTC => 2 AM IST
- cron: "45 4 * * 5" # 45th minute of the 4th hour on Fridays => 04:45 UTC => 10:15 AM IST

jobs:
build_reference:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@ name: MLPerf Inference Nvidia implementations

on:
schedule:
- cron: "49 19 * * *" #to be adjusted
- cron: "49 1 * * *" # daily at 01:49 UTC — to be adjusted

jobs:
run_nvidia:
Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/test-scc24-sdxl.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@ name: MLPerf inference SDXL (SCC)

on:
schedule:
- cron: "35 19 * * *"
- cron: "35 3 * * *"

jobs:
build_reference:
Expand Down
7 changes: 7 additions & 0 deletions automation/script/module.py
Original file line number Diff line number Diff line change
Expand Up @@ -5239,6 +5239,13 @@ def update_state_from_meta(meta, env, state, const, const_state, deps, post_deps
update_env = meta.get('env', {})
env.update(update_env)

update_env_if_env = meta.get('update_env_if_env', {})
if update_env_if_env:
if not is_dep_tobe_skipped(update_env_if_env, env):
c_env = update_env_if_env.get('env', {})
if c_env:
env.update(c_env)

update_const = meta.get('const', {})
if update_const:
const.update(update_const)
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
---
resnet50:
Offline:
target_qps: 1000.0
Server:
target_qps: 500.0
retinanet:
Offline:
target_qps: 50.0
Server:
target_qps: 30
bert-99:
Offline:
target_qps: 100
bert-99.9:
Offline:
target_qps: 100
3d-unet-99:
Offline:
target_qps: 1.0
3d-unet-99.9:
Offline:
target_qps: 1.0
gptj-99.9:
Offline:
target_qps: 0.5
Server:
target_qps: 0.3
gptj-99:
Offline:
target_qps: 0.5
Server:
target_qps: 0.3
sdxl:
Offline:
target_qps: 0.1
Server:
target_qps: 0.1
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
---
resnet50:
Offline:
target_qps: 1000.0
Server:
target_qps: 500.0
retinanet:
Offline:
target_qps: 50.0
Server:
target_qps: 30
bert-99:
Offline:
target_qps: 100
bert-99.9:
Offline:
target_qps: 100
3d-unet-99:
Offline:
target_qps: 1.0
3d-unet-99.9:
Offline:
target_qps: 1.0
gptj-99.9:
Offline:
target_qps: 0.5
Server:
target_qps: 0.3
gptj-99:
Offline:
target_qps: 0.5
Server:
target_qps: 0.3
sdxl:
Offline:
target_qps: 0.1
Server:
target_qps: 0.1
6 changes: 0 additions & 6 deletions script/run-mlperf-inference-app/_cm.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -265,9 +265,6 @@ variations:
tags: _size.50,_with-sample-ids
nvidia-preprocess-data:
extra_cache_tags: "scc24-base"
inference-src:
tags: _branch.dev
version: custom
deps:
- tags: clean,nvidia,scratch,_sdxl,_downloaded-data
extra_cache_rm_tags: scc24-main
Expand All @@ -282,9 +279,6 @@ variations:
tags: _size.500,_with-sample-ids
nvidia-preprocess-data:
extra_cache_tags: "scc24-main"
inference-src:
tags: _branch.dev
version: custom
env:
CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX4: scc24-main
CM_DOCKER_IMAGE_NAME: scc24
Expand Down