
Commit c7e84de

Merge branch 'main' into refactor-bitwise-logical-tests
2 parents 19a78bf + 5098808

File tree: 337 files changed (+7524 / -2996 lines)


.ci/docker/ci_commit_pins/pytorch.txt

Lines changed: 1 addition & 1 deletion
@@ -1 +1 @@
-295f2ed4d103017f7e19a7b8263ece606cd629db
+7ae0ce6360b6e4f944906502d20da24c04debee5
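This pin is what the PyTorch install helper further down (install_pytorch_and_domains in .ci/scripts/utils.sh) checks out and then builds or fetches from the wheel cache. A minimal sketch of that wiring, assuming the pin file is read into TORCH_VERSION; only the file path and the git checkout call appear in this commit, so the variable plumbing here is illustrative:

    # Hypothetical plumbing: read the pinned commit, then check it out before building.
    TORCH_VERSION=$(cat .ci/docker/ci_commit_pins/pytorch.txt)
    git -C pytorch checkout "${TORCH_VERSION}"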

.ci/scripts/build_android_instrumentation.sh

Lines changed: 6 additions & 26 deletions
@@ -12,30 +12,10 @@ if [[ -z "${PYTHON_EXECUTABLE:-}" ]]; then
 fi
 which "${PYTHON_EXECUTABLE}"

-build_android_test() {
-  mkdir -p extension/android/executorch_android/src/androidTest/resources
-  cp extension/module/test/resources/add.pte extension/android/executorch_android/src/androidTest/resources
-  pushd extension/android
-  ANDROID_HOME="${ANDROID_SDK:-/opt/android/sdk}" ./gradlew :executorch_android:testDebugUnitTest
-  ANDROID_HOME="${ANDROID_SDK:-/opt/android/sdk}" ./gradlew :executorch_android:assembleAndroidTest
-  popd
-}
+mkdir -p "${BUILD_AAR_DIR}"/executorch_android/src/androidTest/resources
+cp extension/module/test/resources/add.pte "${BUILD_AAR_DIR}"/executorch_android/src/androidTest/resources

-collect_artifacts_to_be_uploaded() {
-  ARTIFACTS_DIR_NAME="$1"
-  # Collect Java library test
-  JAVA_LIBRARY_TEST_DIR="${ARTIFACTS_DIR_NAME}/library_test_dir"
-  mkdir -p "${JAVA_LIBRARY_TEST_DIR}"
-  cp extension/android/executorch_android/build/outputs/apk/androidTest/debug/*.apk "${JAVA_LIBRARY_TEST_DIR}"
-}
-
-main() {
-  build_android_test
-  if [ -n "$ARTIFACTS_DIR_NAME" ]; then
-    collect_artifacts_to_be_uploaded ${ARTIFACTS_DIR_NAME}
-  fi
-}
-
-if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
-  main "$@"
-fi
+pushd "${BUILD_AAR_DIR}"
+ANDROID_HOME="${ANDROID_SDK:-/opt/android/sdk}" ./gradlew :executorch_android:testDebugUnitTest
+ANDROID_HOME="${ANDROID_SDK:-/opt/android/sdk}" ./gradlew :executorch_android:assembleAndroidTest
+popd
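The script no longer wraps its work in build_android_test/collect_artifacts_to_be_uploaded; it now assumes BUILD_AAR_DIR is set and leaves artifact collection to the caller. A minimal usage sketch, mirroring how the _android.yml workflow below drives it (the aar-out location is just that workflow's choice, not a requirement of the script):

    export BUILD_AAR_DIR=aar-out
    mkdir -p "${BUILD_AAR_DIR}"
    bash scripts/build_android_library.sh              # produces ${BUILD_AAR_DIR}/executorch.aar
    bash .ci/scripts/build_android_instrumentation.sh  # builds the androidTest APK under ${BUILD_AAR_DIR}/executorch_android
    # The caller then copies the APK wherever it should be uploaded, e.g.:
    mkdir -p artifacts-to-be-uploaded/library_test_dir
    cp "${BUILD_AAR_DIR}"/executorch_android/build/outputs/apk/androidTest/debug/*.apk artifacts-to-be-uploaded/library_test_dir/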

.ci/scripts/gather_test_models.py

Lines changed: 3 additions & 3 deletions
@@ -14,7 +14,7 @@
 from typing import Any

 from examples.models import MODEL_NAME_TO_MODEL
-from examples.xnnpack import MODEL_NAME_TO_OPTIONS
+from examples.xnnpack import MODEL_NAME_TO_OPTIONS, QuantType

 DEFAULT_RUNNERS = {
     "linux": "linux.2xlarge",
@@ -33,7 +33,7 @@
         "dl3": "linux.4xlarge.memory",
         "emformer_join": "linux.4xlarge.memory",
         "emformer_predict": "linux.4xlarge.memory",
-        "phi-4-mini": "linux.4xlarge.memory",
+        "phi_4_mini": "linux.4xlarge.memory",
     }
 }

@@ -154,7 +154,7 @@ def export_models_for_ci() -> dict[str, dict]:
         if backend == "xnnpack":
             if name not in MODEL_NAME_TO_OPTIONS:
                 continue
-            if MODEL_NAME_TO_OPTIONS[name].quantization:
+            if MODEL_NAME_TO_OPTIONS[name].quantization != QuantType.NONE:
                 backend += "-quantization"

             if MODEL_NAME_TO_OPTIONS[name].delegation:

.ci/scripts/test_llama_torchao_lowbit.sh

Lines changed: 0 additions & 1 deletion
@@ -78,7 +78,6 @@ ${PYTHON_EXECUTABLE} -m examples.models.llama.export_llama \
   -qmode "torchao:8da${QLINEAR_BITWIDTH}w" \
   --group_size ${QLINEAR_GROUP_SIZE} \
   -E "torchao:${QEMBEDDING_BITWIDTH},${QEMBEDDING_GROUP_SIZE}" \
-  --disable_dynamic_shape \
   -d fp32

 # Test run

.ci/scripts/test_model.sh

Lines changed: 9 additions & 6 deletions
@@ -96,15 +96,15 @@ test_model() {
     bash examples/models/llama/install_requirements.sh
     # Test export_llama script: python3 -m examples.models.llama.export_llama.
     # Use Llama random checkpoint with Qwen 2.5 1.5b model configuration.
-    "${PYTHON_EXECUTABLE}" -m examples.models.llama.export_llama --model "${MODEL_NAME}" -c examples/models/llama/params/demo_rand_params.pth -p examples/models/qwen2_5/1_5b_config.json
+    "${PYTHON_EXECUTABLE}" -m examples.models.llama.export_llama --model "${MODEL_NAME}" -p examples/models/qwen2_5/1_5b_config.json
     rm "./${MODEL_NAME}.pte"
     return # Skip running with portable executor runnner since portable doesn't support Qwen's biased linears.
   fi
-  if [[ "${MODEL_NAME}" == "phi-4-mini" ]]; then
+  if [[ "${MODEL_NAME}" == "phi_4_mini" ]]; then
     # Install requirements for export_llama
     bash examples/models/llama/install_requirements.sh
     # Test export_llama script: python3 -m examples.models.llama.export_llama.
-    "${PYTHON_EXECUTABLE}" -m examples.models.llama.export_llama --model "${MODEL_NAME}" -c examples/models/llama/params/demo_rand_params.pth -p examples/models/phi-4-mini/config.json
+    "${PYTHON_EXECUTABLE}" -m examples.models.llama.export_llama --model "${MODEL_NAME}" -p examples/models/phi_4_mini/config.json
     run_portable_executor_runner
     rm "./${MODEL_NAME}.pte"
     return
@@ -224,19 +224,22 @@ test_model_with_coreml() {

   "${PYTHON_EXECUTABLE}" -m examples.apple.coreml.scripts.export --model_name="${MODEL_NAME}" --compute_precision "${DTYPE}"
   EXPORTED_MODEL=$(find "." -type f -name "${MODEL_NAME}*.pte" -print -quit)
-  # TODO:
+
   if [ -n "$EXPORTED_MODEL" ]; then
     EXPORTED_MODEL_WITH_DTYPE="${EXPORTED_MODEL%.pte}_${DTYPE}.pte"
     mv "$EXPORTED_MODEL" "$EXPORTED_MODEL_WITH_DTYPE"
     EXPORTED_MODEL="$EXPORTED_MODEL_WITH_DTYPE"
-    echo "Renamed file path: $EXPORTED_MODEL"
+    echo "OK exported model: $EXPORTED_MODEL"
   else
-    echo "No .pte file found"
+    echo "[error] failed to export model: no .pte file found"
     exit 1
   fi

   # Run the model
   if [ "${should_test}" = true ]; then
+    echo "Installing requirements needed to build coreml_executor_runner..."
+    backends/apple/coreml/scripts/install_requirements.sh
+
     echo "Testing exported model with coreml_executor_runner..."
     local out_dir=$(mktemp -d)
     COREML_EXECUTOR_RUNNER_OUT_DIR="${out_dir}" examples/apple/coreml/scripts/build_executor_runner.sh

.ci/scripts/unittest-linux.sh

Lines changed: 1 addition & 2 deletions
@@ -21,8 +21,7 @@ if [[ "$BUILD_TOOL" == "cmake" ]]; then
   source .ci/scripts/setup-vulkan-linux-deps.sh

   PYTHON_EXECUTABLE=python \
-  EXECUTORCH_BUILD_PYBIND=ON \
-  CMAKE_ARGS="-DEXECUTORCH_BUILD_XNNPACK=ON -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON" \
+  CMAKE_ARGS="-DEXECUTORCH_BUILD_PYBIND=ON -DEXECUTORCH_BUILD_XNNPACK=ON -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON" \
   .ci/scripts/setup-linux.sh "$@"

 # Install llama3_2_vision dependencies.
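The same substitution recurs in unittest-macos.sh, pull.yml, trunk.yml, and the wheel env script below: EXECUTORCH_BUILD_PYBIND is no longer honored as a standalone environment variable and has to be passed as a CMake option inside CMAKE_ARGS. A hedged sketch of what a local build would look like under the new convention, assuming (as these CI scripts do) that the pip build forwards CMAKE_ARGS to CMake:

    # Assumption: the ExecuTorch pip build reads CMAKE_ARGS, as the CI setup scripts rely on.
    CMAKE_ARGS="-DEXECUTORCH_BUILD_PYBIND=ON -DEXECUTORCH_BUILD_XNNPACK=ON" \
      pip install .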

.ci/scripts/unittest-macos-buck2.sh

File mode changed: 100644 → 100755

.ci/scripts/unittest-macos.sh

Lines changed: 1 addition & 2 deletions
@@ -21,8 +21,7 @@ trap 'rm -rfv ${TMP_DIR}' EXIT

 # Setup MacOS dependencies as there is no Docker support on MacOS atm
 PYTHON_EXECUTABLE=python \
-EXECUTORCH_BUILD_PYBIND=ON \
-CMAKE_ARGS="-DEXECUTORCH_BUILD_COREML=ON -DEXECUTORCH_BUILD_MPS=ON -DEXECUTORCH_BUILD_XNNPACK=ON -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON" \
+CMAKE_ARGS="-DEXECUTORCH_BUILD_PYBIND=ON -DEXECUTORCH_BUILD_COREML=ON -DEXECUTORCH_BUILD_MPS=ON -DEXECUTORCH_BUILD_XNNPACK=ON -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON" \
 ${CONDA_RUN} --no-capture-output \
 .ci/scripts/setup-macos.sh "$@"

.ci/scripts/utils.sh

Lines changed: 39 additions & 5 deletions
@@ -60,12 +60,46 @@ install_pytorch_and_domains() {
   # Fetch the target commit
   pushd pytorch || return
   git checkout "${TORCH_VERSION}"
-  git submodule update --init --recursive

-  export USE_DISTRIBUTED=1
-  # Then build and install PyTorch
-  python setup.py bdist_wheel
-  pip install "$(echo dist/*.whl)"
+  local system_name=$(uname)
+  if [[ "${system_name}" == "Darwin" ]]; then
+    local platform=$(python -c 'import sysconfig; import platform; v=platform.mac_ver()[0].split(".")[0]; platform=sysconfig.get_platform().split("-"); platform[1]=f"{v}_0"; print("_".join(platform))')
+  fi
+  local python_version=$(python -c 'import platform; v=platform.python_version_tuple(); print(f"{v[0]}{v[1]}")')
+  local torch_release=$(cat version.txt)
+  local torch_short_hash=${TORCH_VERSION:0:7}
+  local torch_wheel_path="cached_artifacts/pytorch/executorch/pytorch_wheels/${system_name}/${python_version}"
+  local torch_wheel_name="torch-${torch_release}%2Bgit${torch_short_hash}-cp${python_version}-cp${python_version}-${platform:-}.whl"
+
+  local cached_torch_wheel="https://gha-artifacts.s3.us-east-1.amazonaws.com/${torch_wheel_path}/${torch_wheel_name}"
+  # Cache PyTorch wheel is only needed on MacOS, Linux CI already has this as part
+  # of the Docker image
+  local torch_wheel_not_found=0
+  if [[ "${system_name}" == "Darwin" ]]; then
+    pip install "${cached_torch_wheel}" || torch_wheel_not_found=1
+  else
+    torch_wheel_not_found=1
+  fi
+
+  # Found no such wheel, we will build it from source then
+  if [[ "${torch_wheel_not_found}" == "1" ]]; then
+    echo "No cached wheel found, continue with building PyTorch at ${TORCH_VERSION}"
+
+    git submodule update --init --recursive
+    USE_DISTRIBUTED=1 python setup.py bdist_wheel
+    pip install "$(echo dist/*.whl)"
+
+    # Only AWS runners have access to S3
+    if command -v aws && [[ -z "${GITHUB_RUNNER:-}" ]]; then
+      for wheel_path in dist/*.whl; do
+        local wheel_name=$(basename "${wheel_path}")
+        echo "Caching ${wheel_name}"
+        aws s3 cp "${wheel_path}" "s3://gha-artifacts/${torch_wheel_path}/${wheel_name}"
+      done
+    fi
+  else
+    echo "Use cached wheel at ${cached_torch_wheel}"
+  fi

   # Grab the pinned audio and vision commits from PyTorch
   TORCHAUDIO_VERSION=$(cat .github/ci_commit_pins/audio.txt)
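The cached-wheel URL is assembled from the system name, the CPython tag, the PyTorch release string, and the first seven characters of the pinned commit. A worked example of what the pieces expand to, with illustrative values (the real ones come from uname, sysconfig, and pytorch/version.txt at run time; only the URL template itself is taken from the code above):

    system_name=Darwin
    python_version=311                 # e.g. CPython 3.11
    torch_release=2.8.0a0              # illustrative; read from pytorch/version.txt
    torch_short_hash=7ae0ce6           # first 7 chars of the pin bumped above
    platform=macosx_15_0_arm64         # illustrative sysconfig platform tag
    echo "https://gha-artifacts.s3.us-east-1.amazonaws.com/cached_artifacts/pytorch/executorch/pytorch_wheels/${system_name}/${python_version}/torch-${torch_release}%2Bgit${torch_short_hash}-cp${python_version}-cp${python_version}-${platform}.whl"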

.ci/scripts/wheel/envvar_base.sh

Lines changed: 1 addition & 4 deletions
@@ -8,13 +8,10 @@
 # should typically only contain shell variable assignments. Be sure to export
 # any variables so that subprocesses will see them.

-# Enable pybindings so that users can execute ExecuTorch programs from python.
-export EXECUTORCH_BUILD_PYBIND=1
-
 # Ensure that CMAKE_ARGS is defined before referencing it. Defaults to empty
 # if not defined.
 export CMAKE_ARGS="${CMAKE_ARGS:-}"

 # Link the XNNPACK backend into the pybindings runtime so that users can execute
 # ExecuTorch programs that delegate to it.
-CMAKE_ARGS="${CMAKE_ARGS} -DEXECUTORCH_BUILD_XNNPACK=ON"
+CMAKE_ARGS="${CMAKE_ARGS} -DEXECUTORCH_BUILD_PYBIND=ON -DEXECUTORCH_BUILD_XNNPACK=ON"

.ci/scripts/wheel/test_macos.py

Lines changed: 4 additions & 6 deletions
@@ -15,11 +15,9 @@
             model=Model.Mv3,
             backend=Backend.XnnpackQuantizationDelegation,
         ),
-        # Enable this once CoreML is suppported out-of-the-box
-        # https://github.com/pytorch/executorch/issues/9019
-        # test_base.ModelTest(
-        #     model=Model.Mv3,
-        #     backend=Backend.CoreMlTest,
-        # )
+        test_base.ModelTest(
+            model=Model.Mv3,
+            backend=Backend.CoreMlTest,
+        ),
     ]
 )

.github/workflows/_android.yml

Lines changed: 12 additions & 7 deletions
@@ -27,16 +27,21 @@ jobs:
         conda activate "${CONDA_ENV}"
         PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh --build-tool buck2
         export ARTIFACTS_DIR_NAME=artifacts-to-be-uploaded
-
-        mkdir -p ${ARTIFACTS_DIR_NAME}/fp32-xnnpack-custom
-        bash examples/models/llama/install_requirements.sh
-        bash ".ci/scripts/test_llama.sh" -model stories110M -build_tool cmake -dtype fp16 -mode portable -upload ${ARTIFACTS_DIR_NAME}/fp32-xnnpack-custom
+        mkdir -p ${ARTIFACTS_DIR_NAME}/

         # Build LLM Demo for Android
         export BUILD_AAR_DIR=aar-out
         mkdir -p $BUILD_AAR_DIR
-        bash scripts/build_android_library.sh ${ARTIFACTS_DIR_NAME}
-        bash .ci/scripts/build_android_instrumentation.sh ${ARTIFACTS_DIR_NAME}
+        bash scripts/build_android_library.sh
+        cp ${BUILD_AAR_DIR}/executorch.aar $ARTIFACTS_DIR_NAME
+
+        mkdir -p ${ARTIFACTS_DIR_NAME}/library_test_dir
+        bash .ci/scripts/build_android_instrumentation.sh
+        cp ${BUILD_AAR_DIR}/executorch_android/build/outputs/apk/androidTest/debug/executorch_android-debug-androidTest.apk "${ARTIFACTS_DIR_NAME}/library_test_dir"
+
+        mkdir -p ${ARTIFACTS_DIR_NAME}/fp32-xnnpack-custom
+        bash examples/models/llama/install_requirements.sh
+        bash ".ci/scripts/test_llama.sh" -model stories110M -build_tool cmake -dtype fp16 -mode portable -upload ${ARTIFACTS_DIR_NAME}/fp32-xnnpack-custom

         mkdir -p examples/demo-apps/android/LlamaDemo/app/libs
         cp aar-out/executorch.aar examples/demo-apps/android/LlamaDemo/app/libs
@@ -96,7 +101,7 @@ jobs:
         curl -O https://gha-artifacts.s3.amazonaws.com/${{ github.repository }}/${{ github.run_id }}/artifacts/llm_demo/app-debug.apk
         curl -O https://gha-artifacts.s3.amazonaws.com/${{ github.repository }}/${{ github.run_id }}/artifacts/llm_demo/app-debug-androidTest.apk
         curl -O https://gha-artifacts.s3.amazonaws.com/${{ github.repository }}/${{ github.run_id }}/artifacts/fp32-xnnpack-custom/model.zip
-        curl -o android-test-debug-androidTest.apk https://gha-artifacts.s3.amazonaws.com/${{ github.repository }}/${{ github.run_id }}/artifacts/library_test_dir/executorch-debug-androidTest.apk
+        curl -o android-test-debug-androidTest.apk https://gha-artifacts.s3.amazonaws.com/${{ github.repository }}/${{ github.run_id }}/artifacts/library_test_dir/executorch_android-debug-androidTest.apk
         unzip model.zip
         mv *.pte model.pte
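With the reordered steps, the upload directory should end up looking roughly like the sketch below (layout inferred from the mkdir/cp calls above and the curl URLs in the second hunk; model.zip is produced by test_llama.sh via its -upload flag):

    # artifacts-to-be-uploaded/
    #   executorch.aar
    #   library_test_dir/
    #     executorch_android-debug-androidTest.apk
    #   fp32-xnnpack-custom/
    #     model.zip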

.github/workflows/_unittest.yml

Lines changed: 2 additions & 0 deletions
@@ -49,4 +49,6 @@ jobs:
       ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
       script: |
         set -eux
+        # This is needed to get the prebuilt PyTorch wheel from S3
+        ${CONDA_RUN} --no-capture-output pip install awscli==1.37.21
         .ci/scripts/unittest-macos.sh --build-tool "${{ inputs.build-tool }}" --build-mode "${{ inputs.build-mode }}" --editable "${{ inputs.editable }}"

.github/workflows/build-wheels-linux.yml

Lines changed: 3 additions & 0 deletions
@@ -6,6 +6,9 @@ on:
     paths:
       - .ci/**/*
       - .github/workflows/build-wheels-linux.yml
+      - examples/**/*
+      - pyproject.toml
+      - setup.py
   push:
     branches:
       - nightly

.github/workflows/build-wheels-macos.yml

Lines changed: 6 additions & 1 deletion
@@ -6,6 +6,9 @@ on:
     paths:
       - .ci/**/*
       - .github/workflows/build-wheels-macos.yml
+      - examples/**/*
+      - pyproject.toml
+      - setup.py
   push:
     branches:
       - nightly
@@ -57,6 +60,8 @@ jobs:
       pre-script: ${{ matrix.pre-script }}
       post-script: ${{ matrix.post-script }}
       package-name: ${{ matrix.package-name }}
-      runner-type: macos-m1-stable
+      # Meta's macOS runners do not have Xcode, so use GitHub's runners.
+      runner-type: macos-latest-xlarge
+      setup-miniconda: true
       smoke-test-script: ${{ matrix.smoke-test-script }}
       trigger-event: ${{ github.event_name }}

.github/workflows/pull.yml

Lines changed: 3 additions & 4 deletions
@@ -106,7 +106,7 @@ jobs:
         - model: emformer_join
           backend: xnnpack-quantization-delegation
           runner: linux.4xlarge.memory
-        - model: phi-4-mini
+        - model: phi_4_mini
           backend: portable
           runner: linux.4xlarge.memory
         - model: llama3_2_vision_encoder
@@ -365,8 +365,7 @@ jobs:
       # build module for executorch.extension.pybindings.portable_lib
       BUILD_TOOL="cmake"
       PYTHON_EXECUTABLE=python \
-      EXECUTORCH_BUILD_XNNPACK=ON \
-      EXECUTORCH_BUILD_PYBIND=ON \
+      CMAKE_ARGS="-DEXECUTORCH_BUILD_PYBIND=ON -DEXECUTORCH_BUILD_XNNPACK=ON" \
       bash .ci/scripts/setup-linux.sh --build-tool "${BUILD_TOOL}"

      # see if we can import the module successfully
@@ -504,7 +503,7 @@ jobs:

       # Setup MacOS dependencies as there is no Docker support on MacOS atm
       PYTHON_EXECUTABLE=python \
-      EXECUTORCH_BUILD_PYBIND=ON \
+      CMAKE_ARGS="-DEXECUTORCH_BUILD_PYBIND=ON" \
       EXECUTORCH_BUILD_ARM_BAREMETAL=ON \
       .ci/scripts/setup-linux.sh --build-tool "${BUILD_TOOL}"

.github/workflows/trunk.yml

Lines changed: 13 additions & 7 deletions
@@ -65,22 +65,29 @@ jobs:
       matrix:
         model: [linear, add, add_mul, ic3, ic4, mv2, mv3, resnet18, resnet50, vit, w2l, mobilebert, emformer_join, emformer_transcribe]
         backend: [portable, xnnpack-quantization-delegation]
+        runner: [linux.arm64.2xlarge]
         include:
           - model: lstm
             backend: portable
+            runner: linux.arm64.2xlarge
           - model: mul
             backend: portable
+            runner: linux.arm64.2xlarge
           - model: softmax
             backend: portable
-          - model: phi-4-mini
+            runner: linux.arm64.2xlarge
+          - model: phi_4_mini
             backend: portable
+            runner: linux.arm64.m7g.4xlarge
           - model: qwen2_5
             backend: portable
+            runner: linux.arm64.2xlarge
           - model: llama3_2_vision_encoder
             backend: portable
+            runner: linux.arm64.2xlarge
       fail-fast: false
     with:
-      runner: linux.arm64.2xlarge
+      runner: ${{ matrix.runner }}
       docker-image: executorch-ubuntu-22.04-gcc11-aarch64
       submodules: 'true'
       ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -228,7 +235,7 @@ jobs:
     name: test-coreml-delegate
     uses: pytorch/test-infra/.github/workflows/macos_job.yml@main
     with:
-      runner: macos-13-xlarge
+      runner: macos-latest-xlarge
       python-version: '3.11'
       submodules: 'true'
       ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -261,7 +268,7 @@ jobs:

       # build module for executorch.extension.pybindings.portable_lib
       BUILD_TOOL=${{ matrix.build-tool }}
-      EXECUTORCH_BUILD_PYBIND=ON PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/setup-macos.sh --build-tool "${BUILD_TOOL}"
+      CMAKE_ARGS="-DEXECUTORCH_BUILD_PYBIND=ON" PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/setup-macos.sh --build-tool "${BUILD_TOOL}"

       # see if we can import the module successfully
       ${CONDA_RUN} python -c "from executorch.extension.pybindings import portable_lib; print('success!')"
@@ -536,9 +543,8 @@ jobs:
       git clone https://github.com/huggingface/optimum-executorch
       cd optimum-executorch
       # There is no release yet, for CI stability, always test from the same commit on main
-      git checkout 6a7e83f3eee2976fa809335bfb78a45b1ea1cb25
-      pip install .
-      pip install accelerate sentencepiece
+      git checkout 577a2b19670e4c643a5c6ecb09bf47b9a699e7c6
+      pip install .[tests]
       pip list
       echo "::endgroup::"
