Commit 61e79ce

Commit message: Update

[ghstack-poisoned]

2 parents: d5ef3f1 + 62e49ce

File tree: 114 files changed, +3111 −1194 lines


.ci/docker/ci_commit_pins/pytorch.txt

Lines changed: 1 addition & 1 deletion
@@ -1 +1 @@
-0a94bb432ed75cc2d950d81b2921363218a7e459
+27e35de6c288bffad1b4d18b393579c1d1a95547

.ci/docker/conda-env-ci.txt

Lines changed: 1 addition & 0 deletions
@@ -1,4 +1,5 @@
 cmake=3.22.1
 ninja=1.10.2
 libuv
+llvm-openmp
 pkg-config

.ci/scripts/setup-macos.sh

Lines changed: 1 addition & 0 deletions
@@ -121,6 +121,7 @@ setup_macos_env_variables
 # NB: we need buck2 in all cases because cmake build also depends on calling
 # buck2 atm
 install_buck
+brew install libomp
 install_pip_dependencies

 # TODO(huydhn): Unlike our self-hosted runner, GitHub runner doesn't have access

.github/workflows/doc-build.yml

Lines changed: 1 addition & 1 deletion
@@ -84,8 +84,8 @@ jobs:
 needs: build
 if: github.repository == 'pytorch/executorch' && github.event_name == 'push' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/v'))
 permissions:
+  id-token: write
   contents: write
-  contents: read
 uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
 with:
   repository: pytorch/executorch

.github/workflows/pull.yml

Lines changed: 2 additions & 17 deletions
@@ -212,17 +212,14 @@ jobs:
 docker-image: executorch-ubuntu-22.04-clang12
 submodules: 'true'
 ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
-timeout: 90
+timeout: 180
 script: |
   # The generic Linux job chooses to use base env, not the one setup by the image
   CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
   conda activate "${CONDA_ENV}"

   PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh "cmake"

-  # install pybind
-  bash install_executorch.sh --pybind xnnpack --use-pt-pinned-commit
-
   # install Llava requirements
   bash examples/models/llama/install_requirements.sh
   bash examples/models/llava/install_requirements.sh
@@ -483,9 +480,6 @@ jobs:

 PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh "cmake"

-# install pybind
-bash install_executorch.sh --pybind xnnpack --use-pt-pinned-commit
-
 # install phi-3-mini requirements
 bash examples/models/phi-3-mini/install_requirements.sh

@@ -513,9 +507,6 @@ jobs:

 PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh "cmake"

-# install pybind
-bash install_executorch.sh --pybind xnnpack --use-pt-pinned-commit
-
 # install llama requirements
 bash examples/models/llama/install_requirements.sh

@@ -535,17 +526,14 @@ jobs:
 docker-image: executorch-ubuntu-22.04-clang12
 submodules: 'true'
 ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
-timeout: 90
+timeout: 180
 script: |
   # The generic Linux job chooses to use base env, not the one setup by the image
   CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
   conda activate "${CONDA_ENV}"

   PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh "cmake"

-  # install pybind
-  bash install_executorch.sh --pybind xnnpack --use-pt-pinned-commit
-
   # install llama requirements
   bash examples/models/llama/install_requirements.sh

@@ -573,9 +561,6 @@ jobs:

 PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh "cmake"

-# install pybind
-bash install_executorch.sh --pybind xnnpack --use-pt-pinned-commit
-
 # install llama requirements
 bash examples/models/llama/install_requirements.sh

.mypy.ini

Lines changed: 3 additions & 0 deletions
@@ -77,6 +77,9 @@ ignore_missing_imports = True
 [mypy-ruamel]
 ignore_missing_imports = True

+[mypy-serializer.*]
+ignore_missing_imports = True
+
 [mypy-setuptools.*]
 ignore_missing_imports = True

CMakeLists.txt

Lines changed: 36 additions & 0 deletions
@@ -240,6 +240,13 @@ cmake_dependent_option(
   "NOT EXECUTORCH_BUILD_ARM_BAREMETAL" OFF
 )

+
+if(EXECUTORCH_BUILD_EXTENSION_TRAINING)
+  set(EXECUTORCH_BUILD_EXTENSION_TENSOR ON)
+  set(EXECUTORCH_BUILD_EXTENSION_DATA_LOADER ON)
+  set(EXECUTORCH_BUILD_EXTENSION_MODULE ON)
+endif()
+
 if(EXECUTORCH_BUILD_KERNELS_CUSTOM_AOT)
   set(EXECUTORCH_BUILD_EXTENSION_TENSOR ON)
   set(EXECUTORCH_BUILD_KERNELS_CUSTOM ON)
@@ -791,6 +798,35 @@ if(EXECUTORCH_BUILD_PYBIND)
   install(TARGETS portable_lib
           LIBRARY DESTINATION executorch/extension/pybindings
   )
+
+  if(EXECUTORCH_BUILD_EXTENSION_TRAINING)
+
+    set(_pybind_training_dep_libs
+        ${TORCH_PYTHON_LIBRARY}
+        etdump
+        executorch
+        util
+        torch
+        extension_training
+    )
+
+    if(EXECUTORCH_BUILD_XNNPACK)
+      # need to explicitly specify XNNPACK and microkernels-prod
+      # here otherwise uses XNNPACK and microkernel-prod symbols from libtorch_cpu
+      list(APPEND _pybind_training_dep_libs xnnpack_backend XNNPACK microkernels-prod)
+    endif()
+
+    # pybind training
+    pybind11_add_module(_training_lib SHARED extension/training/pybindings/_training_lib.cpp)
+
+    target_include_directories(_training_lib PRIVATE ${TORCH_INCLUDE_DIRS})
+    target_compile_options(_training_lib PUBLIC ${_pybind_compile_options})
+    target_link_libraries(_training_lib PRIVATE ${_pybind_training_dep_libs})
+
+    install(TARGETS _training_lib
+            LIBRARY DESTINATION executorch/extension/training/pybindings
+    )
+  endif()
 endif()

 if(EXECUTORCH_BUILD_KERNELS_CUSTOM)
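For context, when both EXECUTORCH_BUILD_PYBIND and EXECUTORCH_BUILD_EXTENSION_TRAINING are enabled, this change builds a second pybind11 extension, _training_lib, alongside the existing portable_lib and installs it under executorch/extension/training/pybindings. The snippet below is a minimal, hypothetical smoke test for such a build; the import path is only an assumption inferred from the install destination above, and no specific API on the module is assumed.

# Hypothetical smoke test (assumed import path, inferred from the
# LIBRARY DESTINATION executorch/extension/training/pybindings above).
import executorch.extension.training.pybindings._training_lib as training_lib

# List whatever symbols the extension exposes, without relying on any specific API.
print(sorted(n for n in dir(training_lib) if not n.startswith("__")))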

README.md

Lines changed: 42 additions & 28 deletions
@@ -1,9 +1,37 @@
-# ExecuTorch
-
-**ExecuTorch** is an end-to-end solution for enabling on-device inference
-capabilities across mobile and edge devices including wearables, embedded
-devices and microcontrollers. It is part of the PyTorch Edge ecosystem and
-enables efficient deployment of PyTorch models to edge devices.
+<div align="center">
+  <img src="./docs/source/_static/img/et-logo.png" alt="Logo" width="200">
+  <h1 align="center">ExecuTorch: A powerful on-device AI Framework</h1>
+</div>
+
+
+<div align="center">
+  <a href="https://github.com/pytorch/executorch/graphs/contributors"><img src="https://img.shields.io/github/contributors/pytorch/executorch?style=for-the-badge&color=blue" alt="Contributors"></a>
+  <a href="https://github.com/pytorch/executorch/stargazers"><img src="https://img.shields.io/github/stars/pytorch/executorch?style=for-the-badge&color=blue" alt="Stargazers"></a>
+  <a href="https://discord.gg/MeacgB7A"><img src="https://img.shields.io/badge/Discord-Join%20Us-purple?logo=discord&logoColor=white&style=for-the-badge" alt="Join our Discord community"></a>
+  <a href="https://pytorch.org/executorch/stable/index.html"><img src="https://img.shields.io/badge/Documentation-000?logo=googledocs&logoColor=FFE165&style=for-the-badge" alt="Check out the documentation"></a>
+  <hr>
+</div>
+
+**ExecuTorch** is an end-to-end solution for on-device inference and training. It powers much of Meta's on-device AI experiences across Facebook, Instagram, Meta Quest, Ray-Ban Meta Smart Glasses, WhatsApp, and more.
+
+It supports a wide range of models including LLMs (Large Language Models), CV (Computer Vision), ASR (Automatic Speech Recognition), and TTS (Text to Speech).
+
+Platform Support:
+- Operating Systems:
+  - iOS
+  - Mac
+  - Android
+  - Linux
+  - Microcontrollers
+
+- Hardware Acceleration:
+  - Apple
+  - Arm
+  - Cadence
+  - MediaTek
+  - Qualcomm
+  - Vulkan
+  - XNNPACK

 Key value propositions of ExecuTorch are:

@@ -17,35 +45,21 @@ Key value propositions of ExecuTorch are:
   experience due to a lightweight runtime and utilizing full hardware
   capabilities such as CPUs, NPUs, and DSPs.

-For a comprehensive technical overview of ExecuTorch and step-by-step tutorials,
-please visit our documentation website [for the latest release](https://pytorch.org/executorch/stable/index.html) (or the [main branch](https://pytorch.org/executorch/main/index.html)).
-
-Check out the [Getting Started](https://pytorch.org/executorch/stable/getting-started-setup.html#quick-setup-colab-jupyter-notebook-prototype) page for a quick spin.
-
-Check out the examples of [Llama](./examples/models/llama/README.md), [Llava](./examples/models/llava/README.md) and [other models](./examples/README.md) running on edge devices using ExecuTorch.
+## Getting Started
+To get started you can:

+- Visit the [Step by Step Tutorial](https://pytorch.org/executorch/main/index.html) on getting things running locally and deploy a model to a device
+- Use this [Colab Notebook](https://pytorch.org/executorch/stable/getting-started-setup.html#quick-setup-colab-jupyter-notebook-prototype) to start playing around right away
+- Jump straight into LLMs use cases by following specific instructions for [Llama](./examples/models/llama/README.md) and [Llava](./examples/models/llava/README.md)

-**[UPDATE - 10/24]** We have added support for running [Llama 3.2 Quantized 1B/3B](./examples/models/llama/README.md) models via ExecuTorch.
-
-## Feedback
+## Feedback and Engagement

 We welcome any feedback, suggestions, and bug reports from the community to help
-us improve our technology. Please use the [PyTorch
-Forums](https://discuss.pytorch.org/c/executorch) for discussion and feedback
-about ExecuTorch using the **ExecuTorch** category, and our [GitHub
-repository](https://github.com/pytorch/executorch/issues) for bug reporting.
-
-We recommend using the latest release tag from the
-[Releases](https://github.com/pytorch/executorch/releases) page when developing.
+us improve our technology. Check out the [Discussion Board](https://github.com/pytorch/executorch/discussions) or chat real time with us on [Discord](https://discord.gg/MeacgB7A)

 ## Contributing

-See [CONTRIBUTING.md](CONTRIBUTING.md) for details about issues, PRs, code
-style, CI jobs, and other development topics.
-
-To connect with us and other community members, we invite you to join PyTorch Slack community by filling out this [form](https://docs.google.com/forms/d/e/1FAIpQLSeADnUNW36fjKjYzyHDOzEB_abKQE9b6gqqW9NXse6O0MWh0A/viewform). Once you've joined, you can:
-* Head to the `#executorch-general` channel for general questions, discussion, and community support.
-* Join the `#executorch-contributors` channel if you're interested in contributing directly to project development.
+We welcome contributions. To get started review the [guidelines](CONTRIBUTING.md) and chat with us on [Discord](https://discord.gg/MeacgB7A)


 ## Directory Structure

backends/arm/_passes/insert_table_ops.py

Lines changed: 2 additions & 1 deletion
@@ -31,7 +31,7 @@ class InsertTableOpsPass(ExportPass):
     """
     For ops in self.table_ops they need to be serialized as a TOSA TABLE. This pass replaces these
     edge ops with a tosa._table(input: Tensor, target_str: str) where target_str == str(node.target).
-    When loweringthe _table node target_str will be used to find the corresponding torch operator
+    When lowering the _table node target_str will be used to find the corresponding torch operator
     which will be used to produce the table values in operators/op_table.py.
     """

@@ -43,6 +43,7 @@ class InsertTableOpsPass(ExportPass):
         exir_ops.edge.aten.sigmoid.default: torch.sigmoid,
         exir_ops.edge.aten.tanh.default: torch.tanh,
         exir_ops.edge.aten.hardsigmoid.default: torch.nn.functional.hardsigmoid,
+        exir_ops.edge.aten.hardswish.default: torch.nn.functional.hardswish,
     }

     def __init__(self, exported_program: ExportedProgram) -> None:
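For context, the torch callables mapped above are what the pass uses as the reference implementation when filling in the TOSA TABLE values for each quantized edge op. The sketch below shows how a 256-entry int8 table could be derived for the newly added hardswish; it is only a simplified illustration of the idea (the actual generation lives in operators/op_table.py), and the scale/zero-point values are made-up examples.

import torch

def build_int8_table(torch_op, in_scale, in_zp, out_scale, out_zp):
    # Sweep every representable int8 input value.
    qx = torch.arange(-128, 128, dtype=torch.int32)
    # Dequantize, apply the reference torch operator, then requantize.
    x = (qx - in_zp).to(torch.float32) * in_scale
    y = torch_op(x)
    qy = torch.clamp(torch.round(y / out_scale) + out_zp, -128, 127)
    return qy.to(torch.int8)  # 256-entry lookup table indexed by the int8 input

# Example with arbitrary (made-up) quantization parameters.
table = build_int8_table(torch.nn.functional.hardswish, 0.05, 0, 0.05, 0)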

backends/arm/arm_partitioner.py

Lines changed: 1 addition & 0 deletions
@@ -115,6 +115,7 @@ def ops_to_not_decompose(
 ) -> Tuple[List[torch._ops.OpOverload], Optional[Callable[[torch.fx.Node], bool]]]:
     ops_to_not_decompose_if_quant_op = [
         torch.ops.aten.hardsigmoid.default,
+        torch.ops.aten.hardswish.default,
     ]

     def filter_fn(node: torch.fx.Node) -> bool:
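For context, the ops listed above are only kept undecomposed when filter_fn judges that they appear in a quantized context, so that InsertTableOpsPass can later lower them to a single TOSA TABLE. The helper below is a hedged sketch of that kind of check under the usual dq -> op -> q pattern; it is an assumption for illustration, not the actual ExecuTorch filter_fn.

import torch

def in_quantized_context(node: torch.fx.Node) -> bool:
    # Assumed heuristic: every input comes from a dequantize node and every
    # user is a quantize node, i.e. the dq -> op -> q pattern produced by the
    # quantizer.
    def is_dq(n: torch.fx.Node) -> bool:
        return "dequantize" in str(n.target)

    def is_q(n: torch.fx.Node) -> bool:
        s = str(n.target)
        return "quantize" in s and "dequantize" not in s

    return (
        len(node.all_input_nodes) > 0
        and all(is_dq(n) for n in node.all_input_nodes)
        and all(is_q(n) for n in node.users)
    )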
