
MV2 cpp and python example #13


Merged (1 commit) on Apr 9, 2025
4 changes: 4 additions & 0 deletions .gitignore
@@ -10,3 +10,7 @@
xcuserdata/
.swiftpm/
*.xcworkspace/

# MV2
mv2/cpp/build

4 changes: 4 additions & 0 deletions .gitmodules
@@ -0,0 +1,4 @@
[submodule "mv2/cpp/executorch"]
path = mv2/cpp/executorch
url = https://github.com/pytorch/executorch.git
branch = release/0.6
40 changes: 40 additions & 0 deletions mv2/cpp/CMakeLists.txt
@@ -0,0 +1,40 @@
cmake_minimum_required(VERSION 3.18 FATAL_ERROR)
project(executorch_mv2_demo CXX)

set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)

# Set options for executorch build.
option(EXECUTORCH_ENABLE_LOGGING "" ON)
option(EXECUTORCH_BUILD_EXTENSION_DATA_LOADER "" ON)
option(EXECUTORCH_BUILD_EXTENSION_MODULE "" ON)
option(EXECUTORCH_BUILD_EXTENSION_TENSOR "" ON)
option(EXECUTORCH_BUILD_KERNELS_OPTIMIZED "" ON)
option(EXECUTORCH_BUILD_XNNPACK "" ON)

# Add ExecuTorch subdirectory
add_subdirectory("executorch")

set(DEMO_SOURCES main.cpp)

# Create executable
add_executable(executorch_mv2_demo_app ${DEMO_SOURCES})

# Include directories
target_include_directories(executorch_mv2_demo_app PRIVATE ${CMAKE_CURRENT_SOURCE_DIR})

# Link libraries
target_link_libraries(
  executorch_mv2_demo_app
  PRIVATE executorch
          extension_module_static
          extension_tensor
          optimized_native_cpu_ops_lib
          xnnpack_backend
)

# Set output directory
set_target_properties(executorch_mv2_demo_app
  PROPERTIES
    RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/bin"
)
44 changes: 44 additions & 0 deletions mv2/cpp/README.md
@@ -0,0 +1,44 @@
# ExecuTorch MobileNetV2 Demo C++ Application

This is a simple C++ demo application that uses the ExecuTorch library to run MobileNetV2 inference.

## Build instructions

0. Export the model. See [mv2/python/README.md](../python/README.md)

1. The ExecuTorch repository is already configured as a git submodule at `~/executorch-examples/mv2/cpp/executorch/`. To initialize it:
```bash
cd ~/executorch-examples/
git submodule sync
git submodule update --init --recursive
```

2. Install dev requirements for ExecuTorch

```bash
cd ~/executorch-examples/mv2/cpp/executorch
pip install -r requirements-dev.txt
```

3. Build the project:
```bash
cd ~/executorch-examples/mv2/cpp
chmod +x build.sh
./build.sh
```

4. Run the demo application:
```bash
./build/bin/executorch_mv2_demo_app
```

## Dependencies

- CMake 3.18 or higher
- C++17 compatible compiler
- ExecuTorch library (release/0.6)

## Notes

- Make sure you have the correct model file (`.pte`) compatible with ExecuTorch.
- This demo currently feeds the model placeholder input data. In a real application, you would replace this with actual, preprocessed image data (see the sketch below).
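As a rough sketch of how that could be done, the helper below converts an already-decoded 224x224 RGB image (assumed here to be a `uint8_t` buffer in HWC layout) into the float CHW buffer that `main.cpp` passes to `from_blob()`, applying the standard ImageNet normalization used by the torchvision MobileNetV2 weights. Image decoding itself (for example via stb_image or OpenCV) is out of scope.

```cpp
#include <cstdint>

// Hypothetical preprocessing helper: normalize an already-decoded
// 224x224 RGB image (uint8_t, HWC layout) into the CHW float buffer
// expected by the MobileNetV2 model.
void fill_input_from_rgb(const uint8_t* rgb_hwc, float* input_chw) {
  const float mean[3] = {0.485f, 0.456f, 0.406f};
  const float stddev[3] = {0.229f, 0.224f, 0.225f};
  for (int c = 0; c < 3; ++c) {
    for (int y = 0; y < 224; ++y) {
      for (int x = 0; x < 224; ++x) {
        const float value = rgb_hwc[(y * 224 + x) * 3 + c] / 255.0f;
        input_chw[(c * 224 + y) * 224 + x] = (value - mean[c]) / stddev[c];
      }
    }
  }
}
```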
14 changes: 14 additions & 0 deletions mv2/cpp/build.sh
@@ -0,0 +1,14 @@
#!/bin/bash
set -e

# Create build directory if it doesn't exist
mkdir -p build
cd build

# Configure CMake
cmake -DCMAKE_BUILD_TYPE=Release ..

# Build the project
cmake --build . -j$(nproc)

echo "Build complete! Executable located at: ./bin/executorch_mv2_demo_app"
1 change: 1 addition & 0 deletions mv2/cpp/executorch
Submodule executorch added at 982041
23 changes: 23 additions & 0 deletions mv2/cpp/main.cpp
@@ -0,0 +1,23 @@
#include <executorch/extension/module/module.h>
#include <executorch/extension/tensor/tensor.h>
#include <iostream>

using namespace ::executorch::extension;

int main(int argc, char* argv[]) {
  // Load the model exported by mv2/python/export.py.
  Module module("../python/model_mv2_xnnpack.pte");

  // Create an input tensor. The demo uses placeholder data; a real
  // application would fill this buffer with preprocessed image data.
  float input[1 * 3 * 224 * 224];
  auto tensor = from_blob(input, {1, 3, 224, 224});

  // Perform an inference.
  const auto result = module.forward(tensor);

  if (result.ok()) {
    // Retrieve the output data.
    const auto output = result->at(0).toTensor().const_data_ptr<float>();
    (void)output;  // The demo only verifies that inference succeeded.
    std::cout << "Success" << std::endl;
    return 0;
  }

  std::cerr << "Inference failed" << std::endl;
  return 1;
}
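The success branch above only prints `Success`. As a minimal sketch, assuming the model produces a single `[1, 1000]` logits tensor (the standard torchvision MobileNetV2 classification head), the same branch could be extended to report the top-scoring class:

```cpp
// Hypothetical extension of the result.ok() branch: scan the 1000 ImageNet
// class logits and report the index of the highest-scoring one.
const auto output = result->at(0).toTensor().const_data_ptr<float>();
int best_class = 0;
for (int i = 1; i < 1000; ++i) {
  if (output[i] > output[best_class]) {
    best_class = i;
  }
}
std::cout << "Top-1 class index: " << best_class << std::endl;
```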
24 changes: 24 additions & 0 deletions mv2/python/README.md
@@ -0,0 +1,24 @@
### Virtual environment setup
Create and activate a Python virtual environment:
```bash
python3 -m venv .venv && source .venv/bin/activate && pip install --upgrade pip
```
Alternatively, [install conda on your machine](https://conda.io/projects/conda/en/latest/user-guide/install/index.html) and create a conda environment:
```bash
conda create -yn executorch-examples-mv2 python=3.10.0 && conda activate executorch-examples-mv2
```

### Install dependencies
```bash
pip install -r requirements.txt
```

### Export a model
```bash
python export.py
```

### Run model via pybind
```bash
python run.py
```
16 changes: 16 additions & 0 deletions mv2/python/export.py
@@ -0,0 +1,16 @@
import torch
import torchvision.models as models
from torchvision.models.mobilenetv2 import MobileNet_V2_Weights
from executorch.backends.xnnpack.partition.xnnpack_partitioner import XnnpackPartitioner
from executorch.exir import to_edge_transform_and_lower

model = models.mobilenetv2.mobilenet_v2(weights=MobileNet_V2_Weights.DEFAULT).eval()
sample_inputs = (torch.randn(1, 3, 224, 224), )

et_program = to_edge_transform_and_lower(
    torch.export.export(model, sample_inputs),
    partitioner=[XnnpackPartitioner()]
).to_executorch()

with open("model_mv2_xnnpack.pte", "wb") as f:
    f.write(et_program.buffer)
1 change: 1 addition & 0 deletions mv2/python/requirements.txt
@@ -0,0 +1 @@
executorch==0.6.0
20 changes: 20 additions & 0 deletions mv2/python/run.py
@@ -0,0 +1,20 @@
import torch
from executorch.runtime import Runtime
from typing import List

runtime = Runtime.get()

input_tensor: torch.Tensor = torch.randn(1, 3, 224, 224)
program = runtime.load_program("model_mv2_xnnpack.pte")
method = program.load_method("forward")
output: List[torch.Tensor] = method.execute([input_tensor])
print("Run succesfully via executorch")

from torchvision.models.mobilenetv2 import MobileNet_V2_Weights
import torchvision.models as models

eager_reference_model = models.mobilenetv2.mobilenet_v2(weights=MobileNet_V2_Weights.DEFAULT).eval()
eager_reference_output = eager_reference_model(input_tensor)

print("Comparing against original PyTorch module")
print(torch.allclose(output[0], eager_reference_output, rtol=1e-3, atol=1e-5))