diff --git a/Makefile b/Makefile
index e2f5ba2..e9f49e0 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 SHELL := /bin/bash
 PYTHON_VERSION = 3.10
-# Some variables
+# Variables used mainly for the Docker build
 PLATFORM = linux/arm64
 EXAMPLE_DIR = examples
 DOCKER_RUN_FLAGS = --env NEPTUNE_API_TOKEN=$(NEPTUNE_API_TOKEN) --env NEPTUNE_PROJECT=$(NEPTUNE_PROJECT) --privileged --network=host --ulimit nofile=65536:65536
@@ -14,38 +14,56 @@ init:
 install: configure-commit-template
 	uv sync -p $(PYTHON_VERSION) && uv lock
+# Configure the git commit template; this helps write better commit messages
 configure-commit-template:
 	git config --global commit.template $(realpath commit-template.txt)
+# Pre-commit hooks run checks (usually linters) before code is committed to the repository,
+# which helps keep the code clean and consistent; this command sets up the hooks
 configure-pre-commit:
 	uvx pre-commit install
+# Run the pre-commit checks on all files in the repository
 format:
 	uvx pre-commit run --all-files
+# Run pytest on the unit tests under the 'tests/' directory
 test:
 	uv run pytest
+# Use uv to run the train script, passing along arguments from the command line
 train:
 	uv run src/train.py ${ARGS}
+# Use uv to run the evaluate script, passing along arguments from the command line
 evaluate:
 	uv run src/evaluate.py ${ARGS}
+# Build the Docker image with the base dependencies
 build-docker:
 	docker build --target lightning-base -t lightning-base .
+# Build the Docker image and jump into the container to test a fresh environment (CPU)
 dev-container-cpu: build-docker
 	docker run $(DOCKER_RUN_FLAGS) -v $(PROJECT_ROOT):/app -it lightning-base:latest /bin/bash
+# Build the Docker image and jump into the container to test a fresh environment (GPU)
 dev-container-gpu: build-docker
 	docker run $(DOCKER_RUN_FLAGS_GPU) -v $(PROJECT_ROOT):/app -it lightning-base:latest /bin/bash
+# Run the train script using the Docker image
 train-docker: build-docker
 	docker run $(DOCKER_RUN_FLAGS) --user root -v $(PROJECT_ROOT):/app lightning-base:latest /bin/bash -i -c "uv run /app/src/train.py ${ARGS}"
+# Run the evaluate script using the Docker image
 evaluate-docker: build-docker
 	docker run $(DOCKER_RUN_FLAGS) --user root -v $(PROJECT_ROOT):/app lightning-base:latest /bin/bash -i -c "uv run /app/src/evaluate.py ${ARGS}"
+# Build the documentation from the current 'src/' and 'docs/' directories and deploy it to the
+# gh-pages branch of your GitHub repository (you then need to set up GitHub Pages to use that branch)
 deploy-pages:
 	uv run mkdocs build && uv run mkdocs gh-deploy
+
+# Serve the documentation locally to preview how it looks
+serve-docs:
+	uv run mkdocs build && uv run mkdocs serve
diff --git a/README.md b/README.md
index fe7e39e..a9c5d40 100644
--- a/README.md
+++ b/README.md
@@ -23,12 +23,25 @@ A template for machine learning or deep learning projects.
 ## 🧠 Features
 
-- [x] Easy to implement your own model and dataloader
+- [x] Easy to implement your own model and dataloader through Hydra instantiation of datamodules and models
 - [x] Configurable hyperparameters with Hydra
 - [x] Logging with the solution that fits your needs
 - [x] Works on CPU, multi-GPU, and multi-TPUs
-
-## ⚙️ Installation
+- [x] Bleeding-edge uv to manage packages
+- [x] Pre-commit hooks to validate code style and quality
+- [x] Hydra instantiation of models and dataloaders
+- [x] torch.compile of models
+- [x] Tensor type validation with TorchTyping
+- [x] Dockerized project (Dockerfile, run tests and training through Docker, optionally docker-compose)
+- [x] Examples of efficient multi-processing using Python's pool map
+- [x] Examples using polars for faster and more efficient dataframe processing
+- [x] Example of mock tests using pytest
+- [x] Utility scripts to download datasets from Kaggle
+- [x] Cloud data retrieval using cloudpathlib (launch your training on AWS, GCP, Azure)
+- [x] Architecture and example of a model-serving API built with LitServe
+- [x] Documentation website generated with MkDocs and its best integrations
+
+## ⚙️ Installation Steps
 
 - [ ] Use this repository as a template
 - [ ] Clone your repository
@@ -36,75 +49,127 @@ A template for machine learning or deep learning projects.
 - [ ] Add your model which inherits from `LightningModule` in `src/models`
 - [ ] Add your dataset which inherits from `Datamodule` in `src/data`
 - [ ] Add associated yaml configuration files in `configs/` folder following existing examples
+- [ ] Read through the Makefile to discover all the available commands
+
+## 🤠 Tips and Tricks
+
+### 🐍 How does the project work?
+
+The `train.py` or `eval.py` script is the entry point of the project. It uses Hydra to instantiate the model (LightningModule), the dataloader (DataModule), and the trainer from the composed configuration. The model is then trained or evaluated with PyTorch Lightning.
+
+### Implementing your logic
+
+You don't need to worry about implementing training loops, support for different hardware, configuration parsing, etc. You only need to care about four files for each training run: your LightningModule (plus its Hydra config) and your DataModule (plus its Hydra config).
+
+In the LightningModule, you need to implement the following methods:
+
+- `forward`
+- `training_step`
+- `validation_step`
+- `test_step`
+
+Get inspired by the provided examples in the `src/models` folder.
+
+For the DataModule, you need to implement the following methods:
+
+- `prepare_data`
+- `setup`
+- `train_dataloader`
+- `val_dataloader`
+- `test_dataloader`
+
+Get inspired by the provided examples in the `src/data` folder.
+
+Learn more in PyTorch Lightning's [LightningModule](https://pytorch-lightning.readthedocs.io/en/0.10.0/lightning_module.html) and [DataModule](https://pytorch-lightning.readthedocs.io/en/0.10.0/datamodules.html) documentation.
+Finally, add the corresponding YAML configuration files for your model and datamodule in the `configs/` folder, following the existing examples.
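+
+Below is a minimal sketch of what such a pair can look like. Everything in it is illustrative rather than the template's actual code (the class names, the random tensors standing in for a dataset, the layer sizes); see `src/models` and `src/data` for the real examples. Note that Lightning also requires `configure_optimizers` for training:
+
+```python
+# Illustrative sketch only: MyModule / MyDataModule and all shapes are made up.
+import torch
+import lightning as L
+from torch import nn
+from torch.utils.data import DataLoader, TensorDataset
+
+
+class MyModule(L.LightningModule):
+    def __init__(self, lr: float = 1e-3) -> None:
+        super().__init__()
+        self.net = nn.Linear(28 * 28, 10)
+        self.lr = lr
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        return self.net(x.flatten(1))
+
+    def training_step(self, batch, batch_idx):
+        x, y = batch
+        loss = nn.functional.cross_entropy(self(x), y)
+        self.log("train/loss", loss)
+        return loss
+
+    def validation_step(self, batch, batch_idx):
+        x, y = batch
+        self.log("val/loss", nn.functional.cross_entropy(self(x), y))
+
+    def test_step(self, batch, batch_idx):
+        x, y = batch
+        self.log("test/loss", nn.functional.cross_entropy(self(x), y))
+
+    def configure_optimizers(self):
+        # Required for training, in addition to the four methods above
+        return torch.optim.Adam(self.parameters(), lr=self.lr)
+
+
+class MyDataModule(L.LightningDataModule):
+    def __init__(self, batch_size: int = 32) -> None:
+        super().__init__()
+        self.batch_size = batch_size
+
+    def prepare_data(self) -> None:
+        # Download or generate the data once (runs in a single process)
+        pass
+
+    def setup(self, stage: str) -> None:
+        # Build the datasets; random tensors stand in for a real dataset here
+        self.dataset = TensorDataset(
+            torch.randn(256, 28 * 28), torch.randint(0, 10, (256,))
+        )
+
+    def train_dataloader(self) -> DataLoader:
+        return DataLoader(self.dataset, batch_size=self.batch_size, shuffle=True)
+
+    def val_dataloader(self) -> DataLoader:
+        return DataLoader(self.dataset, batch_size=self.batch_size)
+
+    def test_dataloader(self) -> DataLoader:
+        return DataLoader(self.dataset, batch_size=self.batch_size)
+```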
+
+### 🔍 The power of Hydra
+
+Since Hydra drives the configuration, you can easily change the hyperparameters of your model, the dataloader, the trainer, etc. by editing the YAML files in the `configs/` folder. You can also use the `--multirun` option to run multiple experiments with different configurations.
+
+And because Hydra is also used to instantiate the model and dataloader, you can swap the model, the dataloader, or any other component by editing the YAML configuration files, or directly on the command line. This is especially useful when you want to try different models or dataloaders.
+
+For example, you can run the following command to train a model with a different architecture, dataset, and trainer:
-## 🌳 Tree Explained
+
+```bash
+uv run src/train.py model=LeNet datamodule=MNISTDataModule trainer=gpu
+```
+
+Read more about Hydra in the [official documentation](https://hydra.cc/docs/intro/).
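+
+In case it helps to see the mechanism, here is a rough sketch of the instantiation pattern such an entry point relies on, assuming `hydra-core` is installed and each config group carries a `_target_` class path. The paths and group names below are illustrative; see `src/train.py` and `configs/` for the real thing:
+
+```python
+# Rough sketch of Hydra-based instantiation; paths and config names are illustrative.
+import hydra
+from hydra.utils import instantiate
+from omegaconf import DictConfig
+
+
+@hydra.main(config_path="../configs", config_name="train", version_base=None)
+def main(cfg: DictConfig) -> None:
+    # Each selected YAML holds a _target_ dotted path, e.g.
+    #   _target_: src.models.mnist_module.MNISTLitModule
+    # so `model=LeNet` on the CLI simply picks a different YAML to build from.
+    model = instantiate(cfg.model)      # a LightningModule
+    datamodule = instantiate(cfg.data)  # a LightningDataModule
+    trainer = instantiate(cfg.trainer)  # a lightning.Trainer
+    trainer.fit(model, datamodule=datamodule)
+
+
+if __name__ == "__main__":
+    main()
+```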
+
+### 💡 Best practices
+
+- Type your functions and classes with `TorchTyping` for better type checking (in addition to Python's typing module)
+- Docstring your functions and classes; this is even more important here, as docstrings are used to generate the documentation with MkDocs
+- Use the `make` commands to run your code; it is easier and faster than writing the full commands (and check the Makefile for everything available 😉)
+- [Use the pre-commit hooks](https://pre-commit.com/) to ensure your code is formatted correctly and is of good quality
+- [uv](https://docs.astral.sh/uv/) is powerful (multi-threading, dependency graph solving, Rust backend, etc.); use it as much as you can
+
+### 📚 Documentation
+
+You can generate a documentation website with MkDocs. It automatically builds the documentation from both the markdown files in the `docs/` folder and the docstrings in your code.
+To generate and serve the documentation locally:
+
+```bash
+make serve-docs # Documentation will be available at http://localhost:8000
+```
+
+And to deploy it to GitHub Pages (you need to enable Pages in your repository configuration and set it to use the
+gh-pages branch):
+
+```bash
+make deploy-pages # Builds the documentation and pushes it to the gh-pages branch
+```
+
+### 🌳 Tree Explained
+
 ```
 .
-├── commit-template.txt
-├── configs
-│   ├── callbacks
-│   ├── data
-│   ├── debug
-│   ├── eval.yaml
-│   ├── experiment
-│   ├── extras
-│   ├── hparams_search
-│   ├── local
-│   ├── logger
-│   ├── model
-│   ├── paths
-│   ├── trainer
-│   └── train.yaml
-├── data
-├── Makefile
-├── notebooks
-├── pyproject.toml
-├── README.md
-├── ruff.toml
-├── scripts
+├── commit-template.txt   # commit message template, set it up with `make configure-commit-template`
+├── configs               # Hydra configuration files
+│   ├── callbacks         # configurations for callbacks
+│   ├── data              # configurations for datamodules
+│   ├── debug             # configurations for PyTorch Lightning debugging runs
+│   ├── eval.yaml         # configuration for evaluation
+│   ├── experiment        # configurations for experiments
+│   ├── extras            # configurations for extra components
+│   ├── hparams_search    # configurations for hyperparameter search
+│   ├── local             # configurations for local training
+│   ├── logger            # configurations for loggers (neptune, wandb, etc.)
+│   ├── model             # configurations for models (LightningModule)
+│   ├── paths             # configurations for paths
+│   ├── trainer           # configurations for trainers (cpu, gpu, tpu)
+│   └── train.yaml        # configuration for training
+├── data                  # data folder (to store potentially downloaded datasets)
+├── Makefile              # useful make commands for the project
+├── notebooks             # notebooks folder
+├── pyproject.toml        # project metadata and dependencies for the uv package manager
+├── README.md             # this file
+├── ruff.toml             # ruff linter configuration, used by the pre-commit hooks
+├── scripts               # scripts folder
 │   └── example_train.sh
-├── src
-│   ├── app.py
-│   ├── data
+├── src                   # source code folder
+│   ├── data              # datamodules folder
 │   │   ├── components
 │   │   └── mnist_datamodule.py
-│   ├── Dockerfile
-│   ├── eval.py
-│   ├── models
-│   │   ├── components
-│   │   │   ├── __init__.py
-│   │   │   └── simple_dense_net.py
-│   │   ├── __init__.py
-│   │   └── mnist_module.py
-│   ├── train.py
-│   └── utils
-│       ├── instantiators.py
-│       ├── logging_utils.py
-│       ├── pylogger.py
-│       ├── rich_utils.py
-│       └── utils.py
-└── tests
-    └── conftest.py
+│   ├── eval.py           # evaluation entry script
+│   ├── models            # models folder (LightningModule)
+│   │   └── components    # components folder, contains model parts or "nets"
+│   ├── train.py          # training entry script
+│   └── utils             # utils folder
+│       ├── instantiators.py  # instantiators for models and dataloaders
+│       ├── logging_utils.py  # logging utilities
+│       ├── pylogger.py       # multi-process and multi-GPU safe logging
+│       ├── rich_utils.py     # rich utilities
+│       └── utils.py          # general utilities (multi-processing, etc.)
+└── tests                 # tests folder
+    ├── conftest.py       # fixtures for tests
+    └── mock_test.py      # example of mocking tests
 ```
-
-## 🔮 Incoming features for this template
-
-- [x] Add support for multi-GPU training
-- [x] UV package manager setup
-- [x] pre-commits hooks
-- [x] Hydra instantiation of models and dataloaders
-- [x] Add torch.compile of models
-- [x] Integrate TorchTyping
-- [x] Dockerize the project (Dockerfile, run tests and training through docker, optionally docker-compose)
-- [x] Add example of efficient multi-processing using pool map
-- [x] Add example using polars
-- [x] Example mock tests
-- [x] Util scripts to download dataset from kaggle for example
-- [x] Cloud oriented scripts (launch your training on AWS, GCP, Azure)
-- [x] Add config architecture and example of LitServe
-
 ## 🤝 Contributing
 
 For more information on how to contribute to this project, please refer to the [CONTRIBUTING.md](CONTRIBUTING.md) file.
diff --git a/docs/index.md b/docs/index.md
index 2697a32..a9c5d40 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -1 +1,187 @@
-# Welcome to the Machine Learning Project Template official documentation
+# Machine Learning Project Template
+
+[![python](https://img.shields.io/badge/-Python_3.8_%7C_3.9_%7C_3.10-blue?logo=python&logoColor=white)](https://github.com/pre-commit/pre-commit)
+[![pytorch](https://img.shields.io/badge/PyTorch_2.0+-ee4c2c?logo=pytorch&logoColor=white)](https://pytorch.org/get-started/locally/)
+[![lightning](https://img.shields.io/badge/-Lightning_2.0+-792ee5?logo=pytorchlightning&logoColor=white)](https://pytorchlightning.ai/)
+[![hydra](https://img.shields.io/badge/Config-Hydra_1.3-89b8cd)](https://hydra.cc/)
+[![black](https://img.shields.io/badge/Code%20Style-Black-black.svg?labelColor=gray)](https://black.readthedocs.io/en/stable/)
+[![isort](https://img.shields.io/badge/%20imports-isort-%231674b1?style=flat&labelColor=ef8336)](https://pycqa.github.io/isort/)
+
+Click on [Use this template](https://github.com/rayanramoul/ml-project-template/generate) to start your own project, or go to the [Documentation](https://rayanramoul.github.io/ml-project-template/) for more information.
+
+A template for machine learning or deep learning projects.
+
+## 🧠 Features
+
+- [x] Easy to implement your own model and dataloader through Hydra instantiation of datamodules and models
+- [x] Configurable hyperparameters with Hydra
+- [x] Logging with the solution that fits your needs
+- [x] Works on CPU, multi-GPU, and multi-TPUs
+- [x] Bleeding-edge uv to manage packages
+- [x] Pre-commit hooks to validate code style and quality
+- [x] Hydra instantiation of models and dataloaders
+- [x] torch.compile of models
+- [x] Tensor type validation with TorchTyping
+- [x] Dockerized project (Dockerfile, run tests and training through Docker, optionally docker-compose)
+- [x] Examples of efficient multi-processing using Python's pool map
+- [x] Examples using polars for faster and more efficient dataframe processing
+- [x] Example of mock tests using pytest
+- [x] Utility scripts to download datasets from Kaggle
+- [x] Cloud data retrieval using cloudpathlib (launch your training on AWS, GCP, Azure)
+- [x] Architecture and example of a model-serving API built with LitServe
+- [x] Documentation website generated with MkDocs and its best integrations
+
+## ⚙️ Installation Steps
+
+- [ ] Use this repository as a template
+- [ ] Clone your repository
+- [ ] Run `make install` to install the dependencies
+- [ ] Add your model which inherits from `LightningModule` in `src/models`
+- [ ] Add your dataset which inherits from `Datamodule` in `src/data`
+- [ ] Add associated YAML configuration files in the `configs/` folder, following the existing examples
+- [ ] Read through the Makefile to discover all the available commands
+
+## 🤠 Tips and Tricks
+
+### 🐍 How does the project work?
+
+The `train.py` or `eval.py` script is the entry point of the project. It uses Hydra to instantiate the model (LightningModule), the dataloader (DataModule), and the trainer from the composed configuration. The model is then trained or evaluated with PyTorch Lightning.
+
+### Implementing your logic
+
+You don't need to worry about implementing training loops, support for different hardware, configuration parsing, etc. You only need to care about four files for each training run: your LightningModule (plus its Hydra config) and your DataModule (plus its Hydra config).
+
+In the LightningModule, you need to implement the following methods:
+
+- `forward`
+- `training_step`
+- `validation_step`
+- `test_step`
+
+Get inspired by the provided examples in the `src/models` folder.
+
+For the DataModule, you need to implement the following methods:
+
+- `prepare_data`
+- `setup`
+- `train_dataloader`
+- `val_dataloader`
+- `test_dataloader`
+
+Get inspired by the provided examples in the `src/data` folder.
+
+Learn more in PyTorch Lightning's [LightningModule](https://pytorch-lightning.readthedocs.io/en/0.10.0/lightning_module.html) and [DataModule](https://pytorch-lightning.readthedocs.io/en/0.10.0/datamodules.html) documentation.
+Finally, add the corresponding YAML configuration files for your model and datamodule in the `configs/` folder, following the existing examples.
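+
+Below is a minimal sketch of what such a pair can look like. Everything in it is illustrative rather than the template's actual code (the class names, the random tensors standing in for a dataset, the layer sizes); see `src/models` and `src/data` for the real examples. Note that Lightning also requires `configure_optimizers` for training:
+
+```python
+# Illustrative sketch only: MyModule / MyDataModule and all shapes are made up.
+import torch
+import lightning as L
+from torch import nn
+from torch.utils.data import DataLoader, TensorDataset
+
+
+class MyModule(L.LightningModule):
+    def __init__(self, lr: float = 1e-3) -> None:
+        super().__init__()
+        self.net = nn.Linear(28 * 28, 10)
+        self.lr = lr
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        return self.net(x.flatten(1))
+
+    def training_step(self, batch, batch_idx):
+        x, y = batch
+        loss = nn.functional.cross_entropy(self(x), y)
+        self.log("train/loss", loss)
+        return loss
+
+    def validation_step(self, batch, batch_idx):
+        x, y = batch
+        self.log("val/loss", nn.functional.cross_entropy(self(x), y))
+
+    def test_step(self, batch, batch_idx):
+        x, y = batch
+        self.log("test/loss", nn.functional.cross_entropy(self(x), y))
+
+    def configure_optimizers(self):
+        # Required for training, in addition to the four methods above
+        return torch.optim.Adam(self.parameters(), lr=self.lr)
+
+
+class MyDataModule(L.LightningDataModule):
+    def __init__(self, batch_size: int = 32) -> None:
+        super().__init__()
+        self.batch_size = batch_size
+
+    def prepare_data(self) -> None:
+        # Download or generate the data once (runs in a single process)
+        pass
+
+    def setup(self, stage: str) -> None:
+        # Build the datasets; random tensors stand in for a real dataset here
+        self.dataset = TensorDataset(
+            torch.randn(256, 28 * 28), torch.randint(0, 10, (256,))
+        )
+
+    def train_dataloader(self) -> DataLoader:
+        return DataLoader(self.dataset, batch_size=self.batch_size, shuffle=True)
+
+    def val_dataloader(self) -> DataLoader:
+        return DataLoader(self.dataset, batch_size=self.batch_size)
+
+    def test_dataloader(self) -> DataLoader:
+        return DataLoader(self.dataset, batch_size=self.batch_size)
+```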
+
+### 🔍 The power of Hydra
+
+Since Hydra drives the configuration, you can easily change the hyperparameters of your model, the dataloader, the trainer, etc. by editing the YAML files in the `configs/` folder. You can also use the `--multirun` option to run multiple experiments with different configurations.
+
+And because Hydra is also used to instantiate the model and dataloader, you can swap the model, the dataloader, or any other component by editing the YAML configuration files, or directly on the command line. This is especially useful when you want to try different models or dataloaders.
+
+For example, you can run the following command to train a model with a different architecture, dataset, and trainer:
+
+```bash
+uv run src/train.py model=LeNet datamodule=MNISTDataModule trainer=gpu
+```
+
+Read more about Hydra in the [official documentation](https://hydra.cc/docs/intro/).
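+
+In case it helps to see the mechanism, here is a rough sketch of the instantiation pattern such an entry point relies on, assuming `hydra-core` is installed and each config group carries a `_target_` class path. The paths and group names below are illustrative; see `src/train.py` and `configs/` for the real thing:
+
+```python
+# Rough sketch of Hydra-based instantiation; paths and config names are illustrative.
+import hydra
+from hydra.utils import instantiate
+from omegaconf import DictConfig
+
+
+@hydra.main(config_path="../configs", config_name="train", version_base=None)
+def main(cfg: DictConfig) -> None:
+    # Each selected YAML holds a _target_ dotted path, e.g.
+    #   _target_: src.models.mnist_module.MNISTLitModule
+    # so `model=LeNet` on the CLI simply picks a different YAML to build from.
+    model = instantiate(cfg.model)      # a LightningModule
+    datamodule = instantiate(cfg.data)  # a LightningDataModule
+    trainer = instantiate(cfg.trainer)  # a lightning.Trainer
+    trainer.fit(model, datamodule=datamodule)
+
+
+if __name__ == "__main__":
+    main()
+```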
+
+### 💡 Best practices
+
+- Type your functions and classes with `TorchTyping` for better type checking (in addition to Python's typing module)
+- Docstring your functions and classes; this is even more important here, as docstrings are used to generate the documentation with MkDocs
+- Use the `make` commands to run your code; it is easier and faster than writing the full commands (and check the Makefile for everything available 😉)
+- [Use the pre-commit hooks](https://pre-commit.com/) to ensure your code is formatted correctly and is of good quality
+- [uv](https://docs.astral.sh/uv/) is powerful (multi-threading, dependency graph solving, Rust backend, etc.); use it as much as you can
+
+### 📚 Documentation
+
+You can generate a documentation website with MkDocs. It automatically builds the documentation from both the markdown files in the `docs/` folder and the docstrings in your code.
+To generate and serve the documentation locally:
+
+```bash
+make serve-docs # Documentation will be available at http://localhost:8000
+```
+
+And to deploy it to GitHub Pages (you need to enable Pages in your repository configuration and set it to use the
+gh-pages branch):
+
+```bash
+make deploy-pages # Builds the documentation and pushes it to the gh-pages branch
+```
+
+### 🌳 Tree Explained
+
+```
+.
+├── commit-template.txt   # commit message template, set it up with `make configure-commit-template`
+├── configs               # Hydra configuration files
+│   ├── callbacks         # configurations for callbacks
+│   ├── data              # configurations for datamodules
+│   ├── debug             # configurations for PyTorch Lightning debugging runs
+│   ├── eval.yaml         # configuration for evaluation
+│   ├── experiment        # configurations for experiments
+│   ├── extras            # configurations for extra components
+│   ├── hparams_search    # configurations for hyperparameter search
+│   ├── local             # configurations for local training
+│   ├── logger            # configurations for loggers (neptune, wandb, etc.)
+│   ├── model             # configurations for models (LightningModule)
+│   ├── paths             # configurations for paths
+│   ├── trainer           # configurations for trainers (cpu, gpu, tpu)
+│   └── train.yaml        # configuration for training
+├── data                  # data folder (to store potentially downloaded datasets)
+├── Makefile              # useful make commands for the project
+├── notebooks             # notebooks folder
+├── pyproject.toml        # project metadata and dependencies for the uv package manager
+├── README.md             # the project README
+├── ruff.toml             # ruff linter configuration, used by the pre-commit hooks
+├── scripts               # scripts folder
+│   └── example_train.sh
+├── src                   # source code folder
+│   ├── data              # datamodules folder
+│   │   ├── components
+│   │   └── mnist_datamodule.py
+│   ├── eval.py           # evaluation entry script
+│   ├── models            # models folder (LightningModule)
+│   │   └── components    # components folder, contains model parts or "nets"
+│   ├── train.py          # training entry script
+│   └── utils             # utils folder
+│       ├── instantiators.py  # instantiators for models and dataloaders
+│       ├── logging_utils.py  # logging utilities
+│       ├── pylogger.py       # multi-process and multi-GPU safe logging
+│       ├── rich_utils.py     # rich utilities
+│       └── utils.py          # general utilities (multi-processing, etc.)
+└── tests                 # tests folder
+    ├── conftest.py       # fixtures for tests
+    └── mock_test.py      # example of mocking tests
+```
+
+## 🤝 Contributing
+
+For more information on how to contribute to this project, please refer to the [CONTRIBUTING.md](CONTRIBUTING.md) file.
+
+## 🌟 Acknowledgements
+
+This template was heavily inspired by great existing ones, like:
+
+- [Lightning Hydra Template](https://github.com/ashleve/lightning-hydra-template/)
+- [Pytorch Tempest](https://github.com/Erlemar/pytorch_tempest)
+- [Yet Another Lightning Hydra Template](https://github.com/gorodnitskiy/yet-another-lightning-hydra-template)
+- [Pytorch Style Guide](https://github.com/IgorSusmelj/pytorch-styleguide)
+
+But with a few opinionated changes and improvements, go check them out!