diff --git a/.deepsource.toml b/.deepsource.toml new file mode 100644 index 00000000..cfae3101 --- /dev/null +++ b/.deepsource.toml @@ -0,0 +1,17 @@ +version = 1 + +test_patterns = ["tests/**"] + +exclude_patterns = ["docs/**", "nbs/**"] + +[[analyzers]] +name = "python" +enabled = true + + [analyzers.meta] + runtime_version = "3.x.x" + max_line_length = 120 + +[[transformers]] +name = "black" +enabled = true diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 00000000..036196e3 --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1,18 @@ +# Lines starting with '#' are comments. +# Each line is a file pattern followed by one or more owners. + +# More details are here: https://help.github.com/articles/about-codeowners/ + +# The '*' pattern is global owners. + +# Order is important. The last matching pattern has the most precedence. +# The folders are ordered as follows: + +# In each subsection folders are ordered first by depth, then alphabetically. +# This should make it easy to add new rules without breaking existing ones. + +# Global rule: +* @aniketmaurya + +# tests +/tests/** @aniketmaurya diff --git a/.github/DEPRECATED/main.yml.off b/.github/DEPRECATED/main.yml.off new file mode 100644 index 00000000..9e4280ca --- /dev/null +++ b/.github/DEPRECATED/main.yml.off @@ -0,0 +1,52 @@ +name: CI +on: + workflow_dispatch: + pull_request: + push: + branches: [master] +env: + HOME: /root +defaults: + run: + shell: bash + +jobs: + nb-sync: + runs-on: ubuntu-latest + container: fastai/jekyll + steps: + - uses: actions/checkout@v2 + - run: pip install -qe . + - name: Check if notebooks are synced and cleaned + uses: fastai/workflows/nb@master + + nbdev_test_nbs: + runs-on: ubuntu-latest + container: fastai/jekyll + steps: + - uses: actions/checkout@v2 + - name: Run tests + run: | + pip install -qe . + pip install -qU fastcore + # make test + +# fastpages-integration: +# runs-on: ubuntu-latest +# container: fastai/jekyll +# steps: +# - uses: actions/checkout@v2 +# - name: Install nbdev +# run: | +# pip install -qe . 
+# pip install -qU fastcore +# - uses: actions/checkout@v2 +# with: +# repository: "fastai/fastpages" +# path: "data" +# - name: convert posts +# run: | +# cd data +# mkdir -p /fastpages +# cp -r _action_files/* /fastpages/ +# /fastpages/action_entrypoint.sh diff --git a/.github/DEPRECATED/sonar.yml.off b/.github/DEPRECATED/sonar.yml.off new file mode 100644 index 00000000..ee3dde8f --- /dev/null +++ b/.github/DEPRECATED/sonar.yml.off @@ -0,0 +1,20 @@ +name: Sonar +on: + push: + branches: + - master + pull_request: + types: [opened, synchronize, reopened] +jobs: + sonarcloud: + name: SonarCloud + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + with: + fetch-depth: 0 # Shallow clones should be disabled for a better relevancy of analysis + - name: SonarCloud Scan + uses: SonarSource/sonarcloud-github-action@master + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # Needed to get PR information, if any + SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }} diff --git a/.github/Dockerfile b/.github/Dockerfile new file mode 100644 index 00000000..6185f83b --- /dev/null +++ b/.github/Dockerfile @@ -0,0 +1,9 @@ +FROM python:3.7 + +RUN \ + apt-get update \ + && apt-get install -y --no-install-recommends ffmpeg libsm6 libxext6 \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* \ + && pip3 install --upgrade pip setuptools wheel --no-cache-dir \ + && pip3 install --no-cache-dir git+https://github.com/aniketmaurya/chitra@master diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml index 70ed1c64..610e8299 100644 --- a/.github/FUNDING.yml +++ b/.github/FUNDING.yml @@ -1,12 +1,11 @@ # These are supported funding model platforms -github: [aniketmaurya] # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2] -patreon: aniketmaurya +github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2] +patreon: # replace with username open_collective: # Replace with a single Open Collective username -ko_fi: # Replace with a single Ko-fi username +ko_fi: aniketmaurya # Replace with a single Ko-fi username tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry liberapay: # Replace with a single Liberapay username issuehunt: # Replace with a single IssueHunt username otechie: # Replace with a single Otechie username -custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2'] diff --git a/.github/labeler.yml b/.github/labeler.yml new file mode 100644 index 00000000..7759ca37 --- /dev/null +++ b/.github/labeler.yml @@ -0,0 +1,16 @@ +# Add 'docs' to any changes within 'docs' folder or any subfolders +# https://stackoverflow.com/questions/34691809/regex-match-folder-and-all-subfolders +documentation: +- docs/**/* + +example: +- examples($|/.*) + +test: +- tests/**/* + +serve: +- chitra/serve($|/.*) + +cli: +- chitra/cli($|/.*) diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 4e6df353..e0c3f03c 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -1,18 +1,20 @@ #### Changes - + Fixes # (issue) + #### Type of change - +- [ ] Documentation Update - [ ] Bug fix (non-breaking change which fixes an issue) - [ ] New feature (non-breaking change which adds functionality) - [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected) - [ ] This change requires a documentation update + #### Checklist - [ ] My code follows 
the style guidelines of this project diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml deleted file mode 100644 index 3b90824c..00000000 --- a/.github/workflows/codeql-analysis.yml +++ /dev/null @@ -1,67 +0,0 @@ -# For most projects, this workflow file will not need changing; you simply need -# to commit it to your repository. -# -# You may wish to alter this file to override the set of languages analyzed, -# or to provide custom queries or build logic. -# -# ******** NOTE ******** -# We have attempted to detect the languages in your repository. Please check -# the `language` matrix defined below to confirm you have the correct set of -# supported CodeQL languages. -# -name: "CodeQL" - -on: - push: - branches: [ master ] - pull_request: - # The branches below must be a subset of the branches above - branches: [ master ] - schedule: - - cron: '39 18 * * 5' - -jobs: - analyze: - name: Analyze - runs-on: ubuntu-latest - - strategy: - fail-fast: false - matrix: - language: [ 'python' ] - # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ] - # Learn more: - # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed - - steps: - - name: Checkout repository - uses: actions/checkout@v2 - - # Initializes the CodeQL tools for scanning. - - name: Initialize CodeQL - uses: github/codeql-action/init@v1 - with: - languages: ${{ matrix.language }} - # If you wish to specify custom queries, you can do so here or in a config file. - # By default, queries listed here will override any specified in a config file. - # Prefix the list here with "+" to use these queries and those in the config file. - # queries: ./path/to/local/query, your-org/your-repo/queries@main - - # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). - # If this step fails, then you should remove it and run the build manually (see below) - - name: Autobuild - uses: github/codeql-action/autobuild@v1 - - # ℹ️ Command-line programs to run using the OS shell. 
- # 📚 https://git.io/JvXDl - - # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines - # and modify them (or add more) to build your code if your project - # uses a compiled language - - #- run: | - # make bootstrap - # make release - - - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v1 diff --git a/.github/workflows/dockerhub.yml b/.github/workflows/dockerhub.yml new file mode 100644 index 00000000..ece50970 --- /dev/null +++ b/.github/workflows/dockerhub.yml @@ -0,0 +1,47 @@ +# This is a basic workflow to help you get started with Actions + +name: CI to Docker Hub + +# Controls when the workflow will run +on: + # Triggers the workflow on push or pull request events but only for the main branch + push: + branches: [ main ] + + # Allows you to run this workflow manually from the Actions tab + workflow_dispatch: + +# A workflow run is made up of one or more jobs that can run sequentially or in parallel +jobs: + # This workflow contains a single job called "build" + build: + # The type of runner that the job will run on + runs-on: ubuntu-latest + + # Steps represent a sequence of tasks that will be executed as part of the job + steps: + + - name: Check Out Repo + uses: actions/checkout@v2 + + - name: Login to Docker Hub + uses: docker/login-action@v1 + with: + username: ${{ secrets.DOCKER_HUB_USERNAME }} + password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }} + + - name: Set up Docker Buildx + id: buildx + uses: docker/setup-buildx-action@v1 + + - name: Build and push + id: docker_build + uses: docker/build-push-action@v2 + with: + context: ./ + file: ./.github/Dockerfile + push: true + tags: ${{ secrets.DOCKER_HUB_USERNAME }}/chitra:latest + + - name: Image digest + run: echo ${{ steps.docker_build.outputs.digest }} diff --git a/.github/workflows/label.yml b/.github/workflows/label.yml deleted file mode 100644 index 7c724a62..00000000 --- a/.github/workflows/label.yml +++ /dev/null @@ -1,19 +0,0 @@ -# This workflow will triage pull requests and apply a label based on the -# paths that are modified in the pull request. -# -# To use this workflow, you will need to set up a .github/labeler.yml -# file with configuration. For more information, see: -# https://github.com/actions/labeler - -name: Labeler -on: [pull_request] - -jobs: - label: - - runs-on: ubuntu-latest - - steps: - - uses: actions/labeler@v2 - with: - repo-token: "${{ secrets.GITHUB_TOKEN }}" diff --git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml new file mode 100644 index 00000000..331ddc9b --- /dev/null +++ b/.github/workflows/labeler.yml @@ -0,0 +1,11 @@ +name: "Pull Request Labeler" +on: +- pull_request_target + +jobs: + triage: + runs-on: ubuntu-latest + steps: + - uses: actions/labeler@v3 + with: + repo-token: "${{ secrets.GITHUB_TOKEN }}" diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index feb5d3aa..0b4e0720 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -1,53 +1,78 @@ -name: CI +name: pytest on: - workflow_dispatch: - pull_request: push: - branches: [master] -env: - HOME: /root -defaults: - run: - shell: bash + branches: [ master ] + pull_request: + branches: [ master ] + jobs: - nb-sync: - runs-on: ubuntu-latest - container: fastai/jekyll - steps: - - uses: actions/checkout@v2 - - run: pip install -qe . 
- - name: Check if notebooks are synced and cleaned - uses: fastai/workflows/nb@master - - nbdev_test_nbs: - runs-on: ubuntu-latest - container: fastai/jekyll + pytest: + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ ubuntu-latest, macos-latest ] + include: + - os: ubuntu-latest + path: ~/.cache/pip + - os: macos-latest + path: ~/Library/Caches/pip + env: + OS: ${{ matrix.os }} + PYTHON: '3.7' + + steps: - - uses: actions/checkout@v2 - - name: Run tests - run: | - pip install -qe . - pip install -qU fastcore - # make test - -# fastpages-integration: -# runs-on: ubuntu-latest -# container: fastai/jekyll -# steps: -# - uses: actions/checkout@v2 -# - name: Install nbdev -# run: | -# pip install -qe . -# pip install -qU fastcore -# - uses: actions/checkout@v2 -# with: -# repository: "fastai/fastpages" -# path: "data" -# - name: convert posts -# run: | -# cd data -# mkdir -p /fastpages -# cp -r _action_files/* /fastpages/ -# /fastpages/action_entrypoint.sh - + - uses: actions/checkout@v2 + with: + fetch-depth: 0 # Shallow clones should be disabled for a better relevancy of analysis + + - name: Setup Python + uses: actions/setup-python@master + with: + python-version: 3.7 + + - name: Cache pip + uses: actions/cache@v2 + with: + path: ${{ matrix.path }} + key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }} + restore-keys: | + ${{ runner.os }}-pip- + ${{ runner.os }}- + + - name: Installation + run: | + python --version + pip --version + pip install flit + flit install --deps all + pip list + shell: bash + + - name: Run Test with Coverage + run: | + coverage erase + coverage run -m pytest + + - name: Generate Coverage Report + run: | + coverage report -m -i + coverage xml -i + + - name: Upload Coverage to Codecov + uses: codecov/codecov-action@v1 + with: + token: ${{ secrets.CODECOV_TOKEN }} + file: ./coverage.xml + flags: unittests + env_vars: OS,PYTHON + name: codecov-umbrella + fail_ci_if_error: false + + - name: SonarCloud Scan + if: runner.os != 'macOS' && env.SONAR_TOKEN != null + uses: SonarSource/sonarcloud-github-action@master + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # Needed to get PR information, if any + SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }} diff --git a/.github/workflows/mkdocs_ci.yml b/.github/workflows/mkdocs_ci.yml index c91d2c07..69e3676a 100644 --- a/.github/workflows/mkdocs_ci.yml +++ b/.github/workflows/mkdocs_ci.yml @@ -12,5 +12,6 @@ jobs: - uses: actions/setup-python@v2 with: python-version: 3.x + - run: make build-docs # copy README.md to docs/index.MD - run: pip install mkdocs-material mkdocs-git-revision-date-localized-plugin mkdocs-macros-plugin - - run: mkdocs gh-deploy --force \ No newline at end of file + - run: mkdocs gh-deploy --force diff --git a/.github/workflows/welcome.yml b/.github/workflows/welcome.yml index 663655a5..7cabc9c5 100644 --- a/.github/workflows/welcome.yml +++ b/.github/workflows/welcome.yml @@ -11,4 +11,3 @@ jobs: repo-token: ${{ secrets.GITHUB_TOKEN }} issue-message: "👋 @${{github.actor}}! Thank you for opening your first issue in this repo. We are so happy that you have decided to contribute and value your contribution. Please read these materials before proceeding: [Contributing Guide](https://github.com/fastai/fastai/blob/master/CONTRIBUTING.md) and [Code of Conduct](https://github.com/aniketmaurya/chitra/blob/master/CODE_OF_CONDUCT.md)." pr-message: "👋 @${{github.actor}}! Thank you for opening your first pull request in this repo. 
We are so happy that you have decided to contribute and value your contribution. Please read these materials before proceeding: [Contributing Guide](https://github.com/fastai/fastai/blob/master/CONTRIBUTING.md) and [Code of Conduct](https://github.com/aniketmaurya/chitra/blob/master/CODE_OF_CONDUCT.md)." - diff --git a/.gitignore b/.gitignore index 9ca09886..be036164 100644 --- a/.gitignore +++ b/.gitignore @@ -139,3 +139,6 @@ checklink/cookies.txt # .gitconfig is now autogenerated .gitconfig +*.pth +*.ckpt +*.h5 diff --git a/.pep8speaks.yml b/.pep8speaks.yml new file mode 100644 index 00000000..ae2a2707 --- /dev/null +++ b/.pep8speaks.yml @@ -0,0 +1,28 @@ +# File : .pep8speaks.yml + +scanner: + diff_only: True # If False, the entire file touched by the Pull Request is scanned for errors. If True, only the diff is scanned. + linter: pycodestyle # Other option is flake8 + +pycodestyle: # Same as scanner.linter value. Other option is flake8 + max-line-length: 100 # Default is 79 in PEP 8 + ignore: # Errors and warnings to ignore + - W504 # line break after binary operator + - E402 # module level import not at top of file + - E731 # do not assign a lambda expression, use a def + - C406 # Unnecessary list literal - rewrite as a dict literal. + - E741 # ambiguous variable name + +no_blank_comment: True # If True, no comment is made on PR without any errors. +descending_issues_order: False # If True, PEP 8 issues in message will be displayed in descending order of line numbers in the file + +message: # Customize the comment made by the bot + opened: # Messages when a new PR is submitted + header: "Hello @{name}! Thanks for opening this PR. " + # The keyword {name} is converted into the author's username + footer: "Do see the [Hitchhiker's guide to code style](https://goo.gl/hqbW4r)" + # The messages can be written as they would over GitHub + updated: # Messages when new commits are added to the PR + header: "Hello @{name}! Thanks for updating this PR. " + footer: "" # Why to comment the link to the style guide everytime? :) + no_errors: "There are currently no PEP 8 issues detected in this Pull Request. Cheers! 
:beers: " diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000..ee0c3dc1 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,38 @@ +# See https://pre-commit.com for more information +# See https://pre-commit.com/hooks.html for more hooks +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.0.1 + hooks: + - id: trailing-whitespace + - id: end-of-file-fixer + - id: check-added-large-files + + - repo: https://github.com/psf/black + rev: 21.11b1 + hooks: + - id: black + name: "Black: The uncompromising Python code formatter" + files: chitra tests + + - repo: https://github.com/PyCQA/isort + rev: 5.10.1 + hooks: + - id: isort + name: "Sort Imports" + args: [ "--profile black" ] + files: chitra tests + + - repo: local + hooks: + - id: clean + name: clean + entry: make + args: [ "clean" ] + language: system + pass_filenames: false + + - repo: https://github.com/kynan/nbstripout + rev: 0.5.0 + hooks: + - id: nbstripout diff --git a/.readthedocs.yaml b/.readthedocs.yaml index 214a0e5c..47fdabc9 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -12,4 +12,4 @@ mkdocs: python: version: 3.7 install: - - requirements: docs/requirements.txt \ No newline at end of file + - requirements: docs/requirements.txt diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..953232a5 --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,128 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, religion, or sexual identity +and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. + +## Our Standards + +Examples of behavior that contributes to a positive environment for our +community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +* Focusing on what is best not just for us as individuals, but for the + overall community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or + advances of any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email + address, without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. 
+ +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official e-mail address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at +hello@aniketmaurya.com. +All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series +of actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or +permanent ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including +sustained inappropriate behavior. + +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. Permanent Ban + +**Community Impact**: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within +the community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.0, available at +https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. + +Community Impact Guidelines were inspired by [Mozilla's code of conduct +enforcement ladder](https://github.com/mozilla/diversity). + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see the FAQ at +https://www.contributor-covenant.org/faq. Translations are available at +https://www.contributor-covenant.org/translations. 
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 38fbde54..08c64d61 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,33 +1,41 @@ -# How to contribute +# Contributing guidelines -## How to get started +We welcome any kind of contribution to our software, from simple comment or question to a full fledged [pull request](https://help.github.com/articles/about-pull-requests/). Please read and follow our [Code of Conduct](CODE_OF_CONDUCT.md). -Before anything else, please install the git hooks that run automatic scripts during each commit and merge to strip the notebooks of superfluous metadata (and avoid merge conflicts). After cloning the repository, run the following command inside it: -``` -nbdev_install_git_hooks -``` +A contribution can be one of the following cases: -## Did you find a bug? +1. you have a question; +1. you think you may have found a bug (including unexpected behavior); +1. you want to make some kind of change to the code base (e.g. to fix a bug, to add a new feature, to update documentation); +1. you want to make a new release of the code base. -* Ensure the bug was not already reported by searching on GitHub under Issues. -* If you're unable to find an open issue addressing the problem, open a new one. Be sure to include a title and clear description, as much relevant information as possible, and a code sample or an executable test case demonstrating the expected behavior that is not occurring. -* Be sure to add the complete error messages. +The sections below outline the steps in each case. -#### Did you write a patch that fixes a bug? +## You have a question -* Open a new GitHub pull request with the patch. -* Ensure that your PR includes a test that fails without your patch, and pass with it. -* Ensure the PR description clearly describes the problem and solution. Include the relevant issue number if applicable. +1. use the search functionality [here](https://github.com/aniketmaurya/chitra/issues) to see if someone already filed the same issue; +2. if your issue search did not yield any relevant results, make a new issue; +3. apply the "Question" label; apply other labels when relevant. -## PR submission guidelines +## You think you may have found a bug -* Keep each PR focused. While it's more convenient, do not combine several unrelated fixes together. Create as many branches as needing to keep each PR focused. -* Do not mix style changes/fixes with "functional" changes. It's very difficult to review such PRs and it most likely get rejected. -* Do not add/remove vertical whitespace. Preserve the original style of the file you edit as much as you can. -* Do not turn an already submitted PR into your development playground. If after you submitted PR, you discovered that more work is needed - close the PR, do the required work and then submit a new PR. Otherwise each of your commits requires attention from maintainers of the project. -* If, however, you submitted a PR and received a request for changes, you should proceed with commits inside that PR, so that the maintainer can see the incremental fixes and won't need to review the whole PR again. In the exception case where you realize it'll take many many commits to complete the requests, then it's probably best to close the PR, do the work and then submit it again. Use common sense where you'd choose one way over another. +1. use the search functionality [here](https://github.com/aniketmaurya/chitra/issues) to see if someone already filed the same issue; +1. 
if your issue search did not yield any relevant results, make a new issue, making sure to provide enough information to the rest of the community to understand the cause and context of the problem. Depending on the issue, you may want to include: + - the [SHA hashcode](https://help.github.com/articles/autolinked-references-and-urls/#commit-shas) of the commit that is causing your problem; + - some identifying information (name and version number) for dependencies you're using; + - information about the operating system; +1. apply relevant labels to the newly created issue. -## Do you want to contribute to the documentation? +## You want to make some kind of change to the code base -* Docs are automatically created from the notebooks in the nbs folder. +1. (**important**) announce your plan to the rest of the community *before you start working*. This announcement should be in the form of a (new) issue; +1. (**important**) wait until some kind of consensus is reached about your idea being a good idea; +1. if needed, fork the repository to your own Github profile and create your own feature branch off of the latest master commit. While working on your feature branch, make sure to stay up to date with the master branch by pulling in changes, possibly from the 'upstream' repository (follow the instructions [here](https://help.github.com/articles/configuring-a-remote-for-a-fork/) and [here](https://help.github.com/articles/syncing-a-fork/)); +1. make sure the existing tests still work by running ``pytest``; +1. add your own tests (if necessary); +1. update or expand the documentation; +1. update the `CHANGELOG.md` file with change; +1. push your feature branch to (your fork of) the https://github.com/aniketmaurya/chitra repository on GitHub; +1. create the pull request, e.g. following the instructions [here](https://help.github.com/articles/creating-a-pull-request/). +In case you feel like you've made a valuable contribution, but you don't know how to write or run tests for it, or how to generate the documentation: don't let this discourage you from making the pull request; we can help you! Just go ahead and submit the pull request, but keep in mind that you might be asked to append additional commits to your pull request. diff --git a/LICENSE b/LICENSE index f0f1eb5d..1e17d32d 100644 --- a/LICENSE +++ b/LICENSE @@ -186,7 +186,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright 2020 Aniket Maurya + Copyright 2020-21 Aniket Maurya Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/MANIFEST.in b/MANIFEST.in index 5c0e7ced..4d393334 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,4 +1,4 @@ -include settings.ini +include pyproject.toml include LICENSE include CONTRIBUTING.md include README.md diff --git a/Makefile b/Makefile index 6060c0be..1e81da0d 100644 --- a/Makefile +++ b/Makefile @@ -1,33 +1,37 @@ -SRC = $(wildcard ./*.ipynb) +.PHONY: build-docs clean style build pypi -all: chitra docs +build-docs: + cp README.md docs/index.md -chitra: $(SRC) - nbdev_build_lib - touch chitra - -docs_serve: docs - cd docs && bundle exec jekyll serve - -docs: $(SRC) - nbdev_build_docs - touch docs +docs-serve: build-docs + mkdocs serve test: - nbdev_test_nbs - -release: pypi - nbdev_bump_version - -pypi: dist - twine upload --repository pypi dist/* + pytest -dist: clean - python setup.py sdist bdist_wheel +coverage: ## Run tests with coverage + coverage erase + coverage run -m pytest + coverage report -m + coverage xml clean: - rm -rf dist + rm -rf dist/ + find . -type f -name "*.DS_Store" -ls -delete + find . | grep -E "(__pycache__|\.pyc|\.pyo)" | xargs rm -rf + find . | grep -E ".pytest_cache" | xargs rm -rf + find . | grep -E ".ipynb_checkpoints" | xargs rm -rf + rm -f .coverage style: - yapf -ir chitra - isort chitra \ No newline at end of file + black chitra tests examples + isort chitra tests examples + +build: style clean + flit build + +pypi: build + flit publish + +push: + git push && git push --tags diff --git a/README.md b/README.md index 489e44b4..f4c6a5f6 100644 --- a/README.md +++ b/README.md @@ -1,87 +1,112 @@ # chitra -![](https://www.code-inspector.com/project/16652/score/svg) -![](https://www.code-inspector.com/project/16652/status/svg) +[![CodeFactor](https://www.codefactor.io/repository/github/aniketmaurya/chitra/badge)](https://www.codefactor.io/repository/github/aniketmaurya/chitra) +[![Maintainability Rating](https://sonarcloud.io/api/project_badges/measure?project=aniketmaurya_chitra&metric=sqale_rating)](https://sonarcloud.io/dashboard?id=aniketmaurya_chitra) +[![Reliability Rating](https://sonarcloud.io/api/project_badges/measure?project=aniketmaurya_chitra&metric=reliability_rating)](https://sonarcloud.io/dashboard?id=aniketmaurya_chitra) +[![Security Rating](https://sonarcloud.io/api/project_badges/measure?project=aniketmaurya_chitra&metric=security_rating)](https://sonarcloud.io/dashboard?id=aniketmaurya_chitra) +[![Coverage](https://sonarcloud.io/api/project_badges/measure?project=aniketmaurya_chitra&metric=coverage)](https://sonarcloud.io/dashboard?id=aniketmaurya_chitra) +![GitHub issues](https://img.shields.io/github/issues/aniketmaurya/chitra?style=flat) +[![Documentation Status](https://readthedocs.org/projects/chitra/badge/?version=latest)](https://chitra.readthedocs.io/en/latest/?badge=latest) +[![Discord](https://img.shields.io/discord/848469007443165184?style=flat)](https://discord.gg/TdnAfDw3kB) ## What is chitra? -**chitra** (**चित्र**) is a Deep Learning Computer Vision library for easy data loading, model building and model interpretation with GradCAM/GradCAM++. +**chitra** (**चित्र**) is a multi-functional library for full-stack Deep Learning. It simplifies Model Building, API development, and Model Deployment. -Highlights: -- Faster data loading without any boilerplate. -- Progressive resizing of images. -- Rapid experiments with different models using `chitra.trainer` module. -- Train models with cyclic learning rate. -- Model interpretation using GradCAM/GradCAM++ with no extra code. 
+### Components +arch -> 📢 If you have more use case please [**raise an issue/PR**](https://github.com/aniketmaurya/chitra/issues/new/choose) with the feature you want. -> 📢 Join discord channel - https://discord.gg/TdnAfDw3kB +Load Image from Internet url, filepath or `numpy` array and plot Bounding Boxes on the images easily. +Model Training and Explainable AI. +Easily create UI for Machine Learning models or Rest API backend that can be deployed for serving ML Models in Production. -## Installation +### 📌 Highlights: +- [New] [Auto Dockerization of Models](https://chitra.readthedocs.io/en/latest/source/cli/builder/builder-create/) 🐳 +- [New] [Framework Agnostic Model Serving & Interactive UI prototype app](https://chitra.readthedocs.io/en/latest/source/api/serve/model_server/) ✨🌟 +- [New] [Data Visualization, Bounding Box Visualization](https://chitra.readthedocs.io/en/latest/source/api/image/chitra-class/) 🐶🎨 +- Model interpretation using GradCAM/GradCAM++ with no extra code 🔥 +- Faster data loading without any boilerplate 🤺 +- Progressive resizing of images 🎨 +- Rapid experiments with different models using `chitra.trainer` module 🚀 -### Using pip (recommended) +### 🚘 Implementation Roadmap + +- One click deployment to `serverless` platform. + +> If you have more use case please [**raise an issue/PR**](https://github.com/aniketmaurya/chitra/issues/new/choose) with the feature you want. +> If you want to contribute, feel free to raise a PR. It doesn't need to be perfect. +> We will help you get there. +## 📀 Installation + +[![Downloads](https://pepy.tech/badge/chitra)](https://pepy.tech/project/chitra) +[![Downloads](https://pepy.tech/badge/chitra/month)](https://pepy.tech/project/chitra) +![GitHub License](https://img.shields.io/github/license/aniketmaurya/chitra?style=flat) + +### Using pip (recommended) +1. Minimum installation `pip install -U chitra` +1. Full Installation +`pip install -U 'chitra[all]'` + +1. Install for Training +`pip install -U 'chitra[nn]'` + +1. Install for Serving +`pip install -U 'chitra[serve]'` + ### From source ``` -git clone https://github.com/aniketmaurya/chitra.git -cd chitra -pip install -e . +pip install git+https://github.com/aniketmaurya/chitra@master ``` -### From GitHub -``` -pip install git+https://github.com/aniketmaurya/chitra@master +Or, +``` +git clone https://github.com/aniketmaurya/chitra.git +cd chitra +pip install . ``` -## Usage +## 🧑‍💻 Usage ### Loading data for image classification -Chitra `dataloader` and `datagenerator` modules for loading data. `dataloader` is a minimal dataloader that returns `tf.data.Dataset` object. `datagenerator` provides flexibility to users on how they want to load and manipulate the data. +Chitra `dataloader` and `datagenerator` modules for loading data. `dataloader` is a minimal dataloader that +returns `tf.data.Dataset` object. `datagenerator` provides flexibility to users on how they want to load and manipulate +the data. 
-``` +```python import numpy as np -import tensorflow as tf import chitra -from chitra.dataloader import Clf, show_batch +from chitra.dataloader import Clf import matplotlib.pyplot as plt -``` -``` + clf_dl = Clf() data = clf_dl.from_folder(cat_dog_path, target_shape=(224, 224)) - -clf_dl.show_batch(8, figsize=(8,8)) +clf_dl.show_batch(8, figsize=(8, 8)) ``` -``` -for e in data.take(1): - image = e[0].numpy().astype('uint8') - label = e[1].numpy() -plt.imshow(image) -plt.show() -``` - - -![png](docs/old_source/images/output_6_0.png) - +![Show Batch](https://raw.githubusercontent.com/aniketmaurya/chitra/master/docs/assets/images/output_3_1.png) ## Image datagenerator + Dataset class provides the flexibility to load image dataset by updating components of the class. Components of Dataset class are: + - image file generator - resizer - label generator - image loader -These components can be updated with custom function by the user according to their dataset structure. For example the Tiny Imagenet dataset is organized as- +These components can be updated with custom function by the user according to their dataset structure. For example the +Tiny Imagenet dataset is organized as- ``` train_folder/ @@ -94,74 +119,69 @@ train_folder/ . . ......imageN.jpg - - ``` -The inbuilt file generator search for images on the `folder1`, now we can just update the `image file generator` and rest of the functionality will remain same. +The inbuilt file generator search for images on the `folder1`, now we can just update the `image file generator` and +rest of the functionality will remain same. **Dataset also support progressive resizing of images.** ### Updating component -``` +```python from chitra.datagenerator import Dataset -from glob import glob ds = Dataset(data_path) # it will load the folders and NOT images ds.filenames[:3] ``` - No item present in the image size list - - - +
Output + No item present in the image size list ['/Users/aniket/Pictures/data/tiny-imagenet-200/train/n02795169/n02795169_boxes.txt', '/Users/aniket/Pictures/data/tiny-imagenet-200/train/n02795169/images', '/Users/aniket/Pictures/data/tiny-imagenet-200/train/n02769748/images'] +
- -``` +```python def load_files(path): return glob(f'{path}/*/images/*') + def get_label(path): return path.split('/')[-3] - + + ds.update_component('get_filenames', load_files) ds.filenames[:3] ``` +
Output + get_filenames updated with No item present in the image size list - - - - ['/Users/aniket/Pictures/data/tiny-imagenet-200/train/n02795169/images/n02795169_369.JPEG', '/Users/aniket/Pictures/data/tiny-imagenet-200/train/n02795169/images/n02795169_386.JPEG', '/Users/aniket/Pictures/data/tiny-imagenet-200/train/n02795169/images/n02795169_105.JPEG'] - +
### Progressive resizing + > It is the technique to sequentially resize all the images while training the CNNs on smaller to bigger image sizes. Progressive Resizing is described briefly in his terrific fastai course, “Practical Deep Learning for Coders”. A great way to use this technique is to train a model with smaller image size say 64x64, then use the weights of this model to train another model on images of size 128x128 and so on. Each larger-scale model incorporates the previous smaller-scale model layers and weights in its architecture. ~[KDnuggets](https://www.kdnuggets.com/2019/05/boost-your-image-classification-model.html) -``` +```python image_sz_list = [(28, 28), (32, 32), (64, 64)] ds = Dataset(data_path, image_size=image_sz_list) ds.update_component('get_filenames', load_files) ds.update_component('get_label', get_label) - -print() # first call to generator for img, label in ds.generator(): print('first call to generator:', img.shape) @@ -176,21 +196,25 @@ for img, label in ds.generator(): for img, label in ds.generator(): print('third call to generator:', img.shape) break - ``` +
Output + get_filenames updated with get_label updated with - + first call to generator: (28, 28, 3) seconds call to generator: (32, 32, 3) third call to generator: (64, 64, 3) +
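+The example above only swaps the input pipeline between sizes; carrying the learned weights over to the next image size is the model's job. As a rough illustration in plain Keras (not part of the chitra API; `build_model` and the two-class head are assumptions), a fully-convolutional backbone keeps weight shapes independent of resolution, so the 64x64 weights can seed the 128x128 model:
+
+```python
+import tensorflow as tf
+
+
+def build_model(image_size, num_classes=2):
+    # Fully-convolutional backbone + global pooling, so weight shapes
+    # do not depend on the input resolution.
+    backbone = tf.keras.applications.MobileNetV2(
+        input_shape=(*image_size, 3), include_top=False, weights=None
+    )
+    return tf.keras.Sequential(
+        [
+            backbone,
+            tf.keras.layers.GlobalAveragePooling2D(),
+            tf.keras.layers.Dense(num_classes, activation="softmax"),
+        ]
+    )
+
+
+small = build_model((64, 64))
+# ... train `small` on 64x64 images ...
+large = build_model((128, 128))
+large.set_weights(small.get_weights())  # continue training at the larger size
+```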
### tf.data support -Creating a `tf.data` dataloader was never as easy as this one liner. It converts the Python generator into `tf.data.Dataset` for a faster data loading, prefetching, caching and everything provided by tf.data. -``` +Creating a `tf.data` dataloader is now a one-liner: it converts the Python generator +into a `tf.data.Dataset` for faster data loading, prefetching, caching and everything else provided by tf.data. + +```python image_sz_list = [(28, 28), (32, 32), (64, 64)] ds = Dataset(data_path, image_size=image_sz_list) @@ -209,52 +233,49 @@ for e in dl.take(1): print(e[0].shape) ``` +
Output + get_filenames updated with - get_label updated with + get_label updated with (28, 28, 3) (32, 32, 3) (64, 64, 3) +
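+Under the hood this is plain `tf.data`. A generic sketch of the same idea (independent of chitra's internals; the dummy `gen` below only stands in for the real image/label generator) looks roughly like this:
+
+```python
+import tensorflow as tf
+
+
+def gen():
+    # stand-in for chitra's image/label generator
+    for _ in range(4):
+        yield tf.random.uniform((28, 28, 3)), 0
+
+
+dl = (
+    tf.data.Dataset.from_generator(
+        gen,
+        output_signature=(  # TF 2.4+; older versions use output_types/output_shapes
+            tf.TensorSpec(shape=(28, 28, 3), dtype=tf.float32),
+            tf.TensorSpec(shape=(), dtype=tf.int32),
+        ),
+    )
+    .cache()
+    .shuffle(4)
+    .prefetch(tf.data.AUTOTUNE)
+)
+
+for image, label in dl.take(1):
+    print(image.shape, int(label))
+```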
## Trainer -The Trainer class inherits from `tf.keras.Model`, it contains everything that is required for training. -It exposes trainer.cyclic_fit method which trains the model using Cyclic Learning rate discovered by [Leslie Smith](https://arxiv.org/abs/1506.01186). -``` +The Trainer class inherits from `tf.keras.Model`, it contains everything that is required for training. It exposes +trainer.cyclic_fit method which trains the model using Cyclic Learning rate discovered +by [Leslie Smith](https://arxiv.org/abs/1506.01186). + +```python from chitra.trainer import Trainer, create_cnn from chitra.datagenerator import Dataset -from PIL import Image -``` -``` -ds = Dataset(cat_dog_path, image_size=(224,224)) + +ds = Dataset(cat_dog_path, image_size=(224, 224)) model = create_cnn('mobilenetv2', num_classes=2, name='Cat_Dog_Model') trainer = Trainer(ds, model) # trainer.summary() ``` - WARNING:tensorflow:`input_shape` is undefined or non-square, or `rows` is not in [96, 128, 160, 192, 224]. Weights for input shape (224, 224) will be loaded as the default. - - -``` +```python trainer.compile2(batch_size=8, - optimizer=tf.keras.optimizers.SGD(1e-3, momentum=0.9, nesterov=True), - lr_range=(1e-6, 1e-3), - loss='binary_crossentropy', - metrics=['binary_accuracy']) -``` + optimizer=tf.keras.optimizers.SGD(1e-3, momentum=0.9, nesterov=True), + lr_range=(1e-6, 1e-3), + loss='binary_crossentropy', + metrics=['binary_accuracy']) - Model compiled! - - -``` trainer.cyclic_fit(epochs=5, - batch_size=8, - lr_range=(0.00001, 0.0001), - ) + batch_size=8, + lr_range=(0.00001, 0.0001), +) ``` +
Training Loop... cyclic learning rate already set! + Epoch 1/5 1/1 [==============================] - 0s 14ms/step - loss: 6.4702 - binary_accuracy: 0.2500 Epoch 2/5 @@ -270,24 +291,22 @@ trainer.cyclic_fit(epochs=5, Returning the last set size which is: (224, 224) 1/1 [==============================] - 0s 982us/step - loss: 1.9062 - binary_accuracy: 0.8750 - - - - +
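+The `lr_range` passed to `cyclic_fit` bounds a cyclic schedule in the spirit of Smith's triangular policy. A minimal, self-contained sketch of that policy (for intuition only, not chitra's actual implementation) is:
+
+```python
+import numpy as np
+
+
+def triangular_lr(step, step_size, lr_min, lr_max):
+    # One cycle spans 2 * step_size steps: the learning rate climbs linearly
+    # from lr_min to lr_max, then falls back to lr_min (Smith, 2015).
+    cycle = np.floor(1 + step / (2 * step_size))
+    x = np.abs(step / step_size - 2 * cycle + 1)
+    return lr_min + (lr_max - lr_min) * max(0.0, 1.0 - x)
+
+
+# Rises from 1e-05 to 1e-04 over 4 steps, then falls back down.
+print([triangular_lr(s, step_size=4, lr_min=1e-5, lr_max=1e-4) for s in range(9)])
+```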
+## ✨ Model Interpretability -## Model Visualization -It is important to understand what is going inside the model. Techniques like GradCam and Saliency Maps can visualize what the Network is learning. `trainer` module has InterpretModel class which creates GradCam and GradCam++ visualization with almost no additional code. +It is important to understand what is going inside the model. Techniques like GradCam and Saliency Maps can visualize +what the Network is learning. `trainer` module has InterpretModel class which creates GradCam and GradCam++ +visualization with almost no additional code. -``` +```python from chitra.trainer import InterpretModel + trainer = Trainer(ds, create_cnn('mobilenetv2', num_classes=1000, keras_applications=False)) model_interpret = InterpretModel(True, trainer) -``` -``` image = ds[1][0].numpy().astype('uint8') image = Image.fromarray(image) model_interpret(image) @@ -296,58 +315,87 @@ print(IMAGENET_LABELS[285]) Returning the last set size which is: (224, 224) index: 282 + Egyptian Mau +![png](https://raw.githubusercontent.com/aniketmaurya/chitra/master/docs/assets/images/output_22_1.png) +## 🎨 Data Visualization -![png](docs/old_source/images/output_22_1.png) +### Image annotation +Bounding Box creation is based on top of `imgaug` library. - Egyptian Mau +```python +from chitra.image import Chitra +import matplotlib.pyplot as plt +bbox = [70, 25, 190, 210] +label = 'Dog' -## Data Visualization +image = Chitra(image_path, bboxes=bbox, labels=label) +plt.imshow(image.draw_boxes()) +``` -### Image annotation +![png](https://raw.githubusercontent.com/aniketmaurya/chitra/master/docs/assets/images/preview-bounding-box.png) -Thanks to [**fizyr**](https://github.com/fizyr/keras-retinanet) keras-retinanet. +See [Play with Images](https://chitra.readthedocs.io/en/latest/examples/chitra-class/chitra-class.html) for detailed +example! -``` -from chitra.visualization import draw_annotations +## 🚀 Model Serving (Framework Agnostic) + +Chitra can Create Rest API or Interactive UI app for Any Learning Model - +ML, DL, Image Classification, NLP, Tensorflow, PyTorch or SKLearn. +It provides `chitra.serve.GradioApp` for building Interactive UI app for ML/DL models +and `chitra.serve.API` for building Rest API endpoint. -labels = np.array([label]) -bbox = np.array([[30, 50, 170, 190]]) -label_to_name = lambda x: 'Cat' if x==0 else 'Dog' +```python +from chitra.serve import create_api +from chitra.trainer import create_cnn -draw_annotations(image, ({'bboxes': bbox, 'labels':labels,}), label_to_name=label_to_name) -plt.imshow(image) -plt.show() +model = create_cnn('mobilenetv2', num_classes=2) +create_api(model, run=True, api_type='image-classification') ``` +
API Docs Preview -![png](docs/old_source/images/output_24_0.png) +![Preview Model Server](https://raw.githubusercontent.com/aniketmaurya/chitra/master/docs/examples/model-server/preview.png) +
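+Once the server is running, the endpoint can be exercised with any HTTP client. The host, port and route below are assumptions for illustration only; the REST backend builds on FastAPI, so the interactive docs served at `/docs` show the actual endpoint and payload for your `ModelServer`:
+
+```python
+import requests
+
+# Assumed address and route; open http://127.0.0.1:8080/docs to confirm
+# what the generated API actually expects.
+with open("cat.jpg", "rb") as f:
+    response = requests.post(
+        "http://127.0.0.1:8080/api/predict-image",
+        files={"file": f},
+    )
+print(response.status_code, response.json())
+```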
-## Utils +See [Example Section](https://chitra.readthedocs.io/en/latest/source/api/serve/model_server/) for detailed +explanation! -Limit GPU memory or enable dynamic GPU memory growth for Tensorflow +## 🛠 Utility -``` -from chitra.utils import limit_gpu, gpu_dynamic_mem_growth +Limit GPU memory or enable dynamic GPU memory growth for Tensorflow. + +```python +from chitra.utility.tf_utils import limit_gpu, gpu_dynamic_mem_growth # limit the amount of GPU required for your training -limit_gpu(gpu_id=0, memory_limit=1024*2) +limit_gpu(gpu_id=0, memory_limit=1024 * 2) ``` No GPU:0 found in your system! - -``` +```python gpu_dynamic_mem_growth() ``` No GPU found on the machine! +## 🤗 Contribute + +Contributions of any kind are welcome. Please check the [**Contributing +Guidelines**](https://github.com/aniketmaurya/chitra/blob/master/CONTRIBUTING.md) before contributing. + +## Code Of Conduct + +We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community. + +Read full [**Contributor Covenant Code of Conduct**](https://github.com/aniketmaurya/chitra/blob/master/CODE_OF_CONDUCT.md) -## Contributing +## Acknowledgement -Contributions of any kind are welcome. Please check the [**Contributing Guidelines**](https://github.com/aniketmaurya/chitra/blob/master/CONTRIBUTING.md) before contributing. +*chitra* is built with help of awesome libraries like [Tensorflow 2.x](https://github.com/tensorflow/tensorflow), +[imgaug](https://github.com/aleju/imgaug), [FastAPI](https://github.com/tiangolo/fastapi) and [Gradio](https://gradio.app). diff --git a/chitra/__about__.py b/chitra/__about__.py new file mode 100644 index 00000000..0ac4ceb8 --- /dev/null +++ b/chitra/__about__.py @@ -0,0 +1 @@ +documentation_url = "https://chitra.readthedocs.io/en/latest" diff --git a/chitra/__init__.py b/chitra/__init__.py index c35625a5..8e40cad9 100644 --- a/chitra/__init__.py +++ b/chitra/__init__.py @@ -1 +1,4 @@ -__version__ = "0.0.21" +"""A multi functional library for full stack Deep Learning.""" + +__version__ = "0.2.0" +__license__ = "Apache License 2.0" diff --git a/chitra/_nbdev.py b/chitra/_nbdev.py deleted file mode 100644 index 1774cf15..00000000 --- a/chitra/_nbdev.py +++ /dev/null @@ -1,63 +0,0 @@ -# AUTOGENERATED BY NBDEV! DO NOT EDIT! 
- -__all__ = ["index", "modules", "custom_doc_links", "git_url"] - -index = { - "remove_dsstore": "00_core.ipynb", - "get_basename": "04_dataloader.ipynb", - "IMAGENET_LABELS": "00_core.ipynb", - "tf": "01_image.ipynb", - "torch": "01_image.ipynb", - "DATA_FORMATS": "01_image.ipynb", - "DEFAULT_MODE": "01_image.ipynb", - "read_image": "01_image.ipynb", - "resize_image": "01_image.ipynb", - "BoundingBoxes": "01_image.ipynb", - "Chitra": "01_image.ipynb", - "benchmark": "02_datagenerator.ipynb", - "get_filenames": "02_datagenerator.ipynb", - "get_label": "02_datagenerator.ipynb", - "ImageSizeList": "02_datagenerator.ipynb", - "Pipeline": "02_datagenerator.ipynb", - "Dataset": "02_datagenerator.ipynb", - "MODEL_DICT": "03_trainer.ipynb", - "OPT_DICT": "03_trainer.ipynb", - "create_classifier": "03_trainer.ipynb", - "create_cnn": "03_trainer.ipynb", - "Trainer": "03_trainer.ipynb", - "InterpretModel": "03_trainer.ipynb", - "Learner": "03_trainer.ipynb", - "AUTOTUNE": "04_dataloader.ipynb", - "show_batch": "04_dataloader.ipynb", - "Clf": "04_dataloader.ipynb", - "label_color": "05_visualization.ipynb", - "colors": "05_visualization.ipynb", - "draw_box": "05_visualization.ipynb", - "draw_caption": "05_visualization.ipynb", - "draw_boxes": "05_visualization.ipynb", - "draw_detections": "05_visualization.ipynb", - "draw_annotations": "05_visualization.ipynb", - "disable_gpu": "06_tf_utils.ipynb", - "limit_gpu": "06_tf_utils.ipynb", - "gpu_dynamic_mem_growth": "06_tf_utils.ipynb", - "is_installed": "07_import_utils.ipynb", - "INSTALLED_MODULES": "07_import_utils.ipynb", - "pytorch_to_onnx": "08_model_converter.ipynb", - "onnx_to_pytorch": "08_model_converter.ipynb", - "tf2_to_onnx": "08_model_converter.ipynb", - "tf2_to_pytorch": "08_model_converter.ipynb" -} - -modules = [ - "core.py", "image.py", "datagenerator.py", "trainer.py", "dataloader.py", - "visualization.py", "utility/tf_utils.py", "utility/import_utils.py", - "converter/core.py" -] - -doc_url = "https://chitra.aniketmaurya.com/" - -git_url = "https://github.com/aniketmaurya/chitra/tree/master/" - - -def custom_doc_links(name): - return None diff --git a/chitra/assets/API.Dockerfile b/chitra/assets/API.Dockerfile new file mode 100644 index 00000000..91124eb7 --- /dev/null +++ b/chitra/assets/API.Dockerfile @@ -0,0 +1,14 @@ +FROM aniketmaurya/chitra:latest + + +WORKDIR /app + +COPY ./requirements.txt requirements.txt + +RUN pip3 install --no-cache-dir -r requirements.txt + +COPY . . + +EXPOSE PORT + +CMD [ "uvicorn", "main:app", "--host=0.0.0.0", "--port=PORT"] diff --git a/chitra/converter/__init__.py b/chitra/cli/__init__.py similarity index 100% rename from chitra/converter/__init__.py rename to chitra/cli/__init__.py diff --git a/chitra/cli/builder.py b/chitra/cli/builder.py new file mode 100644 index 00000000..6e55870c --- /dev/null +++ b/chitra/cli/builder.py @@ -0,0 +1,94 @@ +import os +import shlex +import subprocess +from glob import glob +from pathlib import Path +from typing import List, Optional + +import typer + +import chitra + +app = typer.Typer( + help="""Auto Builds docker image for chitra ModelServer. + path should contain a `main.py` file which will have an object of type `chitra.serve.ModelServer` and + its name should be `app`. 
If you have any external Python dependency then create a `requirements.txt` file and + keep in the same directory.""" +) + + +def get_dockerfile() -> str: + path = Path(os.path.dirname(chitra.__file__)) / "assets/API.Dockerfile" + with open(path, "r") as fr: + data = fr.read() + return data + + +def file_check(files: List) -> None: + files = map(os.path.basename, files) + if "requirements.txt" not in files: + raise UserWarning("requirements.txt not found!") + + if "main.py" not in files: + raise UserWarning( + "main.py not found! Your main.py should contain app \ + object of type chitra.serve.ModelServer" + ) + + +def text_to_file(text: str, path: str): + with open(path, "w") as fw: + fw.write(text) + + +@app.command() +def create( + path: str = "./", + port: Optional[str] = None, + tag: Optional[str] = None, +): + """ + Auto-builds Docker image for chitra ModelServer + + Args: + + path: file-location of main.py + + port: port to expose + + tag: tag of docker image + + """ + if not port: + port = "8080" + if not tag: + tag = "chitra-server" + + path = Path(path) + files = glob(str(path / "*")) + file_check(files) + style_path = typer.style(str(path), fg=typer.colors.GREEN) + typer.echo(f"Everything under {style_path} will be added to Docker image!") + show_files = typer.confirm("Do you wish to see the files to be added?") + if show_files: + typer.echo(files) + + dockerfile = get_dockerfile() + dockerfile = dockerfile.replace("PORT", port) + if typer.confirm("Show Dockerfile"): + typer.echo(dockerfile) + text_to_file(dockerfile, "Dockerfile") + + typer.echo(f"Building Docker {tag} 🐳") + cmd = f"docker build --tag {tag} ." + cmd = shlex.split(cmd) + process = subprocess.run(cmd, stdout=subprocess.PIPE, universal_newlines=True) + if process.returncode == 0: + typer.secho( + "Docker image has been created for your app! Check the image using ", + nl=False, + fg=typer.colors.GREEN, + ) + typer.secho("docker images", fg=typer.colors.BRIGHT_BLUE) + else: + typer.secho("Docker build failed!", fg=typer.colors.RED) diff --git a/chitra/cli/main.py b/chitra/cli/main.py new file mode 100644 index 00000000..830165fe --- /dev/null +++ b/chitra/cli/main.py @@ -0,0 +1,19 @@ +import typer + +from chitra import __version__ +from chitra.cli import builder + +app = typer.Typer( + name="chitra CLI ✨", + add_completion=False, +) + +app.add_typer( + builder.app, + name="builder", +) + + +@app.command() +def version(): + typer.echo(f"Hey 👋! You're running chitra version={__version__} ✨") diff --git a/chitra/constants.py b/chitra/constants.py index 6ecb2639..06b8fb58 100644 --- a/chitra/constants.py +++ b/chitra/constants.py @@ -1,4 +1,8 @@ -IMAGENET_LABEL_URL = 'https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt' +import os + +IMAGENET_LABEL_URL = ( + "https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt" +) _TF = "tensorflow" _TF_GPU = "tensorflow-gpu" @@ -6,3 +10,8 @@ _TORCHVISION = "torchvision" _JAX = "jax" _FLAX = "flax" + +CHITRA_URL_SEP = "[-]" +CACHE_DIR = os.environ.get("HOME", ".") + "/.chitra_cache" +IMAGE_CACHE_DIR = f"{CACHE_DIR}/images/" +DOCKER_BASE_URL = "unix://var/run/docker.sock" diff --git a/chitra/converter/core.py b/chitra/converter/core.py deleted file mode 100644 index 2eae88c1..00000000 --- a/chitra/converter/core.py +++ /dev/null @@ -1,73 +0,0 @@ -# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/08_model_converter.ipynb (unless otherwise specified). 
- -__all__ = [ - 'pytorch_to_onnx', 'onnx_to_pytorch', 'tf2_to_onnx', 'tf2_to_pytorch' -] - -# Cell - -# Cell -import torch.onnx - - -def pytorch_to_onnx(model, tensor, export_path="temp.onnx"): - # Input to the model - torch_out = model(tensor) - - # Export the model - torch.onnx.export( - model, # model being run - tensor, # model input (or a tuple for multiple inputs) - export_path, # where to save the model (can be a file or file-like object) - export_params= - True, # store the trained parameter weights inside the model file - opset_version=10, # the ONNX version to export the model to - do_constant_folding= - True, # whether to execute constant folding for optimization - input_names=["input"], # the model's input names - output_names=["output"], # the model's output names - dynamic_axes={ - "input": { - 0: "batch_size" - }, # variable length axes - "output": { - 0: "batch_size" - }, - }, - ) - return export_path - - -# Cell -import onnx -import tf2onnx -from onnx2pytorch import ConvertModel - - -def onnx_to_pytorch(onnx_model): - if isinstance(onnx_model, str): - onnx_model = onnx.load(onnx_model) - onnx.checker.check_model(onnx_model) - pytorch_model = ConvertModel(onnx_model) - return pytorch_model - - -def tf2_to_onnx(model, opset=None, output_path=None, **kwargs): - inputs_as_nchw = kwargs.get("inputs_as_nchw", "input0:0") - onnx_model = tf2onnx.convert.from_keras(model, - opset=opset, - output_path=output_path, - inputs_as_nchw=inputs_as_nchw) - return onnx_model - - -def tf2_to_pytorch(model, opset=None, **kwargs): - with tempfile.NamedTemporaryFile(mode='w') as fw: - filename = fw.name - onnx_model = tf2_to_onnx(tf_model, - opset, - output_path=filename, - **kwargs) - fw.seek(0) - torch_model = onnx_to_pytorch(filename) - return torch_model diff --git a/chitra/coordinates.py b/chitra/coordinates.py new file mode 100644 index 00000000..f7dc96fd --- /dev/null +++ b/chitra/coordinates.py @@ -0,0 +1,114 @@ +from typing import List, Optional, Tuple, Union + +import numpy as np +from imgaug.augmentables import bbs + + +class BoundingBoxes: + CENTER = "XXYY" + CORNER = "XYXY" + + def __init__( + self, + bboxes: Optional[List[list]] = None, + labels: Optional[List[Union[int, str]]] = None, + box_format: str = "xyxy", + ): + """Args: + bboxes: list of bounding boxes [(x1, y1, x2, y2), ...] or [(xc, yc, h, w), ...] 
+ labels: list of strings or integers + box_format: + - `xyxy` for corner points of bbox + - `xyhw` for x-center, y-center, height and width format of bbox + """ + if box_format.upper() not in ( + self.CENTER, + self.CORNER, + ): + raise AssertionError("bbox format must be either xyxy or xyhw") + bboxes = self._listify(bboxes, 4) + labels = self._listify(labels) + + if len(bboxes) != len(labels): + raise UserWarning( + f"len of boxes and labels not matching: {len(bboxes), len(labels)}" + ) + self._format = box_format.upper() + self.bboxes = self._list_to_bbox(bboxes, labels) + self._state = {} + + def _listify(self, item, dim_trigger=None): + if item is None: + return item + + if not isinstance(item, (list, tuple)): + return [item] + + if isinstance(item, (list, tuple)) and self.num_dim(item) == dim_trigger: + item = [item] + return item + + @staticmethod + def num_dim(item): + return len(item) + + @staticmethod + def center_to_corner(cx, cy, h, w): + xmin = cx - w / 2 + xmax = cx + w / 2 + ymin = cy - h / 2 + ymax = cy + h / 2 + + return xmin, ymin, xmax, ymax + + @staticmethod + def corner_to_center(xmin, ymin, xmax, ymax): + w = xmax - xmin + h = ymax - ymin + + cx = xmin + w / 2 + cy = ymin + h / 2 + + return cx, cy, h, w + + def _list_to_bbox( + self, + bbox_list: Optional[List[List[Union[int, float]]]], + labels: List[Union[str, int]] = None, + ) -> List[bbs.BoundingBox]: + """Converts bbox list into `imgaug BoundigBox` object.""" + _format = self._format + + if not bbox_list: + return [] + + if not labels: + labels = [None] * self.num_dim(bbox_list) + + bbox_objects = [] + for bbox, label in zip(bbox_list, labels): + if _format == self.CENTER: + bbox = self.center_to_corner(*bbox) + bbox_objects.append(bbs.BoundingBox(*bbox, label)) + return bbox_objects + + def __getitem__(self, idx): + return self.bboxes[idx] + + def __repr__(self): + return str(self.bboxes) + + def get_bounding_boxes_on_image( + self, image_shape: Tuple[int] + ) -> bbs.BoundingBoxesOnImage: + """returns `imgaug BoundingBoxesOnImage` object which can be used to + boxes on the image.""" + return bbs.BoundingBoxesOnImage(self.bboxes, image_shape) + + def resize_with_image( + self, old_image_size: List[int], rescaled_image: np.ndarray + ) -> bbs.BoundingBoxesOnImage: + bbox_on_image = self.get_bounding_boxes_on_image(old_image_size) + bbox_on_rescaled_image = bbox_on_image.on(rescaled_image) + self.bboxes = bbox_on_rescaled_image.bounding_boxes + return bbox_on_rescaled_image diff --git a/chitra/core.py b/chitra/core.py index 69be127f..031c7974 100644 --- a/chitra/core.py +++ b/chitra/core.py @@ -1,44 +1,38 @@ -# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/00_core.ipynb (unless otherwise specified). - -__all__ = [ - 'remove_dsstore', 'get_basename', 'IMAGENET_LABELS', 'load_imagenet_labels' -] - import os import pathlib -from typing import Tuple +from typing import Optional, Tuple import requests import tensorflow as tf -from loguru import logger from chitra.constants import IMAGENET_LABEL_URL +from chitra.logging import logger -IMAGENET_LABELS: Tuple[str] = None +IMAGENET_LABELS: Optional[Tuple[str]] = None def remove_dsstore(path) -> None: - """ - Deletes .DS_Store files from path and sub-folders of path. 
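# --- Editor's note (hedged sketch, not part of the diff) ---------------------
# Basic usage of the new chitra.coordinates.BoundingBoxes class added above.
# The box values and the "person" label are made up for illustration.
from chitra.coordinates import BoundingBoxes

boxes = BoundingBoxes(
    bboxes=[[10, 20, 110, 220]],  # corner format: xmin, ymin, xmax, ymax
    labels=["person"],
    box_format="xyxy",
)
print(boxes[0])  # an imgaug BoundingBox carrying the label "person"

# A center-format box (cx, cy, h, w) can be converted to corner points:
print(BoundingBoxes.center_to_corner(60, 120, 200, 100))  # -> (10.0, 20.0, 110.0, 220.0)
# -----------------------------------------------------------------------------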
- """ + """Deletes .DS_Store files from path and sub-folders of path.""" path = pathlib.Path(path) - for e in path.glob('*.DS_Store'): + for e in path.glob("*.DS_Store"): os.remove(e) - for e in path.glob('*/*.DS_Store'): + for e in path.glob("*/*.DS_Store"): os.remove(e) -def get_basename(path: tf.string): - assert isinstance(path, tf.Tensor) +def get_basename(path: tf.string) -> tf.string: + if not isinstance(path, tf.Tensor): + raise AssertionError return tf.strings.split(path, os.path.sep)[-1] def load_imagenet_labels() -> Tuple[str]: global IMAGENET_LABELS if IMAGENET_LABELS is None: - logger.debug('Downloading imagenet labels...') - IMAGENET_LABELS = requests.get(IMAGENET_LABEL_URL).content.decode( - 'UTF-8').split('\n')[1:] + logger.debug("Downloading imagenet labels...") + IMAGENET_LABELS = ( + requests.get(IMAGENET_LABEL_URL).content.decode("UTF-8").split("\n")[1:] + ) return IMAGENET_LABELS diff --git a/chitra/data_processing/__init__.py b/chitra/data_processing/__init__.py new file mode 100644 index 00000000..eb533456 --- /dev/null +++ b/chitra/data_processing/__init__.py @@ -0,0 +1,2 @@ +from chitra.data_processing.default import DefaultTextProcessor, DefaultVisionProcessor +from chitra.data_processing.processor import DataProcessor diff --git a/chitra/data_processing/default/__init__.py b/chitra/data_processing/default/__init__.py new file mode 100644 index 00000000..49c5bb42 --- /dev/null +++ b/chitra/data_processing/default/__init__.py @@ -0,0 +1,2 @@ +from chitra.data_processing.default.nlp import DefaultTextProcessor +from chitra.data_processing.default.vision import DefaultVisionProcessor diff --git a/chitra/data_processing/default/nlp.py b/chitra/data_processing/default/nlp.py new file mode 100644 index 00000000..2949fe94 --- /dev/null +++ b/chitra/data_processing/default/nlp.py @@ -0,0 +1,5 @@ +from ..processor import DataProcessor + + +class DefaultTextProcessor: + nlp = DataProcessor(lambda x: x, lambda x: x) diff --git a/chitra/data_processing/default/vision.py b/chitra/data_processing/default/vision.py new file mode 100644 index 00000000..48eedcbb --- /dev/null +++ b/chitra/data_processing/default/vision.py @@ -0,0 +1,59 @@ +from io import BytesIO +from typing import List, Optional, Tuple + +import numpy as np +from PIL import Image + +from chitra.image import Chitra + +from ..processor import DataProcessor + + +def default_preprocess( + data, + image_shape: Optional[Tuple[int, int]] = None, + rescale: bool = True, + expand_dims: bool = True, +) -> np.ndarray: + """ + Supports image resize, rescaling and dimension expansion along 0th index + Args: + data: Image File buffer or numpy array + image_shape: Target image size + rescale: If true then image will be rescaled as [-1, 1] + expand_dims: + + Returns: + preprocessed numpy array image + """ + if isinstance(data, str): + image = Image.open(BytesIO(data)).convert("RGB") + elif isinstance(data, np.ndarray): + image = Chitra(data).image + else: + raise UserWarning(f"preprocessing not implemented for this data type -> {data}") + + if image_shape: + image = image.resize(image_shape) + + image = np.asarray(image).astype(np.float32) + if rescale: + image = image / 127.5 - 1.0 + if expand_dims: + image = np.expand_dims(image, 0) + return image + + +def default_postprocess(data, return_type: Optional[str] = "list") -> List: + if not isinstance(data, (np.ndarray, list, tuple, int, float)): + data = data.numpy() + if return_type == "list": + if isinstance(data, np.ndarray): + data = data.tolist() + else: + list(data) + return 
data + + +class DefaultVisionProcessor: + vision = DataProcessor(default_preprocess, default_postprocess) diff --git a/chitra/data_processing/processor.py b/chitra/data_processing/processor.py new file mode 100644 index 00000000..9bc8c244 --- /dev/null +++ b/chitra/data_processing/processor.py @@ -0,0 +1,35 @@ +from typing import Callable, Optional + + +class DataProcessor: + def __init__( + self, + preprocess_fn: Optional[Callable] = None, + postprocess_fn: Optional[Callable] = None, + ): + self._preprocess_fn = preprocess_fn + self._postprocess_fn = postprocess_fn + + @property + def preprocess_fn(self): + return self._preprocess_fn + + @property + def postprocess_fn(self): + return self._postprocess_fn + + def set_preprocess_fn(self, func): + self._preprocess_fn = func + + def set_postprocess_fn(self, func): + self._postprocess_fn = func + + def preprocess(self, x, **kwargs): + if self._preprocess_fn is None: + raise UserWarning("preprocess method is not defined") + return self._preprocess_fn(x, **kwargs) + + def postprocess(self, x, **kwargs): + if self._postprocess_fn is None: + raise UserWarning("postprocess method not defined") + return self._postprocess_fn(x, **kwargs) diff --git a/chitra/datagenerator.py b/chitra/datagenerator.py index b76e6ba9..8382839b 100644 --- a/chitra/datagenerator.py +++ b/chitra/datagenerator.py @@ -1,11 +1,3 @@ -# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/02_datagenerator.ipynb (unless otherwise specified). - -__all__ = [ - 'benchmark', 'get_filenames', 'get_label', 'ImageSizeList', 'Pipeline', - 'Dataset' -] - -# Cell import os import pathlib import random @@ -18,15 +10,14 @@ import tensorflow as tf from typeguard import check_argument_types, typechecked -from .tf_image import read_image, resize_image +from chitra.image.tf_image import read_image, resize_image -# Cell def benchmark(dataset, num_epochs=2, fake_infer_time=0.001): - """Use this function to benchmark your Dataset loading time""" + """Use this function to benchmark your Dataset loading time.""" start_time = time.perf_counter() - for epoch_num in range(num_epochs): - for sample in dataset: + for _ in range(num_epochs): + for _ in dataset: # Performing a training step time.sleep(fake_infer_time) tf.print( @@ -34,7 +25,6 @@ def benchmark(dataset, num_epochs=2, fake_infer_time=0.001): ) -# Cell def get_filenames(root_dir): root_dir = pathlib.Path(root_dir) return glob(str(root_dir / "*/*")) @@ -44,14 +34,15 @@ def get_label(filename): return filename.split("/")[-2] -# Cell class ImageSizeList: def __init__(self, img_sz_list=None): - if isinstance(img_sz_list, (list, tuple)): - if len(img_sz_list) != 0 and not isinstance( - img_sz_list[0], (list, tuple)): - img_sz_list = [img_sz_list][:] + if ( + isinstance(img_sz_list, (list, tuple)) + and len(img_sz_list) != 0 + and not isinstance(img_sz_list[0], (list, tuple)) + ): + img_sz_list = [img_sz_list][:] self.start_size = None self.last_size = None @@ -62,7 +53,7 @@ def __init__(self, img_sz_list=None): self.start_size = img_sz_list[0] self.last_size = img_sz_list[-1] self.curr_size = img_sz_list[0] - except (IndexError, TypeError) as e: + except (IndexError, TypeError): print("No item present in the image size list") self.curr_size = None # no item present in the list @@ -70,7 +61,7 @@ def get_size(self): img_sz_list = self.img_sz_list try: self.curr_size = img_sz_list.pop(0) - except (IndexError, AttributeError) as e: + except (IndexError, AttributeError): print(f"Returning the last set size which is: {self.curr_size}") return self.curr_size @@ 
-79,8 +70,9 @@ def get_size(self): # Cell class Pipeline: @typechecked - def __init__(self, funcs: Union[Callable, list, tuple] = []): - assert check_argument_types() + def __init__(self, funcs: Union[Callable, list, tuple] = None): + if not check_argument_types(): + raise AssertionError if isinstance(funcs, list): self.funcs = funcs elif callable(funcs): @@ -90,7 +82,8 @@ def __init__(self, funcs: Union[Callable, list, tuple] = []): @typechecked def add(self, func: Callable): - assert check_argument_types() + if not check_argument_types(): + raise AssertionError self.funcs.append(func) def __call__(self, item): @@ -98,31 +91,32 @@ def __call__(self, item): for func in self.funcs: item = func(item) except Exception as e: - print(f"Error while applying function in pipeline!") + print("Error while applying function in pipeline!") raise e return item -# Cell class Dataset: MAPPINGS = { - "PY_TO_TF": { - str: tf.string, - int: tf.int32, - float: tf.float32 - }, + "PY_TO_TF": {str: tf.string, int: tf.int32, float: tf.float32}, } def __init__( self, train_dir: Union[str, Path], - image_size=[], + image_size=None, transforms=None, default_encode=True, **kwargs, ): """ - train_dir(str): Path for training data + Create a Dataset object that can generate tf.data.Dataset + Args: + train_dir: + image_size: + transforms: + default_encode: + **kwargs: """ self.get_filenames = get_filenames self.read_image = read_image @@ -134,8 +128,8 @@ def __init__( self.filenames = self.get_filenames(train_dir) self.num_files = len(self.filenames) self.image_size = image_size - self.img_sz_list = ImageSizeList(self.image_size[:]) - + self.img_sz_list = ImageSizeList(self.image_size) + self.step_size = None self.labels = kwargs.get("labels", self.get_labels()) def __len__(self): @@ -147,9 +141,10 @@ def _process(self, filename): return image, label def _reload(self): + image_size = self.image_size[:] self.filenames = self.get_filenames(self.root_dir) self.num_files = len(self.filenames) - self.img_sz_list = ImageSizeList(None or self.image_size[:]) + self.img_sz_list = ImageSizeList(image_size) self.labels = self.get_labels() def _capture_return_types(self): @@ -160,18 +155,19 @@ def _capture_return_types(self): if isinstance(outputs, tuple): for ret_type in outputs: return_types.append( - ret_type.dtype if tf.is_tensor(ret_type) else Dataset. - MAPPINGS["PY_TO_TF"][type(ret_type)]) + ret_type.dtype + if tf.is_tensor(ret_type) + else Dataset.MAPPINGS["PY_TO_TF"][type(ret_type)] + ) else: - return_types.append(ret_type.dtype if tf.is_tensor(ret_type) else - Dataset.MAPPINGS["PY_TO_TF"][type(ret_type)]) + raise UserWarning("Unable to capture return type!") return tuple(return_types) def __getitem__(self, idx): filename = self.filenames[idx] return self._process(filename) - def update_component(self, component_name, new_component, reload=True): + def update_component(self, component_name, new_component): setattr(self, component_name, new_component) print(f"{component_name} updated with {new_component}") self._reload() @@ -192,7 +188,8 @@ def get_labels(self): def label_encoder(self, label): idx = self.label_to_idx.get(label, None) - assert idx is not None, f"Error while converting label={label} to index!" 
+ if idx is None: + raise AssertionError(f"Error while converting label={label} to index!") return idx def generator(self, shuffle=False): @@ -214,7 +211,6 @@ def get_tf_dataset(self, output_shape=None, shuffle=True): return_types = self._capture_return_types() self._reload() generator = partial(self.generator, shuffle=shuffle) - datagen = tf.data.Dataset.from_generator(generator, return_types, - output_shape) + datagen = tf.data.Dataset.from_generator(generator, return_types, output_shape) return datagen diff --git a/chitra/dataloader.py b/chitra/dataloader.py index 1b2622f8..92ec3115 100644 --- a/chitra/dataloader.py +++ b/chitra/dataloader.py @@ -1,7 +1,3 @@ -# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/04_dataloader.ipynb (unless otherwise specified). - -__all__ = ['AUTOTUNE', 'get_basename', 'show_batch', 'Clf'] - import math import os import pathlib @@ -10,54 +6,15 @@ import matplotlib.pyplot as plt import tensorflow as tf +from chitra.image.tf_image import read_image, resize_image + from .core import remove_dsstore -from .tf_image import read_image, resize_image +from .utility.tf_utils import get_basename -# Cell AUTOTUNE = tf.data.experimental.AUTOTUNE -# Cell -def get_basename(path: tf.string): - assert isinstance(path, tf.Tensor) - return tf.strings.split(path, os.path.sep)[-1] - - -def show_batch(clf, limit: int, figsize: tuple = (10, 10)): - """Visualize image and labels - - https://www.tensorflow.org/tutorials/load_data/images#load_using_keraspreprocessing - - Args: - data: tf.data.Dataset containing image, label - limit: number of images to display - figsize: size of visualization - Returns: - Displays images and labels - """ - assert isinstance(limit, int) - assert isinstance(figsize, tuple) - - data = clf.data - idx_to_class = clf.idx_to_class - - plt.figure(figsize=figsize) - sub_plot_size = math.ceil(limit / 2) - - for i, e in enumerate(data.take(limit)): - image, label = e - image = image.numpy().astype('uint8') - label = idx_to_class[label.numpy()] if idx_to_class else label.numpy() - - ax = plt.subplot(sub_plot_size, sub_plot_size, i + 1) - - plt.imshow(image) - plt.title(label) - plt.axis('off') - - -# Cell -class Clf(object): +class Clf: def __init__(self): self.CLASS_NAMES = None self.data = None @@ -68,7 +25,7 @@ def __init__(self): self._lookup_class_to_idx = None def show_batch(self, limit: int, figsize: tuple = (10, 10)): - """Visualize image and labels + """Visualize image and labels. 
https://www.tensorflow.org/tutorials/load_data/images#load_using_keraspreprocessing @@ -79,11 +36,14 @@ def show_batch(self, limit: int, figsize: tuple = (10, 10)): Returns: Displays images and labels """ - assert isinstance(limit, int) - assert isinstance(figsize, tuple) + if not isinstance(limit, int): + raise AssertionError + if not isinstance(figsize, tuple): + raise AssertionError data = self.data - if data is None: raise Exception('TF.data not created yet!') + if data is None: + raise UserWarning("TF.data not created yet!") idx_to_class = self.idx_to_class plt.figure(figsize=figsize) @@ -91,22 +51,23 @@ def show_batch(self, limit: int, figsize: tuple = (10, 10)): for i, e in enumerate(data.take(limit)): image, label = e - image = image.numpy().astype('uint8') - label = idx_to_class[ - label.numpy()] if idx_to_class else label.numpy() + image = image.numpy().astype("uint8") + label = idx_to_class[label.numpy()] if idx_to_class else label.numpy() - ax = plt.subplot(sub_plot_size, sub_plot_size, i + 1) + plt.subplot(sub_plot_size, sub_plot_size, i + 1) plt.imshow(image) plt.title(label) - plt.axis('off') + plt.axis("off") - def _get_image_list(self, path: str): + @staticmethod + def _get_image_list(path: str): """`path`: pathlib.Path Returns: list of images """ - assert isinstance(path, str) - list_images = tf.data.Dataset.list_files(f'{path}/*/*') + if not isinstance(path, str): + raise AssertionError + list_images = tf.data.Dataset.list_files(f"{path}/*/*") return list_images @tf.function @@ -118,13 +79,10 @@ def _process_path(self, path: str): Returns: image, label """ - assert isinstance( - path, - (str, - tf.Tensor)), f'type of path is {type(path)}, expected type str' + if not isinstance(path, (str, tf.Tensor)): + raise AssertionError(f"type of path is {type(path)}, expected type str") img = read_image(path) - # TODO: resizing should be done separately # py_function will degrade performance if self.shape: [ @@ -134,35 +92,33 @@ def _process_path(self, path: str): # img = tf.image.resize(img, self.shape) label = tf.strings.split(path, os.path.sep)[-2] - label = self._lookup_class_to_idx.lookup( - label) if self._lookup_class_to_idx else label + label = ( + self._lookup_class_to_idx.lookup(label) + if self._lookup_class_to_idx + else label + ) return img, label @tf.function def _ensure_shape(self, img, labels): - """Ensures the output shape of images (InputSpecs) - """ - img = tf.ensure_shape(img, (*self.shape, 3), name='image') + """Ensures the output shape of images (InputSpecs)""" + img = tf.ensure_shape(img, (*self.shape, 3), name="image") return img, labels def create_lookup_table(self): - """Creates tf.lookup.StaticHashTable for encoding labels""" - + """Creates tf.lookup.StaticHashTable for encoding labels.""" keys = list(self.class_to_idx.keys()) vals = list(self.class_to_idx.values()) keys_tensor = keys # tf.constant(keys) vals_tensor = vals # tf.constant(vals) - table_init = tf.lookup.KeyValueTensorInitializer( - keys_tensor, vals_tensor) + table_init = tf.lookup.KeyValueTensorInitializer(keys_tensor, vals_tensor) self._lookup_class_to_idx = tf.lookup.StaticHashTable(table_init, -1) def _get_classnames(self, list_folders, encode_classes: bool = True): - """""" - self.CLASS_NAMES = tuple( - get_basename(e).numpy().decode() for e in list_folders) + self.CLASS_NAMES = tuple(get_basename(e).numpy().decode() for e in list_folders) if encode_classes: self._encode_classes() @@ -176,11 +132,13 @@ def _encode_classes(self): self.create_lookup_table() - def from_folder(self, - path: 
Union[str, pathlib.Path], - target_shape: Union[None, tuple] = (224, 224), - shuffle: Union[bool, int] = True, - encode_classes: bool = True): + def from_folder( + self, + path: Union[str, pathlib.Path], + target_shape: Union[None, tuple] = (224, 224), + shuffle: Union[bool, int] = True, + encode_classes: bool = True, + ): """Load dataset from given path. Args: path: string, path of folder containing dataset @@ -193,18 +151,21 @@ def from_folder(self, By default the loaded image size is 224x224, pass None to load original size. You will get error on `batch()` method if all image size are not same. """ - assert isinstance(path, (str, pathlib.Path)) - assert isinstance(shuffle, (bool, int)), print( - f'Arg: shuffle is either bool or int but got {shuffle} : {type(shuffle)}' - ) + if not isinstance(path, (str, pathlib.Path)): + raise AssertionError + if not isinstance(shuffle, (bool, int)): + raise AssertionError( + print( + f"Arg: shuffle is either bool or int but got {shuffle} : {type(shuffle)}" + ) + ) path = pathlib.Path(path) remove_dsstore(path) - # TODO comments self.shape = target_shape - list_folders = tf.data.Dataset.list_files(str(path / '*')) + list_folders = tf.data.Dataset.list_files(str(path / "*")) list_images = self._get_image_list(str(path)) if shuffle: @@ -215,9 +176,9 @@ def from_folder(self, self._get_classnames(list_folders, encode_classes) if encode_classes: - print(f'CLASSES ENCODED: {self.class_to_idx}') + print(f"CLASSES ENCODED: {self.class_to_idx}") else: - print(f'CLASSES FOUND: {self.CLASS_NAMES}') + print(f"CLASSES FOUND: {self.CLASS_NAMES}") data = list_images.map(self._process_path, num_parallel_calls=AUTOTUNE) diff --git a/chitra/image.py b/chitra/image.py deleted file mode 100644 index d9427449..00000000 --- a/chitra/image.py +++ /dev/null @@ -1,220 +0,0 @@ -__all__ = ['DATA_FORMATS', 'DEFAULT_MODE', 'BoundingBoxes', 'Chitra'] - -import os -from io import BytesIO - -import matplotlib.pyplot as plt -import numpy as np -import requests -from PIL import Image - -from .constants import _TF, _TORCH -from .utility.import_utils import INSTALLED_MODULES - -tf = None -torch = None - -if INSTALLED_MODULES.get(_TF, None): - import tensorflow as tf - -if INSTALLED_MODULES.get(_TORCH, None): - import torch - -# Cell -from typing import List, Optional, Union - -DATA_FORMATS = Union[str, Image.Image, np.ndarray, tf.Tensor, torch.Tensor] -DEFAULT_MODE = os.environ.get("CHITRA_DEFAULT_MODE", "TF") - -from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage - - -def _url_to_image(url: str) -> Image.Image: - """returns Image from url""" - assert url.lower().startswith("http"), "invalid url, must start with http" - content = requests.get(url).content - image = Image.open(BytesIO(content)) - return image - - -# Cell -class BoundingBoxes: - CENTER = "XXYY" - CORNER = "XYXY" - - def __init__(self, - bboxes: Optional[List[list]] = None, - labels: Optional[List[Union[int, str]]] = None, - format: str = 'xyxy'): - """Args: - bboxes: list of bounding boxes [(x1, y1, x2, y2), ...] or [(xc, yc, h, w), ...] 
- labels: list of strings or integers - format: - - `xyxy` for corner points of bbox - - `xyhw` for x-center, y-center, height and width format of bbox - """ - assert format.upper() in ( - self.CENTER, - self.CORNER), f"bbox format must be either xyxy or xyhw" - bboxes = self._listify(bboxes, 4) - labels = self._listify(labels) - assert len(bboxes) == len( - labels - ), f"len of boxes and labels not matching: {len(bboxes), len(labels)}" - - self._format = format.upper() - self.bboxes = self._list_to_bbox(bboxes, labels) - self._state = {} - - def _listify(self, item, dim_trigger=None): - if item is None: - return item - - if not isinstance(item, (list, tuple)): - return [item] - - if isinstance(item, (list, tuple)): - if self.num_dim(item) == dim_trigger: - item = [item] - return item - - @staticmethod - def num_dim(item): - return len(item) - - @staticmethod - def center_to_corner(cx, cy, h, w): - xmin = cx - w / 2 - xmax = cx + w / 2 - ymin = cy - h / 2 - ymax = cy + h / 2 - - return xmin, ymin, xmax, ymax - - @staticmethod - def corner_to_center(xmin, ymin, xmax, ymax): - w = xmax - xmin - h = ymax - ymin - - cx = xmin + w / 2 - cy = ymin + h / 2 - - return cx, cy, h, w - - def _list_to_bbox( - self, - bbox_list: Optional[List[List[Union[int, float]]]], - labels: List[Union[str, int]] = None) -> List[BoundingBox]: - """Converts bbox list into `imgaug BoundigBox` object - """ - format = self._format - - if not bbox_list: - return None - - if not labels: - labels = [None] * self.num_dim(bbox_list) - - bbox_objects = [] - for bbox, label in zip(bbox_list, labels): - if format == self.CENTER: - bbox = self.center_to_corner(*bbox) - bbox_objects.append(BoundingBox(*bbox, label)) - return bbox_objects - - def __getitem__(self, idx): - return self.bboxes[idx] - - def __repr__(self): - return str(self.bboxes) - - def get_bounding_boxes_on_image(self, image_shape): - """returns `imgaug BoundingBoxesOnImage` object which can be used to boxes on the image - """ - return BoundingBoxesOnImage(self.bboxes, image_shape) - - -# Cell -class Chitra: - """Ultimate image utility class. - 1. Load image from file, web url, numpy or bytes - 2. Plot image - 3. 
Draw bounding boxes - """ - def __init__(self, - data, - bboxes=None, - labels=None, - format=BoundingBoxes.CORNER, - *args, - **kwargs) -> None: - """Args: - data: numpy, url, filelike - """ - super().__init__() - self.image = self._load_image(data) - self.bboxes = None - - if bboxes is not None: - self.bboxes = BoundingBoxes(bboxes, labels) - - def _load_image(self, data: DATA_FORMATS): - if isinstance(data, Image.Image): - return data - - if isinstance(data, (tf.Tensor, torch.Tensor)): - data = data.numpy() - - if isinstance(data, str): - if data.startswith("http"): - image = _url_to_image(data) - else: - image = Image.open(data) - - elif isinstance(data, np.ndarray): - image = Image.fromarray(data) - - else: - raise UserWarning("unable to load image!") - - return image - - def numpy(self): - return np.asarray(self.image) - - def to_tensor(self, mode: str = DEFAULT_MODE): - """mode: tf/torch/pt""" - mode = mode.upper() - np_image = self.numpy() - - if mode == "TF": - tensor = tf.constant(np_image) - elif mode in ("TORCH", "PT"): - tensor = torch.from_numpy(np_image) - else: - raise UserWarning("invalid mode!") - return tensor - - @property - def shape(self): - return self.numpy().shape - - @property - def size(self): - return self.image.size - - def imshow(self, cmap=plt.cm.Blues, *args, **kwargs): - plt.imshow(self.numpy(), cmap, *args, **kwargs) - - def draw_boxes( - self, - marker_size: int = 2, - color=(0, 255, 0), - ): - if self.bboxes is None: - raise UserWarning('bboxes is None') - - bbox_on_image = self.bboxes.get_bounding_boxes_on_image(self.shape) - return bbox_on_image.draw_on_image(self.numpy(), - color=color, - size=marker_size) diff --git a/chitra/image/__init__.py b/chitra/image/__init__.py new file mode 100644 index 00000000..fa09bcf0 --- /dev/null +++ b/chitra/image/__init__.py @@ -0,0 +1 @@ +from .image import Chitra diff --git a/chitra/image/image.py b/chitra/image/image.py new file mode 100644 index 00000000..32c084ad --- /dev/null +++ b/chitra/image/image.py @@ -0,0 +1,143 @@ +import io +import os +from pathlib import Path +from typing import Any, List, Union + +import matplotlib.pyplot as plt +import numpy as np +import requests +from PIL import Image + +from chitra.constants import CHITRA_URL_SEP, IMAGE_CACHE_DIR +from chitra.coordinates import BoundingBoxes + +DATA_FORMATS = Union[str, Image.Image, np.ndarray] + + +def _cache_image(image: Image.Image, image_path: str): + cache_dir = Path(IMAGE_CACHE_DIR) + filename = image_path.replace("/", CHITRA_URL_SEP) + os.makedirs(cache_dir, exist_ok=True) + image.save(cache_dir / filename) + + +def _url_to_image(url: str, cache: bool) -> Image.Image: + """returns Image from url.""" + filename = url.replace("/", CHITRA_URL_SEP) + cache_file = Path(IMAGE_CACHE_DIR) / filename + if cache and os.path.exists(cache_file): + return Image.open(cache_file) + + if not url.lower().startswith("http"): + raise AssertionError("invalid url, must start with http") + content = requests.get(url).content + image = Image.open(io.BytesIO(content)) + if cache: + _cache_image(image, url) + return image + + +class Chitra: + """Ultimate image utility class. + + 1. Load image from file, web url, numpy or bytes + 2. Plot image + 3. 
Draw bounding boxes + """ + + def __init__( + self, + data: Any, + bboxes: List = None, + labels: List = None, + box_format: str = BoundingBoxes.CORNER, + cache: bool = False, + *args, + **kwargs + ) -> None: + """ + + Args: + data: numpy, url, filelike + bboxes: + labels: + box_format: + cache[bool]: Whether to cache downloaded image + *args: + **kwargs: + """ + super().__init__() + self.image = self._load_image(data, cache=cache) + self.bboxes = None + + if bboxes is not None: + self.bboxes = BoundingBoxes(bboxes, labels) + + @staticmethod + def _load_image(data: DATA_FORMATS, cache: bool): + if isinstance(data, Image.Image): + return data + + if isinstance(data, bytes): + return Image.open(io.BytesIO(data)) + + if isinstance(data, str): + if data.startswith("http"): + image = _url_to_image(data, cache) + + else: + image = Image.open(data) + + elif isinstance(data, np.ndarray): + image = Image.fromarray(data) + + else: + raise UserWarning("unable to load image!") + + return image + + def numpy(self): + return np.asarray(self.image) + + @property + def shape(self): + return self.numpy().shape + + @property + def size(self): + return self.image.size + + def imshow(self, cmap=plt.cm.Blues, **kwargs): + plt.imshow(self.numpy(), cmap, **kwargs) + + def draw_boxes( + self, + marker_size: int = 2, + color=(0, 255, 0), + ): + if self.bboxes is None: + raise UserWarning("bboxes is None") + + bbox_on_image = self.bboxes.get_bounding_boxes_on_image(self.shape) + return bbox_on_image.draw_on_image( + self.numpy()[..., :3], color=color, size=marker_size + ) + + def resize(self, *args, **kwargs) -> Image.Image: + """ + Calls PIL.Image.resize method and passes the arguments + Args: + *args: + **kwargs: + + Returns: + resized PIL.Image + """ + self.image = self.image.resize(*args, **kwargs) + return self.image + + def resize_image_with_bbox(self, size: List[int]): + old_size = self.shape + self.image = self.image.resize(size) + self.bboxes.resize_with_image(old_size, self.numpy()) + return self.image, self.bboxes diff --git a/chitra/image/tf_image.py b/chitra/image/tf_image.py new file mode 100644 index 00000000..ca65f361 --- /dev/null +++ b/chitra/image/tf_image.py @@ -0,0 +1,23 @@ +from typing import Union + +import tensorflow as tf + + +def read_image(path: str, channels: int = 3): + """Reads an image file from the path and return the rgb image in tf.Tensor + format.""" + img: tf.Tensor = tf.io.read_file(path) + img: tf.Tensor = tf.io.decode_image(img, channels=channels, expand_animations=False) + return img + + +def resize_image(image: tf.Tensor, size: Union[tf.Tensor, tuple], **kwargs): + """Resize image to the target `size`: Union[tf.Tensor, tuple]""" + if not isinstance(image, tf.Tensor): + raise AssertionError( + f"image must be of type tf.Tensor but passed {type(image)}" + ) + if not isinstance(size, (tuple, tf.Tensor)): + raise AssertionError + method = kwargs.get("method", "bilinear") + return tf.image.resize(image, size, method) diff --git a/chitra/import_utils.py b/chitra/import_utils.py new file mode 100644 index 00000000..b7a4e2f3 --- /dev/null +++ b/chitra/import_utils.py @@ -0,0 +1,5 @@ +import importlib + + +def is_installed(module_name: str): + return importlib.util.find_spec(module_name) is not None diff --git a/chitra/imports.py b/chitra/imports.py new file mode 100644 index 00000000..4a54a7d7 --- /dev/null +++ b/chitra/imports.py @@ -0,0 +1,21 @@ +from chitra.constants import _FLAX, _JAX, _TF, _TF_GPU, _TORCH, _TORCHVISION +from chitra.import_utils import is_installed + 
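# --- Editor's note (hedged sketch, not part of the diff) ---------------------
# Basic usage of the new chitra.image.Chitra class defined above. The file name
# "sample.jpg" and the box coordinates are hypothetical; `cache=True` only has
# an effect when the image is loaded from an http(s) URL.
from chitra.image import Chitra

image = Chitra("sample.jpg", bboxes=[[10, 10, 100, 100]], labels=["person"])
print(image.shape)                  # (height, width, channels) of the loaded image
overlay = image.draw_boxes()        # numpy array with the labelled box drawn on it
resized = image.resize((224, 224))  # forwards to PIL.Image.resize
# -----------------------------------------------------------------------------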
+INSTALLED_MODULES = { + module: is_installed(module) + for module in (_TF, _TF_GPU, _TORCH, _TORCHVISION, _JAX, _FLAX) +} + +_FASTAPI_INSTALLED = is_installed("fastapi") +_UVICORN_INSTALLED = is_installed("uvicorn") +_PYDANTIC_INSTALLED = is_installed("pydantic") +_MULTIPART_INSTALLED = is_installed("multipart") +_SERVE_INSTALLED = ( + _FASTAPI_INSTALLED + and _UVICORN_INSTALLED + and _PYDANTIC_INSTALLED + and _MULTIPART_INSTALLED +) + +_LOGURU_INSTALLED = is_installed("loguru") +_RICH_INSTALLED = is_installed("rich") diff --git a/chitra/logging.py b/chitra/logging.py new file mode 100644 index 00000000..bd08a267 --- /dev/null +++ b/chitra/logging.py @@ -0,0 +1,8 @@ +from chitra.imports import _LOGURU_INSTALLED + +if _LOGURU_INSTALLED: + from loguru import logger +else: + import logging + + logger = logging.getLogger() diff --git a/chitra/serve/__init__.py b/chitra/serve/__init__.py new file mode 100644 index 00000000..95cfe1c7 --- /dev/null +++ b/chitra/serve/__init__.py @@ -0,0 +1,5 @@ +from chitra.serve.api import API, create_api +from chitra.serve.app import GradioApp +from chitra.serve.base import ModelServer +from chitra.serve.cloud.aws_serverless import ChaliceServer +from chitra.serve.cloud.base import CloudServer diff --git a/chitra/serve/api.py b/chitra/serve/api.py new file mode 100644 index 00000000..72f3dff0 --- /dev/null +++ b/chitra/serve/api.py @@ -0,0 +1,136 @@ +from typing import Callable, Dict, Optional + +import uvicorn +from fastapi import FastAPI, File, UploadFile + +from chitra.__about__ import documentation_url +from chitra.serve import schema +from chitra.serve.base import ModelServer +from chitra.serve.constants import IMAGE_CLF, OBJECT_DETECTION, QNA, TXT_CLF + + +class API(ModelServer): + def __init__( + self, + api_type: str, + model: Callable, + preprocess_fn: Optional[Callable] = None, + preprocess_conf: Optional[Dict] = None, + postprocess_fn: Optional[Callable] = None, + postprocess_conf: Optional[Dict] = None, + **kwargs, + ): + """ + Creates FastAPI app for `api_type` + Args: + api_type: Type of the API. See `API.available_api_types()` + model: Any ML/DL model + preprocess_fn: Override Data Preprocessing Function, data will + be processed with this function + before calling model. + postprocess_fn: Override Data Postprocessing Function, model + output will be passed into this function. 
+ **kwargs: + """ + super(API, self).__init__( + api_type, model, preprocess_fn, postprocess_fn, **kwargs + ) + + docs_url = kwargs.get("docs_url", "/docs") + title = kwargs.get("title", "Chitra Model Server 🔥") + desc = kwargs.get( + "description", + f"Goto Chitra Docs 🔗", + ) + + self.app: FastAPI = FastAPI(title=title, description=desc, docs_url=docs_url) + if not preprocess_conf: + preprocess_conf = {} + if not postprocess_conf: + postprocess_conf = {} + self.preprocess_conf = preprocess_conf + self.postprocess_conf = postprocess_conf + self.setup(**kwargs) + + async def predict_image(self, file: UploadFile = File(...)): + preprocess_fn = self.data_processor.preprocess_fn + postprocess_fn = self.data_processor.postprocess_fn + + x = preprocess_fn(await file.read()) + x = self.model(x) + x = postprocess_fn(x) + return x + + async def predict_text(self, data: schema.Query): + data_processor = self.data_processor + x = data.query + if data_processor.preprocess_fn: + x = data_processor.preprocess(x) + x = self.model(x) + if data_processor.postprocess_fn: + x = data_processor.postprocess(x) + return x + + async def predict_question_answer(self, data: schema.QnARequest): + data_processor = self.data_processor + x = data.query, data.question + if data_processor.preprocess_fn: + x = data_processor.preprocess(x) + x = self.model(x) + if data_processor.postprocess_fn: + x = data_processor.postprocess(x) + return x + + def setup(self, **_): + + if self.api_type in (IMAGE_CLF, OBJECT_DETECTION): + self.app.post("/api/predict-image")(self.predict_image) + + elif self.api_type == TXT_CLF: + self.app.post("/api/predict-text")(self.predict_text) + + elif self.api_type == QNA: + self.app.post("/api/QnA")(self.predict_question_answer) + + def run(self): + uvicorn.run(self.app) + + +def create_api( + model: Callable, + api_type: str = "IMAGE-CLASSIFICATION", + preprocess_fn: Callable = None, + preprocess_conf: Optional[Dict] = None, + postprocess_fn: Callable = None, + postprocess_conf: Optional[Dict] = None, + run: bool = False, + **kwargs, +) -> API: + """ + Launch FastAPI app + Args: + model: Any ML/DL model + api_type: Type of the API task, see `chitra.serve.get_available_api_types()` + preprocess_fn: Override default preprocessing function + preprocess_conf: Arguments for preprocessing function + postprocess_fn: Override default postprocessing function + postprocess_conf: Arguments for postprocessing function + run: Set True to run the app + **kwargs: + + Returns: + Object of `chitra.serve.API` class + """ + api = API( + api_type, + model, + preprocess_fn=preprocess_fn, + preprocess_conf=preprocess_conf, + postprocess_fn=postprocess_fn, + postprocess_conf=postprocess_conf, + **kwargs, + ) + + if run: + api.run() + return api diff --git a/chitra/serve/app.py b/chitra/serve/app.py new file mode 100644 index 00000000..bff9900f --- /dev/null +++ b/chitra/serve/app.py @@ -0,0 +1,98 @@ +from typing import Callable, List, Optional, Union + +import gradio as gr +import numpy as np + +from chitra.__about__ import documentation_url +from chitra.serve import constants as const +from chitra.serve.base import ModelServer + + +class GradioApp(ModelServer): + API_TYPES = { + "VISION": (const.IMAGE_CLF, const.OBJECT_DETECTION), + "NLP": (const.TXT_CLF,), + } + + def __init__( + self, + api_type: str, + model: Callable, + input_types: Optional[Union[List, str]] = None, + output_types: Optional[Union[List, str]] = None, + preprocess_fn: Callable = None, + postprocess_fn: Callable = None, + preprocess_conf: 
Optional[dict] = None, + postprocess_conf: Optional[dict] = None, + **kwargs, + ): + super(GradioApp, self).__init__( + api_type, model, preprocess_fn, postprocess_fn, **kwargs + ) + if not preprocess_conf: + preprocess_conf = {} + if not postprocess_conf: + postprocess_conf = {} + + self.title = kwargs.get("title", "Chitra Server") + self.desc = kwargs.get( + "description", + f"Docs 🔗", + ) + self.input_types = input_types + self.output_types = output_types + self.api_type_func = {} + self.preprocess_conf = preprocess_conf + self.postprocess_conf = postprocess_conf + self.setup(**kwargs) + + def setup( + self, + **kwargs, + ): + if self.api_type in (const.IMAGE_CLF, const.OBJECT_DETECTION): + self.api_type_func[self.api_type] = self.single_x_classification + elif self.api_type == const.TXT_CLF: + self.api_type_func[self.api_type] = self.single_x_classification + else: + raise NotImplementedError(f"api_type={self.api_type} not implemented yet!") + + if not self.input_types: + self.input_types = self.get_input_type(**kwargs) + + if not self.output_types: + self.output_types = "json" + + def get_input_type(self, **kwargs): + label = kwargs.get("label") + if self.api_type in (const.IMAGE_CLF, const.OBJECT_DETECTION): + return gr.inputs.Image(shape=kwargs.get("image_shape"), label=label) + + if self.api_type == const.TXT_CLF: + return gr.inputs.Textbox( + lines=2, placeholder=kwargs.get("text_placeholder"), label=label + ) + raise NotImplementedError(f"{self.api_type} API Type is not implemented yet!") + + def single_x_classification(self, x: np.ndarray): + data_processor = self.data_processor + + if data_processor.preprocess_fn: + x = data_processor.preprocess(x, **self.preprocess_conf) + x = self.model(x) + if data_processor.postprocess_fn: + x = data_processor.postprocess(x, **self.postprocess_conf) + return x + + def run(self, share: bool = False, gr_interface_conf: Optional[dict] = None): + if not gr_interface_conf: + gr_interface_conf = {} + + gr.Interface( + fn=self.api_type_func[self.api_type], + inputs=self.input_types, + outputs=self.output_types, + title=self.title, + description=self.desc, + **gr_interface_conf, + ).launch(share=share) diff --git a/chitra/serve/base.py b/chitra/serve/base.py new file mode 100644 index 00000000..350a3383 --- /dev/null +++ b/chitra/serve/base.py @@ -0,0 +1,67 @@ +import abc +import itertools +from typing import Callable, List, Optional + +from chitra.data_processing import ( + DataProcessor, + DefaultTextProcessor, + DefaultVisionProcessor, +) +from chitra.serve import constants as const + + +class ModelServer: + API_TYPES = { + "VISION": (const.IMAGE_CLF, const.OBJECT_DETECTION), + "NLP": (const.TXT_CLF, const.QNA), + } + + def __init__( + self, + api_type: str, + model: Callable, + preprocess_fn=None, + postprocess_fn=None, + preprocess_conf: Optional[dict] = None, + postprocess_conf: Optional[dict] = None, + **kwargs, + ): + if not preprocess_conf: + preprocess_conf = {} + if not postprocess_conf: + postprocess_conf = {} + + self.api_type = api_type.upper() + self.model = model + self.preprocess_conf = preprocess_conf + self.postprocess_conf = postprocess_conf + self.data_processor: Optional[DataProcessor] = self.set_data_processor( + preprocess_fn, postprocess_fn + ) + + @classmethod + def get_available_api_types(cls) -> List[str]: + return list(itertools.chain.from_iterable(cls.API_TYPES.values())) + + def set_data_processor( + self, preprocess_fn: Callable, postprocess_fn: Callable + ) -> DataProcessor: + data_preprocessor = 
self.set_default_processor() + if preprocess_fn: + data_preprocessor.set_preprocess_fn(preprocess_fn) + if postprocess_fn: + data_preprocessor.set_postprocess_fn(postprocess_fn) + return data_preprocessor + + def set_default_processor(self) -> DataProcessor: + api_type = self.api_type + if api_type in ModelServer.API_TYPES.get("VISION"): + self.data_processor = DefaultVisionProcessor.vision + elif api_type in ModelServer.API_TYPES.get("NLP"): + self.data_processor = DefaultTextProcessor.nlp + else: + raise NotImplementedError( + f"{api_type} is not implemented! Available types are -\ + {ModelServer.get_available_api_types()}" + ) + return self.data_processor diff --git a/chitra/serve/cloud/__init__.py b/chitra/serve/cloud/__init__.py new file mode 100644 index 00000000..034a376a --- /dev/null +++ b/chitra/serve/cloud/__init__.py @@ -0,0 +1 @@ +from .aws_serverless import ChaliceServer diff --git a/chitra/serve/cloud/aws_serverless.py b/chitra/serve/cloud/aws_serverless.py new file mode 100644 index 00000000..0aac33eb --- /dev/null +++ b/chitra/serve/cloud/aws_serverless.py @@ -0,0 +1,68 @@ +from typing import Callable, List, Optional + +from chalice import Chalice, Rate + +from chitra.logging import logger +from chitra.serve.cloud.base import CloudServer + +S3 = "s3" +GCS = "gcs" + +RATE_UNIT = {"m": Rate.MINUTES, "h": Rate.HOURS, "d": Rate.DAYS} + + +class ChaliceServer(CloudServer): + INVOKE_METHODS = ("route",) + + def __init__( + self, + api_type: str, + model_path: str, + model_loader: Callable, + preprocess_fn: Callable = None, + postprocess_fn: Callable = None, + **kwargs, + ): + super().__init__( + api_type, + model_path=model_path, + model_loader=model_loader, + preprocess_fn=preprocess_fn, + postprocess_fn=postprocess_fn, + **kwargs, + ) + + self.app = Chalice(app_name=kwargs.get("name", "chitra-server")) + + @staticmethod + def index(): + return {"hello": "world"} + + def predict(self) -> dict: + + data_processor = self.data_processor + x = self.app.current_request.raw_body + logger.debug(f"raw body type={type(x)}") + if data_processor.preprocess_fn: + x = data_processor.preprocess(x, **self.preprocess_conf) + x = self.model(x) + if data_processor.postprocess_fn: + x = data_processor.postprocess(x, **self.postprocess_conf) + return x + + def run(self, invoke_method: str, content_types: Optional[List] = None, **kwargs): + invoke_method = invoke_method.lower() + if not content_types: + content_types = [] + + if invoke_method not in self.INVOKE_METHODS: + raise NotImplementedError( + f"invoke method={invoke_method} not implemented yet. 
Please select {self.INVOKE_METHODS}" + ) + + if invoke_method == "route": + route_path = kwargs.get("path", "/predict") + self.app.route("/", methods=["GET"])(self.index) + self.app.route(route_path, methods=["POST"], content_types=content_types)( + self.predict + ) diff --git a/chitra/serve/cloud/base.py b/chitra/serve/cloud/base.py new file mode 100644 index 00000000..bc764be3 --- /dev/null +++ b/chitra/serve/cloud/base.py @@ -0,0 +1,47 @@ +import abc +import io +from abc import ABC +from typing import Callable, Optional + +import smart_open + +from chitra.serve.base import ModelServer + + +class CloudServer(ModelServer, ABC): + def __init__( + self, + api_type: str, + model_path: str, + model_loader: Callable, + preprocess_fn: Optional[Callable] = None, + postprocess_fn: Optional[Callable] = None, + **kwargs + ): + raw_model = self.download_model(model_path, **kwargs) + model = model_loader(raw_model) + + super().__init__( + api_type, + model, + preprocess_fn=preprocess_fn, + postprocess_fn=postprocess_fn, + **kwargs + ) + + @staticmethod + def download_model(path: str, **kwargs) -> io.BytesIO: + """ + Download model from cloud + ref: http://5.9.10.113/67706477/load-pytorch-model-from-s3-bucket + Args: + path: + **kwargs: + + Returns: + + """ + + with smart_open.open(path, mode="rb", **kwargs) as fr: + data = io.BytesIO(fr.read()) + return data diff --git a/chitra/serve/constants.py b/chitra/serve/constants.py new file mode 100644 index 00000000..457c975d --- /dev/null +++ b/chitra/serve/constants.py @@ -0,0 +1,4 @@ +IMAGE_CLF = "IMAGE-CLASSIFICATION" +OBJECT_DETECTION = "OBJECT-DETECTION" +TXT_CLF = "TEXT-CLASSIFICATION" +QNA = "QUESTION-ANS" diff --git a/chitra/serve/schema/__init__.py b/chitra/serve/schema/__init__.py new file mode 100644 index 00000000..62c86dd9 --- /dev/null +++ b/chitra/serve/schema/__init__.py @@ -0,0 +1 @@ +from chitra.serve.schema.text_models import QnARequest, QnAResponse, Query, QueryResult diff --git a/chitra/serve/schema/text_models.py b/chitra/serve/schema/text_models.py new file mode 100644 index 00000000..4c5ad0cc --- /dev/null +++ b/chitra/serve/schema/text_models.py @@ -0,0 +1,17 @@ +from pydantic import BaseModel + + +class Query(BaseModel): + query: str + + +class QueryResult(Query): + result: str + + +class QnARequest(Query): + question: str + + +class QnAResponse(QnARequest): + result: str diff --git a/docs/old_source/_data/terms.yml b/chitra/serve/tf_serving/__init__.py similarity index 100% rename from docs/old_source/_data/terms.yml rename to chitra/serve/tf_serving/__init__.py diff --git a/chitra/serve/tf_serving/client.py b/chitra/serve/tf_serving/client.py new file mode 100644 index 00000000..adeec7a5 --- /dev/null +++ b/chitra/serve/tf_serving/client.py @@ -0,0 +1,72 @@ +from typing import Any, Callable, Optional + +import grpc +import tensorflow as tf +from tensorflow_serving.apis import predict_pb2, prediction_service_pb2_grpc + + +def create_grpc_stub( + host: str = "localhost", port: str = "8500" +) -> prediction_service_pb2_grpc.PredictionServiceStub: + hostport = f"{host}:{port}" + channel = grpc.insecure_channel(hostport) + stub = prediction_service_pb2_grpc.PredictionServiceStub(channel) + return stub + + +def grpc_request( + stub: prediction_service_pb2_grpc.PredictionServiceStub, + data_sample: Any, + input_name: str, + model_name: str, + signature_name: str, + callback: Optional = None, + grpc_timeout: int = 20, + async_: bool = False, +): + request = predict_pb2.PredictRequest() + request.model_spec.name = model_name + 
request.model_spec.signature_name = signature_name + + request.inputs[input_name].CopyFrom( + tf.make_tensor_proto(data_sample, shape=data_sample.shape) + ) + + if async_: + result_future = stub.Predict.future(request, 5) # 5 seconds + else: + result_future = stub.Predict(request, grpc_timeout) + + if callback is not None: + return callback(result_future) + + return result_future + + +class GrpcClient: + def __init__(self, host: str = "localhost", port: str = "8500"): + super().__init__() + self.stub = create_grpc_stub(host, port) + + def request( + self, + data_sample, + input_name: str, + model_name: str, + signature_name: str, + callback: Callable = None, + grpc_timeout: int = 20, + async_: bool = False, + ): + stub = self.stub + response = grpc_request( + stub, + data_sample, + input_name, + model_name, + signature_name, + callback, + grpc_timeout, + async_, + ) + return response diff --git a/chitra/serve/torchserve/__init__.py b/chitra/serve/torchserve/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/chitra/tf_image.py b/chitra/tf_image.py deleted file mode 100644 index e52696b0..00000000 --- a/chitra/tf_image.py +++ /dev/null @@ -1,28 +0,0 @@ -# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/01_image.ipynb (unless otherwise specified). - -__all__ = ['read_image', 'resize_image'] - -# Cell -from typing import Union - -import tensorflow as tf - - -# Cell -def read_image(path: str, channels: int = 3): - """Reads an image file from the path and return the rgb image in tf.Tensor format.""" - img: tf.Tensor = tf.io.read_file(path) - img: tf.Tensor = tf.io.decode_image(img, - channels=channels, - expand_animations=False) - return img - - -def resize_image(image: tf.Tensor, size: Union[tf.Tensor, tuple], **kwargs): - """Resize image to the target `size`: Union[tf.Tensor, tuple]""" - assert isinstance( - image, - tf.Tensor), f"image must be of type tf.Tensor but passed {type(image)}" - assert isinstance(size, (tuple, tf.Tensor)) - method = kwargs.get("method", "bilinear") - return tf.image.resize(image, size, method) diff --git a/chitra/trainer.py b/chitra/trainer.py index b2c9928e..12238d65 100644 --- a/chitra/trainer.py +++ b/chitra/trainer.py @@ -1,16 +1,6 @@ -# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/03_trainer.ipynb (unless otherwise specified). 
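# --- Editor's note (hedged sketch, not part of the diff) ---------------------
# Calling a TensorFlow Serving instance through the GrpcClient added above.
# Assumes a server is already listening on localhost:8500; "my_model",
# "input_1" and "serving_default" are hypothetical model/tensor/signature names.
import numpy as np

from chitra.serve.tf_serving.client import GrpcClient

client = GrpcClient(host="localhost", port="8500")
batch = np.zeros((1, 224, 224, 3), dtype=np.float32)  # dummy input batch
response = client.request(
    batch,
    input_name="input_1",
    model_name="my_model",
    signature_name="serving_default",
)
print(response.outputs)  # PredictResponse protobuf holding the model outputs
# -----------------------------------------------------------------------------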
- -__all__ = [ - 'MODEL_DICT', 'OPT_DICT', 'create_classifier', 'create_cnn', 'Trainer', - 'InterpretModel', 'Learner' -] - -# Cell import inspect -# Cell from functools import partial -# Cell -from typing import Union +from typing import Any, List, Optional, Union import matplotlib.cm as cm import matplotlib.pyplot as plt @@ -18,13 +8,20 @@ import tensorflow as tf import tensorflow_addons as tfa from PIL import Image +from tensorflow import keras from tensorflow.keras.models import Model from tf_keras_vis.gradcam import Gradcam, GradcamPlusPlus from tf_keras_vis.utils import normalize from typeguard import check_argument_types, typechecked +from chitra.import_utils import is_installed + from .datagenerator import Dataset +pl = None +if is_installed("pytorch_lightning"): + import pytorch_lightning as pl + MODEL_DICT = {} for name, func in inspect.getmembers(tf.keras.applications): if inspect.isfunction(func): @@ -36,7 +33,6 @@ OPT_DICT[name.lower()] = func -# Cell @typechecked def _get_base_cnn( base_model: Union[str, Model], @@ -45,23 +41,25 @@ def _get_base_cnn( include_top: bool = False, ) -> Model: if isinstance(base_model, str): - assert (base_model in MODEL_DICT.keys() - ), f"base_model name must be in {tuple(MODEL_DICT.keys())}" + if base_model not in MODEL_DICT.keys(): + raise AssertionError( + f"base_model name must be in {tuple(MODEL_DICT.keys())}" + ) base_model = MODEL_DICT[base_model] - base_model = base_model(include_top=include_top, - pooling=pooling, - weights=weights) + base_model = base_model( + include_top=include_top, pooling=pooling, weights=weights + ) return base_model -# Cell @typechecked -def _add_output_layers(base_model: Model, - outputs: int, - drop_out: Union[float, None] = None, - name=None) -> Model: +def _add_output_layers( + base_model: Model, + outputs: int, + drop_out: Union[float, None] = None, + name: Optional[str] = None, +) -> Model: x = base_model.output - # x = tf.keras.layers.GlobalMaxPool2D()(x) if drop_out: x = tf.keras.layers.Dropout(drop_out)(x) x = tf.keras.layers.Dense(outputs, name="output")(x) @@ -70,37 +68,32 @@ def _add_output_layers(base_model: Model, return model -# Cell def create_classifier( base_model_fn: callable, num_classes: int, weights="imagenet", - dropout=0, + dropout: Optional[float] = None, include_top=False, - name=None, ): - outputs = 1 if num_classes == 2 else num_classes - base_model = base_model_fn( include_top=include_top, weights=weights, ) if include_top: return base_model - drop_out = 0.5 - outputs = 1 + + outputs = 1 if num_classes == 2 else num_classes x = base_model.output x = tf.keras.layers.GlobalMaxPool2D()(x) - if drop_out: - x = tf.keras.layers.Dropout(drop_out)(x) + if dropout: + x = tf.keras.layers.Dropout(dropout)(x) x = tf.keras.layers.Dense(outputs, name="output")(x) model = tf.keras.Model(base_model.input, x) return model -# Cell @typechecked def create_cnn( base_model: Union[str, Model], @@ -109,7 +102,7 @@ def create_cnn( keras_applications: bool = True, pooling: str = "avg", weights: Union[str, None] = "imagenet", - name=None, + name: Optional[str] = None, ) -> Model: assert pooling in ("avg", "max") @@ -119,18 +112,13 @@ def create_cnn( else: outputs = num_classes else: - print(f"num_classes is ignored. returning the passed model as it is.") + print("num_classes is ignored. 
returning the passed model as it is.") if isinstance(base_model, (str, Model)) and keras_applications: - base_model = _get_base_cnn(base_model, - pooling=pooling, - weights=weights) - assert ("pool" in base_model.layers[-1].name - ), f"base_model last layer must be a pooling layer" - model = _add_output_layers(base_model, - outputs, - drop_out=drop_out, - name=name) + base_model = _get_base_cnn(base_model, pooling=pooling, weights=weights) + if "pool" not in base_model.layers[-1].name: + raise AssertionError("base_model last layer must be a pooling layer") + model = _add_output_layers(base_model, outputs, drop_out=drop_out, name=name) elif isinstance(base_model, Model) and keras_applications is False: model = base_model @@ -139,31 +127,27 @@ def create_cnn( model = _get_base_cnn(base_model, weights="imagenet", include_top=True) else: - print(f"Invalid arguments!") + print("Invalid arguments!") return model -# Cell class Trainer(Model): - """ - The Trainer class inherits tf.keras.Model and contains everything a model needs for training. - It exposes trainer.cyclic_fit method which trains the model using Cyclic Learning rate discovered by Leslie Smith. + """The Trainer class inherits tf.keras.Model and contains everything a + model needs for training. It exposes trainer.cyclic_fit method which trains + the model using Cyclic Learning rate discovered by Leslie Smith. Arguments: ds: Dataset object model: object of type tf.keras.Model num_classes (int, None): number of classes in the dataset. If None then will auto infer from Dataset - """ _AUTOTUNE = tf.data.experimental.AUTOTUNE @typechecked - def __init__(self, - ds: Dataset, - model: Model, - num_classes: Union[int, None] = None, - **kwargs): + def __init__( + self, ds: Dataset, model: Model, num_classes: Union[int, None] = None, **kwargs + ): assert check_argument_types() super(Trainer, self).__init__() @@ -176,15 +160,18 @@ def __init__(self, self.gradcam = None self.model = model self.cyclic_opt_set = False + self.max_lr, self.min_lr = None, None + self.batch_size = None + self.step_size = None def build(self): - pass + raise NotImplementedError( + 'Build method is not implemented in Trainer! Please use "model.model.build" instead.' + ) def summary(self): return self.model.summary() - # def get_layer(name=None, index=None): return self.model(name, index) - def compile(self, *args, **kwargs): return self.model.compile(*args, **kwargs) @@ -195,9 +182,12 @@ def fit(self, *args, **kwargs): return self.model.fit(*args, **kwargs) def warmup(self): - pass + raise NotImplementedError( + "warmup is not implemented yet! Would you like to raise a PR to chitra?" 
+ ) - def prewhiten(self, image): + @staticmethod + def prewhiten(image): image = tf.cast(image, tf.float32) image = image / 127.5 - 1.0 return image @@ -208,17 +198,16 @@ def rescale(self, image, label): def _get_optimizer(self, optimizer, momentum=0.9, **kwargs): if optimizer.__name__ == "SGD": - optimizer = partial(optimizer, - momentum=momentum, - nesterov=kwargs.get("nesterov", True)) + optimizer = partial( + optimizer, momentum=momentum, nesterov=kwargs.get("nesterov", True) + ) else: optimizer = partial( optimizer, - momentum=momentum, ) return optimizer - def _prepare_dl(self, bs=8, shuffle=True): + def _prepare_dl(self, bs: int = 8, shuffle: bool = True): ds = self.ds dl = ds.get_tf_dataset(shuffle=shuffle) dl = dl.map(self.rescale, Trainer._AUTOTUNE) @@ -229,36 +218,40 @@ def cyclic_fit( epochs: int, batch_size: int, lr_range: Union[tuple, list] = (1e-4, 1e-2), - optimizer=tf.keras.optimizers.SGD, - momentum=0.9, - validation_data=None, - callbacks=None, + optimizer: tf.keras.optimizers.Optimizer = tf.keras.optimizers.SGD, + momentum: float = 0.9, + validation_data: Any = None, + callbacks: Optional[List] = None, *args, **kwargs, ): - """Trains model on ds as train data with cyclic learning rate. - Dataset will be automatically converted into `tf.data` format and images will be prewhitened in range of [-1, 1]. - Cyclical Learning Rates for Training Neural Networks: https://arxiv.org/abs/1506.01186 + """Trains model on ds as train data with cyclic learning rate. Dataset + will be automatically converted into `tf.data` format and images will + be prewhitened in range of [-1, 1]. Cyclical Learning Rates for + Training Neural Networks: https://arxiv.org/abs/1506.01186. Args: epochs (int): number of epochs for training batch_size (int): batch size lr_range (tuple): learning rate will cycle from lr_min to lr_max optimizer (callable): Keras callable optimizer - momentum(int): momentum for the optimizer + momentum(float): momentum for the optimizer + validation_data: Data on which to evaluate + callbacks: List of `tf.keras.callbacks` instances. kwargs: - step_size (int): step size for the Cyclic learning rate. By default it is `2 * len(self.ds)//batch_size` + step_size (int): step size for the Cyclic learning rate. 
+ By default it is `2 * (self.ds.num_files//batch_size)` scale_mode (str): cycle or exp shuffle(bool): Dataset will be shuffle on each epoch if True """ + self.step_size = 2 * (self.ds.num_files // batch_size) + self.ds.step_size = self.step_size if not self.cyclic_opt_set: self.max_lr, self.min_lr = lr_range - ds = self.ds - step_size = 2 * len(self.ds) // batch_size lr_schedule = tfa.optimizers.Triangular2CyclicalLearningRate( initial_learning_rate=lr_range[0], maximal_learning_rate=lr_range[1], - step_size=kwargs.get("step_size", step_size), + step_size=kwargs.get("step_size", self.step_size), scale_mode=kwargs.get("scale_mode", "cycle"), ) @@ -271,6 +264,7 @@ def cyclic_fit( return self.model.fit( self._prepare_dl(batch_size, kwargs.get("shuffle", True)), + steps_per_epoch=self.step_size, validation_data=validation_data, epochs=epochs, callbacks=callbacks, @@ -280,17 +274,15 @@ def cyclic_fit( def compile2( self, batch_size: int, - optimizer: Union[str, tf.keras.optimizers.Optimizer] = "adam", + optimizer: Union[None, str, tf.keras.optimizers.Optimizer] = None, lr_range: Union[tuple, list] = (1e-4, 1e-2), - loss=None, + loss: Optional[tf.keras.losses.Loss] = None, metrics=None, - loss_weights=None, - weighted_metrics=None, - run_eagerly=None, **kwargs, ): """Compile2 compiles the model of Trainer for cyclic learning rate. - Cyclical Learning Rates for Training Neural Networks: https://arxiv.org/abs/1506.01186 + Cyclical Learning Rates for Training Neural Networks: + https://arxiv.org/abs/1506.01186. Args: batch_size (int): batch size @@ -298,19 +290,22 @@ def compile2( optimizer (str, keras.optimizer.Optimizer): Keras optimizer kwargs: - step_size (int): step size for the Cyclic learning rate. By default it is `2 * len(self.ds)//batch_size` + step_size (int): step size for the Cyclic learning rate. 
+ By default it is `2 * (self.ds.num_files // batch_size)` scale_mode (str): cycle or exp momentum(int): momentum for the optimizer when optimizer is of type str """ + if not optimizer: + optimizer = "adam" self.max_lr, self.min_lr = lr_range self.batch_size = batch_size - self.step_size = step_size = 2 * len(self.ds) // batch_size + self.step_size = 2 * (self.ds.num_files // batch_size) lr_schedule = tfa.optimizers.Triangular2CyclicalLearningRate( initial_learning_rate=lr_range[0], maximal_learning_rate=lr_range[1], - step_size=kwargs.get("step_size", step_size), + step_size=kwargs.get("step_size", self.step_size), scale_mode=kwargs.get("scale_mode", "cycle"), ) @@ -325,15 +320,11 @@ def compile2( self.compile(optimizer=optimizer, loss=loss, metrics=metrics) self.cyclic_opt_set = True - print(f"Model compiled!") + print("Model compiled!") -# Cell class InterpretModel: - def __init__(self, - gradcam_pp: bool, - learner: Trainer, - clone: bool = False): + def __init__(self, gradcam_pp: bool, learner: Trainer, clone: bool = False): """Args: gradcam_pp: if True GradCam class will be used else GradCamPlusplus clone: whether GradCam will clone learner.model @@ -344,18 +335,11 @@ def __init__(self, self.gradcam_fn = Gradcam self.learner = learner - self.gradcam = self.gradcam_fn(learner.model, - self.model_modifier, - clone=clone) - - # if self.learner.include_top is not True: - # self.gradcam._find_penultimate_output = self.patch + self.gradcam = self.gradcam_fn(learner.model, self.model_modifier, clone=clone) - def __call__(self, - image: Image.Image, - auto_resize: bool = True, - image_size=None): - # assert check_argument_types() + def __call__( + self, image: Image.Image, auto_resize: bool = True, image_size=None + ) -> None: gradcam = self.gradcam get_loss = self.get_loss if auto_resize and image_size is None: @@ -379,14 +363,9 @@ def __call__(self, plt.imshow(heatmap, cmap="jet", alpha=0.5) plt.show() - def __patch(self, *args, **kwargs): - """Path _find_penultimate_output method of tf_keras_vis""" - if self.learner.include_top: - return self.learner.model.layers[-1].output - return self.learner.model.layers[0].get_output_at(-1) - - def model_modifier(self, m): - """Sets last activation to linear""" + @staticmethod + def model_modifier(m): + """Sets last activation to linear.""" m.layers[-1].activation = tf.keras.activations.linear return m @@ -395,29 +374,18 @@ def get_loss(self, preds): ret = preds[0] else: index = tf.argmax(tf.math.softmax(preds), axis=1)[0] - # print(index, preds.shape) ret = preds[0, index] print(f"index: {index}") return ret -# Cell -from typing import Union - -import pytorch_lightning as pl -from tensorflow import keras - -from .converter.core import pytorch_to_onnx, tf2_to_onnx - - -# Cell class Learner: TF = ("TF", "TENSORFLOW") PT = ("PYTORCH", "PT", "TORCH") - def __init__(self, - model: Union[pl.LightningModule, keras.models.Model], - mode: str = "TF"): + def __init__( + self, model: Union["pl.LightningModule", "keras.models.Model"], mode: str = "TF" + ): self.MODE = mode.upper() self.model = model self.epochs_trained = 0 @@ -425,18 +393,18 @@ def __init__(self, if self.MODE in Learner.PT: self.trainer = None - def fit(self, - train_data, - epochs, - val_data=None, - test_data=None, - callbacks=None, - **kwargs): - """train models - For TF: - Just pass train data and start training - For PyTorch: - You can enter configs to Lightning Trainer + def fit( + self, + train_data, + epochs, + val_data=None, + callbacks=None, + **kwargs, + ): + """train models For TF: 
+ + Just pass train data and start training For PyTorch: You can + enter configs to Lightning Trainer """ MODE = self.MODE initial_epoch = self.epochs_trained @@ -450,16 +418,8 @@ def fit(self, validation_data=val_data, callbacks=callbacks, ) - elif MODE in Learner.PT: - lit_confs = kwargs.get('LIT_TRAINER_CONFIG', {}) + if MODE in Learner.PT: + lit_confs = kwargs.get("LIT_TRAINER_CONFIG", {}) if not self.trainer: self.trainer = pl.Trainer(max_epochs=epochs, **lit_confs) return self.trainer.fit(self.model, train_data, val_data) - - def to_onnx(self, tensor=None, export_path=None): - MODE = self.MODE - if MODE in Learner.TF: - return tf2_to_onnx(self.model, output_path=export_path) - - if MODE in Learner.PT: - return pytorch_to_onnx(self.model, tensor, export_path) diff --git a/chitra/utility/import_utils.py b/chitra/utility/import_utils.py deleted file mode 100644 index 2babd235..00000000 --- a/chitra/utility/import_utils.py +++ /dev/null @@ -1,15 +0,0 @@ -__all__ = ['is_installed', 'INSTALLED_MODULES'] - -import importlib - -from chitra.constants import _FLAX, _JAX, _TF, _TF_GPU, _TORCH, _TORCHVISION - - -def is_installed(module_name: str): - return importlib.util.find_spec(module_name) is not None - - -INSTALLED_MODULES = { - module: is_installed(module) - for module in (_TF, _TF_GPU, _TORCH, _TORCHVISION, _JAX, _FLAX) -} diff --git a/chitra/utility/tf_utils.py b/chitra/utility/tf_utils.py index e72a4866..e7da6698 100644 --- a/chitra/utility/tf_utils.py +++ b/chitra/utility/tf_utils.py @@ -1,61 +1,64 @@ -__all__ = ['disable_gpu', 'limit_gpu', 'gpu_dynamic_mem_growth'] - import os import tensorflow as tf def disable_gpu(): - """ - disable gpu for tensorflow - """ + """disable gpu for tensorflow.""" os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"] = str(-1) -# export +def get_basename(path: tf.string) -> tf.string: + if not isinstance(path, tf.Tensor): + raise AssertionError + return tf.strings.split(path, os.path.sep)[-1] + + def limit_gpu(gpu_id: int, memory_limit: int): - """ - limit the selected gpu [gpu_id] by [memory_limit] MB - """ + """limit the selected gpu [gpu_id] by [memory_limit] MB.""" os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id) - gpus = tf.config.list_physical_devices('GPU') + gpus = tf.config.list_physical_devices("GPU") + if len(gpus) >= gpu_id + 1: + raise AssertionError if gpus: # Restrict TensorFlow to only allocate [memory MB] of memory on the first GPU try: tf.config.experimental.set_virtual_device_configuration( - gpus[0], [ + gpus[gpu_id], + [ tf.config.experimental.VirtualDeviceConfiguration( - memory_limit=memory_limit) - ]) - logical_gpus = tf.config.list_logical_devices('GPU') - print(len(gpus), "Physical GPUs,", len(logical_gpus), - "Logical GPUs") + memory_limit=memory_limit + ) + ], + ) + logical_gpus = tf.config.list_logical_devices("GPU") + print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs") except RuntimeError as e: # Virtual devices must be set before GPUs have been initialized print(e) else: - print(f'No GPU:{gpu_id} found in your system!') + print(f"No GPU:{gpu_id} found in your system!") -# Cell def gpu_dynamic_mem_growth(): + """Borrowed from + https://github.com/philipperemy/keract/tree/master/examples. 
+ + Check for GPUs and set them to dynamically grow memory as needed + Avoids OOM from tensorflow greedily allocating GPU memory """ - Borrowed from https://github.com/philipperemy/keract/tree/master/examples - """ - # Check for GPUs and set them to dynamically grow memory as needed - # Avoids OOM from tensorflow greedily allocating GPU memory try: - gpu_devices = tf.config.list_physical_devices('GPU') + gpu_devices = tf.config.list_physical_devices("GPU") if len(gpu_devices) > 0: for gpu in gpu_devices: tf.config.experimental.set_memory_growth(gpu, True) - print('GPU dynamic memory growth enabled') + print("GPU dynamic memory growth enabled") else: - print('No GPU found on the machine!') + print("No GPU found on the machine!") except AttributeError: print( - 'Upgrade your tensorflow to 2.x to have the gpu_dynamic_mem_growth feature.' + "Upgrade your tensorflow to 2.x to have the gpu_dynamic_mem_growth feature." ) diff --git a/chitra/visualization.py b/chitra/visualization.py deleted file mode 100644 index 130aae45..00000000 --- a/chitra/visualization.py +++ /dev/null @@ -1,184 +0,0 @@ -# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/05_visualization.ipynb (unless otherwise specified). - -__all__ = [ - 'label_color', 'colors', 'colors', 'draw_box', 'draw_caption', - 'draw_boxes', 'draw_detections', 'draw_annotations' -] - -# Cell -""" -Copyright 2017-2018 Fizyr (https://fizyr.com) - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -""" - -# Cell -import warnings - - -# Cell -def label_color(label): - """ Return a color from a set of predefined colors. Contains 80 colors in total. - - Args - label: The label to get the color for. - - Returns - A list of three values representing a RGB color. - - If no color is defined for a certain label, the color green is returned and a warning is printed. 
- """ - if label < len(colors): - return colors[label] - else: - warnings.warn( - 'Label {} has no color, returning default.'.format(label)) - return (0, 255, 0) - - -""" -Generated using: - -``` -colors = [list((matplotlib.colors.hsv_to_rgb([x, 1.0, 1.0]) * 255).astype(int)) for x in np.arange(0, 1, 1.0 / 80)] -shuffle(colors) -pprint(colors) -``` -""" -colors = [[31, 0, 255], [0, 159, 255], [255, 95, 0], [255, 19, 0], [255, 0, 0], - [255, 38, 0], [0, 255, 25], [255, 0, 133], [255, 172, 0], - [108, 0, 255], [0, 82, 255], [0, 255, 6], [255, 0, - 152], [223, 0, 255], - [12, 0, 255], [0, 255, 178], [108, 255, 0], [184, 0, 255], - [255, 0, 76], [146, 255, 0], [51, 0, 255], [0, 197, 255], - [255, 248, 0], [255, 0, 19], [255, 0, 38], [89, 255, 0], - [127, 255, 0], [255, 153, 0], [0, 255, 255], [0, 255, 216], - [0, 255, 121], [255, 0, 248], [70, 0, 255], [0, 255, 159], - [0, 216, 255], [0, 6, 255], [0, 63, 255], [31, 255, 0], [255, 57, 0], - [255, 0, 210], [0, 255, 102], [242, 255, 0], [255, 191, 0], - [0, 255, 63], [255, 0, 95], [146, 0, 255], [184, 255, 0], - [255, 114, 0], [0, 255, 235], [255, 229, 0], [0, 178, 255], - [255, 0, 114], [255, 0, 57], [0, 140, 255], [0, 121, 255], - [12, 255, 0], [255, 210, 0], [0, 255, 44], [165, 255, - 0], [0, 25, 255], - [0, 255, 140], [0, 101, 255], [0, 255, 82], [223, 255, 0], - [242, 0, 255], [89, 0, 255], [165, 0, 255], [70, 255, 0], - [255, 0, 172], [255, 76, 0], [203, 255, 0], [204, 0, 255], - [255, 0, 229], [255, 133, 0], [127, 0, 255], [0, 235, 255], - [0, 255, 197], [255, 0, 191], [0, 44, 255], [50, 255, 0]] - -# Cell -import cv2 -import numpy as np - - -def draw_box(image, box, color, thickness=2): - """ Draws a box on an image with a given color. - - # Arguments - image : The image to draw on. - box : A list of 4 elements (x1, y1, x2, y2). - color : The color of the box. - thickness : The thickness of the lines to draw a box with. - """ - b = np.array(box).astype(np.int32) - cv2.rectangle(image, (b[0], b[1]), (b[2], b[3]), color, thickness, - cv2.LINE_AA) - - -def draw_caption(image, box, caption): - """ Draws a caption above the box in an image. - - # Arguments - image : The image to draw on. - box : A list of 4 elements (x1, y1, x2, y2). - caption : String containing the text to draw. - """ - b = np.array(box).astype(int) - cv2.putText(image, caption, (b[0], b[1] - 10), cv2.FONT_HERSHEY_PLAIN, 1, - (0, 0, 0), 2) - cv2.putText(image, caption, (b[0], b[1] - 10), cv2.FONT_HERSHEY_PLAIN, 1, - (255, 255, 255), 1) - - -def draw_boxes(image, boxes, color, thickness=2): - """ Draws boxes on an image with a given color. - - # Arguments - image : The image to draw on. - boxes : A [N, 4] matrix (x1, y1, x2, y2). - color : The color of the boxes. - thickness : The thickness of the lines to draw boxes with. - """ - for b in boxes: - draw_box(image, b, color, thickness=thickness) - - -def draw_detections(image, - boxes, - scores, - labels, - color=None, - label_to_name=None, - score_threshold=0.5): - """ Draws detections in an image. - - # Arguments - image : The image to draw on. - boxes : A [N, 4] matrix (x1, y1, x2, y2). - scores : A list of N classification scores. - labels : A list of N labels. - color : The color of the boxes. By default the color from keras_retinanet.utils.colors.label_color will be used. - label_to_name : (optional) Functor for mapping a label to a name. - score_threshold : Threshold used for determining what detections to draw. 
- """ - selection = np.where(scores > score_threshold)[0] - - for i in selection: - c = color if color is not None else label_color(labels[i]) - draw_box(image, boxes[i, :], color=c) - - # draw labels - caption = (label_to_name(labels[i]) if label_to_name else - labels[i]) + ': {0:.2f}'.format(scores[i]) - draw_caption(image, boxes[i, :], caption) - - -def draw_annotations(image, - annotations, - color=(0, 255, 0), - label_to_name=None): - """ Draws annotations in an image. - - # Arguments - image : The image to draw on. - annotations : A [N, 5] matrix (x1, y1, x2, y2, label) or dictionary containing bboxes (shaped [N, 4]) and labels (shaped [N]). - color : The color of the boxes. By default the color from keras_retinanet.utils.colors.label_color will be used. - label_to_name : (optional) Functor for mapping a label to a name. - """ - if isinstance(annotations, np.ndarray): - annotations = { - 'bboxes': annotations[:, :4], - 'labels': annotations[:, 4] - } - - assert ('bboxes' in annotations) - assert ('labels' in annotations) - assert (annotations['bboxes'].shape[0] == annotations['labels'].shape[0]) - - for i in range(annotations['bboxes'].shape[0]): - label = annotations['labels'][i] - c = color if color is not None else label_color(label) - caption = '{}'.format(label_to_name(label) if label_to_name else label) - draw_caption(image, annotations['bboxes'][i], caption) - draw_box(image, annotations['bboxes'][i], color=c) diff --git a/chitra/visualization/__init__.py b/chitra/visualization/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/chitra/visualization/metrics.py b/chitra/visualization/metrics.py new file mode 100644 index 00000000..4cdef62d --- /dev/null +++ b/chitra/visualization/metrics.py @@ -0,0 +1,65 @@ +"""https://www.kaggle.com/grfiv4/plot-a-confusion-matrix.""" +from itertools import product +from typing import List, Union + +import matplotlib.pyplot as plt +import numpy as np +from sklearn.metrics import confusion_matrix + +from chitra.logging import logger + + +def detect_multilabel(labels: Union[List, np.ndarray]): + if isinstance(labels, (np.ndarray, list, tuple)): + labels = np.unique(labels) + else: + raise UserWarning( + f"expect data type for label was list or np.ndarray but got {type(labels)}" + ) + + if len(labels) > 2: + return True + return False + + +def cm_accuracy(cm: np.ndarray): + return np.trace(cm) / float(np.sum(cm)) + + +def plot_confusion_matrix( + y_pred: Union[np.ndarray, List], + y_true: Union[np.ndarray, List], + display_labels=None, + include_values: bool = True, + title: str = "Confusion Matrix", + cmap: str = None, +): + if detect_multilabel(y_true): + logger.warning("You might want to use multi-label version!") + + if display_labels is None: + display_labels = np.unique(y_true) + + n_classes = len(display_labels) + tick_marks = np.arange(n_classes) + + if cmap is None: + cmap = plt.get_cmap("Blues") + + cm = confusion_matrix(y_true, y_pred) + accuracy = cm_accuracy(cm) + error = 1 - accuracy + + plt.imshow(cm, cmap=cmap) + + if include_values: + for i, j in product(range(n_classes), range(n_classes)): + plt.text(i, j, "{:,}".format(cm[i, j])) + + plt.xticks(tick_marks, display_labels, rotation=45) + plt.yticks(tick_marks, display_labels) + plt.title(title) + plt.xlabel(f"Predicted Label\nAccuracy={accuracy:0.4f}; Error={error:0.4f}") + plt.ylabel("True Label") + + plt.show() diff --git a/chitra_banner.png b/chitra_banner.png index 3b515a40..40eaa7aa 100644 Binary files a/chitra_banner.png and b/chitra_banner.png differ 
diff --git a/docs/.gitignore b/docs/.gitignore deleted file mode 100644 index 57510a2b..00000000 --- a/docs/.gitignore +++ /dev/null @@ -1 +0,0 @@ -_site/ diff --git a/docs/CNAME b/docs/CNAME new file mode 100644 index 00000000..2cb11a1a --- /dev/null +++ b/docs/CNAME @@ -0,0 +1 @@ +chitra.aniketmaurya.com diff --git a/docs/_config.yml b/docs/_config.yml deleted file mode 100644 index 2bad5672..00000000 --- a/docs/_config.yml +++ /dev/null @@ -1,71 +0,0 @@ -repository: "aniketmaurya/chitra" -output: web -topnav_title: "chitra" -site_title: "chitra" -company_name: "Aniket Maurya" -description: "Deep Learning Computer Vision library for easy data loading, model building and interpretation" -# Set to false to disable KaTeX math -use_math: true -# Add Google analytics id if you have one and want to use it here -google_analytics: -# See http://nbdev.fast.ai/search for help with adding Search -google_search: 350c3ff2aa64c8a64 - -host: 127.0.0.1 -# the preview server used. Leave as is. -port: 4000 -# the port where the preview is rendered. - -exclude: - - .idea/ - - .gitignore - - vendor - -exclude: [vendor] - -highlighter: rouge -markdown: kramdown -kramdown: - input: GFM - auto_ids: true - hard_wrap: false - syntax_highlighter: rouge - -collections: - tooltips: - output: false - -defaults: - - - scope: - path: "" - type: "pages" - values: - layout: "page" - comments: true - search: true - sidebar: home_sidebar - topnav: topnav - - - scope: - path: "" - type: "tooltips" - values: - layout: "page" - comments: true - search: true - tooltip: true - -sidebars: [home_sidebar] -permalink: pretty - -# Github and twitter are optional: -minima: - social_links: - twitter: aniketmaurya - github: aniketmaurya - -# theme: jekyll-theme-cayman -# url: "http://chitra.aniketmaurya.ml" # the base hostname & protocol for your site, e.g. 
http://example.com - -baseurl: / \ No newline at end of file diff --git a/docs/assets/chitra_banner_0.1.0.png b/docs/assets/chitra_banner_0.1.0.png new file mode 100644 index 00000000..40eaa7aa Binary files /dev/null and b/docs/assets/chitra_banner_0.1.0.png differ diff --git a/docs/assets/favicon/android-chrome-192x192.png b/docs/assets/favicon/android-chrome-192x192.png index 8a99e26a..e1a2cc54 100644 Binary files a/docs/assets/favicon/android-chrome-192x192.png and b/docs/assets/favicon/android-chrome-192x192.png differ diff --git a/docs/assets/favicon/android-chrome-512x512.png b/docs/assets/favicon/android-chrome-512x512.png index a3df6c58..10d3e0c4 100644 Binary files a/docs/assets/favicon/android-chrome-512x512.png and b/docs/assets/favicon/android-chrome-512x512.png differ diff --git a/docs/assets/favicon/apple-touch-icon.png b/docs/assets/favicon/apple-touch-icon.png index 51937522..683e610e 100644 Binary files a/docs/assets/favicon/apple-touch-icon.png and b/docs/assets/favicon/apple-touch-icon.png differ diff --git a/docs/assets/favicon/favicon-16x16.png b/docs/assets/favicon/favicon-16x16.png index 8c6fded4..0ab42dca 100644 Binary files a/docs/assets/favicon/favicon-16x16.png and b/docs/assets/favicon/favicon-16x16.png differ diff --git a/docs/assets/favicon/favicon-32x32.png b/docs/assets/favicon/favicon-32x32.png index 1bbf9855..e73bc667 100644 Binary files a/docs/assets/favicon/favicon-32x32.png and b/docs/assets/favicon/favicon-32x32.png differ diff --git a/docs/assets/favicon/favicon.ico b/docs/assets/favicon/favicon.ico index 822c86b0..4870dfc5 100644 Binary files a/docs/assets/favicon/favicon.ico and b/docs/assets/favicon/favicon.ico differ diff --git a/docs/assets/favicon/site.webmanifest b/docs/assets/favicon/site.webmanifest index 45dc8a20..1dd91123 100644 --- a/docs/assets/favicon/site.webmanifest +++ b/docs/assets/favicon/site.webmanifest @@ -1 +1 @@ -{"name":"","short_name":"","icons":[{"src":"/android-chrome-192x192.png","sizes":"192x192","type":"image/png"},{"src":"/android-chrome-512x512.png","sizes":"512x512","type":"image/png"}],"theme_color":"#ffffff","background_color":"#ffffff","display":"standalone"} \ No newline at end of file +{"name":"","short_name":"","icons":[{"src":"/android-chrome-192x192.png","sizes":"192x192","type":"image/png"},{"src":"/android-chrome-512x512.png","sizes":"512x512","type":"image/png"}],"theme_color":"#ffffff","background_color":"#ffffff","display":"standalone"} diff --git a/docs/assets/images/chitra-arch.png b/docs/assets/images/chitra-arch.png new file mode 100644 index 00000000..ffc5e465 Binary files /dev/null and b/docs/assets/images/chitra-arch.png differ diff --git a/docs/assets/images/chitra-arch.svg b/docs/assets/images/chitra-arch.svg new file mode 100644 index 00000000..f3e0e1d9 --- /dev/null +++ b/docs/assets/images/chitra-arch.svg @@ -0,0 +1 @@ + diff --git a/docs/old_source/images/company_logo-big.png b/docs/assets/images/company_logo-big.png similarity index 100% rename from docs/old_source/images/company_logo-big.png rename to docs/assets/images/company_logo-big.png diff --git a/docs/old_source/images/company_logo.png b/docs/assets/images/company_logo.png similarity index 100% rename from docs/old_source/images/company_logo.png rename to docs/assets/images/company_logo.png diff --git a/docs/old_source/images/doc_example.png b/docs/assets/images/doc_example.png similarity index 100% rename from docs/old_source/images/doc_example.png rename to docs/assets/images/doc_example.png diff --git 
a/docs/old_source/images/export_example.png b/docs/assets/images/export_example.png similarity index 100% rename from docs/old_source/images/export_example.png rename to docs/assets/images/export_example.png diff --git a/docs/old_source/images/favicon.ico b/docs/assets/images/favicon.ico similarity index 100% rename from docs/old_source/images/favicon.ico rename to docs/assets/images/favicon.ico diff --git a/docs/old_source/images/output_10_1.png b/docs/assets/images/output_10_1.png similarity index 100% rename from docs/old_source/images/output_10_1.png rename to docs/assets/images/output_10_1.png diff --git a/docs/old_source/images/output_11_1.png b/docs/assets/images/output_11_1.png similarity index 100% rename from docs/old_source/images/output_11_1.png rename to docs/assets/images/output_11_1.png diff --git a/docs/old_source/images/output_20_1.png b/docs/assets/images/output_20_1.png similarity index 100% rename from docs/old_source/images/output_20_1.png rename to docs/assets/images/output_20_1.png diff --git a/docs/assets/images/output_21_1.png b/docs/assets/images/output_21_1.png new file mode 100644 index 00000000..5d3c290f Binary files /dev/null and b/docs/assets/images/output_21_1.png differ diff --git a/docs/old_source/images/output_22_0.png b/docs/assets/images/output_22_0.png similarity index 100% rename from docs/old_source/images/output_22_0.png rename to docs/assets/images/output_22_0.png diff --git a/docs/assets/images/output_22_1.png b/docs/assets/images/output_22_1.png new file mode 100644 index 00000000..5d3c290f Binary files /dev/null and b/docs/assets/images/output_22_1.png differ diff --git a/docs/assets/images/output_23_0.png b/docs/assets/images/output_23_0.png new file mode 100644 index 00000000..3488598a Binary files /dev/null and b/docs/assets/images/output_23_0.png differ diff --git a/docs/assets/images/output_24_0.png b/docs/assets/images/output_24_0.png new file mode 100644 index 00000000..3488598a Binary files /dev/null and b/docs/assets/images/output_24_0.png differ diff --git a/docs/old_source/images/output_24_1.png b/docs/assets/images/output_24_1.png similarity index 100% rename from docs/old_source/images/output_24_1.png rename to docs/assets/images/output_24_1.png diff --git a/docs/old_source/images/output_26_0.png b/docs/assets/images/output_26_0.png similarity index 100% rename from docs/old_source/images/output_26_0.png rename to docs/assets/images/output_26_0.png diff --git a/docs/old_source/images/output_28_1.png b/docs/assets/images/output_28_1.png similarity index 100% rename from docs/old_source/images/output_28_1.png rename to docs/assets/images/output_28_1.png diff --git a/docs/old_source/images/output_30_1.png b/docs/assets/images/output_30_1.png similarity index 100% rename from docs/old_source/images/output_30_1.png rename to docs/assets/images/output_30_1.png diff --git a/docs/old_source/images/output_3_1.png b/docs/assets/images/output_3_1.png similarity index 100% rename from docs/old_source/images/output_3_1.png rename to docs/assets/images/output_3_1.png diff --git a/docs/old_source/images/output_4_0.png b/docs/assets/images/output_4_0.png similarity index 100% rename from docs/old_source/images/output_4_0.png rename to docs/assets/images/output_4_0.png diff --git a/docs/old_source/images/output_4_1.png b/docs/assets/images/output_4_1.png similarity index 100% rename from docs/old_source/images/output_4_1.png rename to docs/assets/images/output_4_1.png diff --git a/docs/old_source/images/output_4_1.svg 
b/docs/assets/images/output_4_1.svg similarity index 99% rename from docs/old_source/images/output_4_1.svg rename to docs/assets/images/output_4_1.svg index 93d137c3..c74c792b 100644 --- a/docs/old_source/images/output_4_1.svg +++ b/docs/assets/images/output_4_1.svg @@ -1 +1 @@ -2020-10-14T21:29:26.338372image/svg+xmlMatplotlib v3.3.1, https://matplotlib.org/ \ No newline at end of file +2020-10-14T21:29:26.338372image/svg+xmlMatplotlib v3.3.1, https://matplotlib.org/ diff --git a/docs/assets/images/output_5_0.png b/docs/assets/images/output_5_0.png new file mode 100644 index 00000000..de14aab3 Binary files /dev/null and b/docs/assets/images/output_5_0.png differ diff --git a/docs/old_source/images/output_5_0.svg b/docs/assets/images/output_5_0.svg similarity index 99% rename from docs/old_source/images/output_5_0.svg rename to docs/assets/images/output_5_0.svg index 625a2c8e..58731cdf 100644 --- a/docs/old_source/images/output_5_0.svg +++ b/docs/assets/images/output_5_0.svg @@ -1 +1 @@ -2020-10-14T21:29:27.001433image/svg+xmlMatplotlib v3.3.1, https://matplotlib.org/ \ No newline at end of file +2020-10-14T21:29:27.001433image/svg+xmlMatplotlib v3.3.1, https://matplotlib.org/ diff --git a/docs/old_source/images/output_5_1.png b/docs/assets/images/output_5_1.png similarity index 100% rename from docs/old_source/images/output_5_1.png rename to docs/assets/images/output_5_1.png diff --git a/docs/old_source/images/output_5_1.svg b/docs/assets/images/output_5_1.svg similarity index 99% rename from docs/old_source/images/output_5_1.svg rename to docs/assets/images/output_5_1.svg index 93d137c3..c74c792b 100644 --- a/docs/old_source/images/output_5_1.svg +++ b/docs/assets/images/output_5_1.svg @@ -1 +1 @@ -2020-10-14T21:29:26.338372image/svg+xmlMatplotlib v3.3.1, https://matplotlib.org/ \ No newline at end of file +2020-10-14T21:29:26.338372image/svg+xmlMatplotlib v3.3.1, https://matplotlib.org/ diff --git a/docs/assets/images/output_6_0.png b/docs/assets/images/output_6_0.png new file mode 100644 index 00000000..de14aab3 Binary files /dev/null and b/docs/assets/images/output_6_0.png differ diff --git a/docs/old_source/images/output_6_0.svg b/docs/assets/images/output_6_0.svg similarity index 99% rename from docs/old_source/images/output_6_0.svg rename to docs/assets/images/output_6_0.svg index 625a2c8e..58731cdf 100644 --- a/docs/old_source/images/output_6_0.svg +++ b/docs/assets/images/output_6_0.svg @@ -1 +1 @@ -2020-10-14T21:29:27.001433image/svg+xmlMatplotlib v3.3.1, https://matplotlib.org/ \ No newline at end of file +2020-10-14T21:29:27.001433image/svg+xmlMatplotlib v3.3.1, https://matplotlib.org/ diff --git a/docs/old_source/images/output_6_1.png b/docs/assets/images/output_6_1.png similarity index 100% rename from docs/old_source/images/output_6_1.png rename to docs/assets/images/output_6_1.png diff --git a/docs/old_source/images/output_7_0.png b/docs/assets/images/output_7_0.png similarity index 100% rename from docs/old_source/images/output_7_0.png rename to docs/assets/images/output_7_0.png diff --git a/docs/old_source/images/output_7_1.png b/docs/assets/images/output_7_1.png similarity index 100% rename from docs/old_source/images/output_7_1.png rename to docs/assets/images/output_7_1.png diff --git a/docs/old_source/images/output_8_0.png b/docs/assets/images/output_8_0.png similarity index 100% rename from docs/old_source/images/output_8_0.png rename to docs/assets/images/output_8_0.png diff --git a/docs/old_source/images/output_8_0.svg b/docs/assets/images/output_8_0.svg 
similarity index 99% rename from docs/old_source/images/output_8_0.svg rename to docs/assets/images/output_8_0.svg index c6072520..268a5f1b 100644 --- a/docs/old_source/images/output_8_0.svg +++ b/docs/assets/images/output_8_0.svg @@ -1 +1 @@ -2020-10-14T21:29:32.735037image/svg+xmlMatplotlib v3.3.1, https://matplotlib.org/ \ No newline at end of file +2020-10-14T21:29:32.735037image/svg+xmlMatplotlib v3.3.1, https://matplotlib.org/ diff --git a/docs/old_source/images/output_9_0.png b/docs/assets/images/output_9_0.png similarity index 100% rename from docs/old_source/images/output_9_0.png rename to docs/assets/images/output_9_0.png diff --git a/docs/old_source/images/output_9_0.svg b/docs/assets/images/output_9_0.svg similarity index 99% rename from docs/old_source/images/output_9_0.svg rename to docs/assets/images/output_9_0.svg index c6072520..268a5f1b 100644 --- a/docs/old_source/images/output_9_0.svg +++ b/docs/assets/images/output_9_0.svg @@ -1 +1 @@ -2020-10-14T21:29:32.735037image/svg+xmlMatplotlib v3.3.1, https://matplotlib.org/ \ No newline at end of file +2020-10-14T21:29:32.735037image/svg+xmlMatplotlib v3.3.1, https://matplotlib.org/ diff --git a/docs/old_source/images/output_9_1.png b/docs/assets/images/output_9_1.png similarity index 100% rename from docs/old_source/images/output_9_1.png rename to docs/assets/images/output_9_1.png diff --git a/docs/assets/images/preview-bounding-box.png b/docs/assets/images/preview-bounding-box.png new file mode 100644 index 00000000..04196a33 Binary files /dev/null and b/docs/assets/images/preview-bounding-box.png differ diff --git a/docs/old_source/images/workflowarrow.png b/docs/assets/images/workflowarrow.png similarity index 100% rename from docs/old_source/images/workflowarrow.png rename to docs/assets/images/workflowarrow.png diff --git a/docs/assets/logo 0.1.0.png b/docs/assets/logo 0.1.0.png new file mode 100644 index 00000000..809c2c20 Binary files /dev/null and b/docs/assets/logo 0.1.0.png differ diff --git a/docs/examples/chitra-class/chitra-class.md b/docs/examples/chitra-class/chitra-class.md index 49c00acc..dbf26859 100644 --- a/docs/examples/chitra-class/chitra-class.md +++ b/docs/examples/chitra-class/chitra-class.md @@ -1,19 +1,11 @@ -Open In Colab - # Play with Images > `Chitra` is an image utility class that can load image from filelike object, web url or numpy image. It offers drawing bounding box over the image. - - ```python -# For latest update install from master -!pip install git+https://github.com/aniketmaurya/chitra@master -q -``` - +# pip install -U chitra -```python from chitra.image import Chitra import matplotlib.pyplot as plt ``` @@ -27,28 +19,48 @@ import matplotlib.pyplot as plt - **Corner(xyxy):** xmin ymin and xmax ymax - Plot bounding box on image -### Load image from web url and show +### Load image from web url and show ```python -url = 'https://upload.wikimedia.org/wikipedia/commons/thumb/b/b6/Image_created_with_a_mobile_phone.png/1200px-Image_created_with_a_mobile_phone.png' +url = "https://upload.wikimedia.org/wikipedia/commons/thumb/b/b6/Image_created_with_a_mobile_phone.png/1200px-Image_created_with_a_mobile_phone.png" image = Chitra(url) image.imshow() ``` - ![png](output_6_0.png) +You can cache the image downloaded from internet URL by passing `cache=True` in argument. +Second call to the same URL will not download from internet, instead image will be loaded from the local cache dir. 
+```python +# first call - image will be downloaded from internet and saved to local cache dir +image = Chitra(url, cache=True) -### Plot bounding box and label for the handphone +# second call - image will be loaded from local cached dir +image = Chitra(url, cache=True) +``` +### Plot bounding box and label for the handphone + ```python -b = [[600, 250, 900, 600.1]] -l = ['hand'] -image = Chitra(url, b, l) +box = [[600, 250, 900, 600.1]] +label = ['handphone'] +image = Chitra(url, box, label) image.image = image.image.convert('RGB') plt.imshow(image.draw_boxes()) ``` ![png](output_8_1.png) + +### Resize Image and Bounding at the same time +Chitra can rescale your bounding box automatically based on the new image size. + +```python +box = [[600, 250, 900, 600.1]] +label = ['handphone'] +image = Chitra(url, box, label) +image.resize_image_with_bbox((224, 224)) +print(image.bounding_boxes) +plt.imshow(image.draw_boxes()) +``` diff --git a/docs/examples/chitra-class/output_6_0.png b/docs/examples/chitra-class/output_6_0.png index c8ca4b70..88f68633 100644 Binary files a/docs/examples/chitra-class/output_6_0.png and b/docs/examples/chitra-class/output_6_0.png differ diff --git a/docs/examples/chitra-class/output_8_1.png b/docs/examples/chitra-class/output_8_1.png index 22a0d11e..ef56b3cb 100644 Binary files a/docs/examples/chitra-class/output_8_1.png and b/docs/examples/chitra-class/output_8_1.png differ diff --git a/docs/examples/image-classification/image-classification.md b/docs/examples/image-classification/image-classification.md index 4dc984c4..49821445 100644 --- a/docs/examples/image-classification/image-classification.md +++ b/docs/examples/image-classification/image-classification.md @@ -4,14 +4,7 @@ Training Image classification model for Cats vs Dogs Kaggle dataset. To install chitra -`pip install --upgrade chitra==0.0.22` - - -```python -!pip install chitra -q -``` - -  |████████████████████████████████| 1.1MB 18.1MB/s eta 0:00:01 +`pip install --upgrade "chitra[nn]"` ## import functions and classes @@ -22,17 +15,13 @@ Dataset class has API for loading `tf.data`, image augmentation and progressive The Trainer class inherits from tf.keras.Model, it contains everything that is required for training. It exposes trainer.cyclic_fit method which trains the model using Cyclic Learning rate discovered by Leslie Smith. - ```python import tensorflow as tf from chitra.datagenerator import Dataset from chitra.trainer import Trainer, create_cnn - from PIL import Image -``` -```python BS = 16 IMG_SIZE_LST = [(128,128), (160, 160), (224,224)] AUTOTUNE = tf.data.experimental.AUTOTUNE @@ -42,10 +31,10 @@ def tensor_to_image(tensor): return Image.fromarray(tensor.numpy().astype('uint8')) ``` -copy your kaggle key to `/root/.kaggle/kaggle.json` for downloading the dataset. +Copy your kaggle key to `/root/.kaggle/kaggle.json` for downloading the dataset. -```python +``` !kaggle datasets download -d chetankv/dogs-cats-images !unzip -q dogs-cats-images.zip ``` @@ -53,10 +42,8 @@ copy your kaggle key to `/root/.kaggle/kaggle.json` for downloading the dataset. 
```python ds = Dataset('dog vs cat/dataset/training_set', image_size=IMG_SIZE_LST) -``` -```python image, label = ds[0] print(label) tensor_to_image(image).resize((224,224)) @@ -64,7 +51,7 @@ tensor_to_image(image).resize((224,224)) dogs - + ![png](output_10_1.png) @@ -80,346 +67,346 @@ trainer = Trainer(ds, create_cnn('mobilenetv2', num_classes=2)) WARNING:tensorflow:`input_shape` is undefined or non-square, or `rows` is not in [96, 128, 160, 192, 224]. Weights for input shape (224, 224) will be loaded as the default. - ```python trainer.summary() ``` +
Model Summary Model: "functional_1" __________________________________________________________________________________________________ - Layer (type) Output Shape Param # Connected to + Layer (type) Output Shape Param # Connected to ================================================================================================== - input_1 (InputLayer) [(None, None, None, 0 + input_1 (InputLayer) [(None, None, None, 0 __________________________________________________________________________________________________ - Conv1_pad (ZeroPadding2D) (None, None, None, 3 0 input_1[0][0] + Conv1_pad (ZeroPadding2D) (None, None, None, 3 0 input_1[0][0] __________________________________________________________________________________________________ - Conv1 (Conv2D) (None, None, None, 3 864 Conv1_pad[0][0] + Conv1 (Conv2D) (None, None, None, 3 864 Conv1_pad[0][0] __________________________________________________________________________________________________ - bn_Conv1 (BatchNormalization) (None, None, None, 3 128 Conv1[0][0] + bn_Conv1 (BatchNormalization) (None, None, None, 3 128 Conv1[0][0] __________________________________________________________________________________________________ - Conv1_relu (ReLU) (None, None, None, 3 0 bn_Conv1[0][0] + Conv1_relu (ReLU) (None, None, None, 3 0 bn_Conv1[0][0] __________________________________________________________________________________________________ - expanded_conv_depthwise (Depthw (None, None, None, 3 288 Conv1_relu[0][0] + expanded_conv_depthwise (Depthw (None, None, None, 3 288 Conv1_relu[0][0] __________________________________________________________________________________________________ - expanded_conv_depthwise_BN (Bat (None, None, None, 3 128 expanded_conv_depthwise[0][0] + expanded_conv_depthwise_BN (Bat (None, None, None, 3 128 expanded_conv_depthwise[0][0] __________________________________________________________________________________________________ - expanded_conv_depthwise_relu (R (None, None, None, 3 0 expanded_conv_depthwise_BN[0][0] + expanded_conv_depthwise_relu (R (None, None, None, 3 0 expanded_conv_depthwise_BN[0][0] __________________________________________________________________________________________________ expanded_conv_project (Conv2D) (None, None, None, 1 512 expanded_conv_depthwise_relu[0][0 __________________________________________________________________________________________________ - expanded_conv_project_BN (Batch (None, None, None, 1 64 expanded_conv_project[0][0] + expanded_conv_project_BN (Batch (None, None, None, 1 64 expanded_conv_project[0][0] __________________________________________________________________________________________________ - block_1_expand (Conv2D) (None, None, None, 9 1536 expanded_conv_project_BN[0][0] + block_1_expand (Conv2D) (None, None, None, 9 1536 expanded_conv_project_BN[0][0] __________________________________________________________________________________________________ - block_1_expand_BN (BatchNormali (None, None, None, 9 384 block_1_expand[0][0] + block_1_expand_BN (BatchNormali (None, None, None, 9 384 block_1_expand[0][0] __________________________________________________________________________________________________ - block_1_expand_relu (ReLU) (None, None, None, 9 0 block_1_expand_BN[0][0] + block_1_expand_relu (ReLU) (None, None, None, 9 0 block_1_expand_BN[0][0] __________________________________________________________________________________________________ - block_1_pad (ZeroPadding2D) (None, None, None, 9 0 block_1_expand_relu[0][0] + block_1_pad 
(ZeroPadding2D) (None, None, None, 9 0 block_1_expand_relu[0][0] __________________________________________________________________________________________________ - block_1_depthwise (DepthwiseCon (None, None, None, 9 864 block_1_pad[0][0] + block_1_depthwise (DepthwiseCon (None, None, None, 9 864 block_1_pad[0][0] __________________________________________________________________________________________________ - block_1_depthwise_BN (BatchNorm (None, None, None, 9 384 block_1_depthwise[0][0] + block_1_depthwise_BN (BatchNorm (None, None, None, 9 384 block_1_depthwise[0][0] __________________________________________________________________________________________________ - block_1_depthwise_relu (ReLU) (None, None, None, 9 0 block_1_depthwise_BN[0][0] + block_1_depthwise_relu (ReLU) (None, None, None, 9 0 block_1_depthwise_BN[0][0] __________________________________________________________________________________________________ - block_1_project (Conv2D) (None, None, None, 2 2304 block_1_depthwise_relu[0][0] + block_1_project (Conv2D) (None, None, None, 2 2304 block_1_depthwise_relu[0][0] __________________________________________________________________________________________________ - block_1_project_BN (BatchNormal (None, None, None, 2 96 block_1_project[0][0] + block_1_project_BN (BatchNormal (None, None, None, 2 96 block_1_project[0][0] __________________________________________________________________________________________________ - block_2_expand (Conv2D) (None, None, None, 1 3456 block_1_project_BN[0][0] + block_2_expand (Conv2D) (None, None, None, 1 3456 block_1_project_BN[0][0] __________________________________________________________________________________________________ - block_2_expand_BN (BatchNormali (None, None, None, 1 576 block_2_expand[0][0] + block_2_expand_BN (BatchNormali (None, None, None, 1 576 block_2_expand[0][0] __________________________________________________________________________________________________ - block_2_expand_relu (ReLU) (None, None, None, 1 0 block_2_expand_BN[0][0] + block_2_expand_relu (ReLU) (None, None, None, 1 0 block_2_expand_BN[0][0] __________________________________________________________________________________________________ - block_2_depthwise (DepthwiseCon (None, None, None, 1 1296 block_2_expand_relu[0][0] + block_2_depthwise (DepthwiseCon (None, None, None, 1 1296 block_2_expand_relu[0][0] __________________________________________________________________________________________________ - block_2_depthwise_BN (BatchNorm (None, None, None, 1 576 block_2_depthwise[0][0] + block_2_depthwise_BN (BatchNorm (None, None, None, 1 576 block_2_depthwise[0][0] __________________________________________________________________________________________________ - block_2_depthwise_relu (ReLU) (None, None, None, 1 0 block_2_depthwise_BN[0][0] + block_2_depthwise_relu (ReLU) (None, None, None, 1 0 block_2_depthwise_BN[0][0] __________________________________________________________________________________________________ - block_2_project (Conv2D) (None, None, None, 2 3456 block_2_depthwise_relu[0][0] + block_2_project (Conv2D) (None, None, None, 2 3456 block_2_depthwise_relu[0][0] __________________________________________________________________________________________________ - block_2_project_BN (BatchNormal (None, None, None, 2 96 block_2_project[0][0] + block_2_project_BN (BatchNormal (None, None, None, 2 96 block_2_project[0][0] __________________________________________________________________________________________________ - 
block_2_add (Add) (None, None, None, 2 0 block_1_project_BN[0][0] - block_2_project_BN[0][0] + block_2_add (Add) (None, None, None, 2 0 block_1_project_BN[0][0] + block_2_project_BN[0][0] __________________________________________________________________________________________________ - block_3_expand (Conv2D) (None, None, None, 1 3456 block_2_add[0][0] + block_3_expand (Conv2D) (None, None, None, 1 3456 block_2_add[0][0] __________________________________________________________________________________________________ - block_3_expand_BN (BatchNormali (None, None, None, 1 576 block_3_expand[0][0] + block_3_expand_BN (BatchNormali (None, None, None, 1 576 block_3_expand[0][0] __________________________________________________________________________________________________ - block_3_expand_relu (ReLU) (None, None, None, 1 0 block_3_expand_BN[0][0] + block_3_expand_relu (ReLU) (None, None, None, 1 0 block_3_expand_BN[0][0] __________________________________________________________________________________________________ - block_3_pad (ZeroPadding2D) (None, None, None, 1 0 block_3_expand_relu[0][0] + block_3_pad (ZeroPadding2D) (None, None, None, 1 0 block_3_expand_relu[0][0] __________________________________________________________________________________________________ - block_3_depthwise (DepthwiseCon (None, None, None, 1 1296 block_3_pad[0][0] + block_3_depthwise (DepthwiseCon (None, None, None, 1 1296 block_3_pad[0][0] __________________________________________________________________________________________________ - block_3_depthwise_BN (BatchNorm (None, None, None, 1 576 block_3_depthwise[0][0] + block_3_depthwise_BN (BatchNorm (None, None, None, 1 576 block_3_depthwise[0][0] __________________________________________________________________________________________________ - block_3_depthwise_relu (ReLU) (None, None, None, 1 0 block_3_depthwise_BN[0][0] + block_3_depthwise_relu (ReLU) (None, None, None, 1 0 block_3_depthwise_BN[0][0] __________________________________________________________________________________________________ - block_3_project (Conv2D) (None, None, None, 3 4608 block_3_depthwise_relu[0][0] + block_3_project (Conv2D) (None, None, None, 3 4608 block_3_depthwise_relu[0][0] __________________________________________________________________________________________________ - block_3_project_BN (BatchNormal (None, None, None, 3 128 block_3_project[0][0] + block_3_project_BN (BatchNormal (None, None, None, 3 128 block_3_project[0][0] __________________________________________________________________________________________________ - block_4_expand (Conv2D) (None, None, None, 1 6144 block_3_project_BN[0][0] + block_4_expand (Conv2D) (None, None, None, 1 6144 block_3_project_BN[0][0] __________________________________________________________________________________________________ - block_4_expand_BN (BatchNormali (None, None, None, 1 768 block_4_expand[0][0] + block_4_expand_BN (BatchNormali (None, None, None, 1 768 block_4_expand[0][0] __________________________________________________________________________________________________ - block_4_expand_relu (ReLU) (None, None, None, 1 0 block_4_expand_BN[0][0] + block_4_expand_relu (ReLU) (None, None, None, 1 0 block_4_expand_BN[0][0] __________________________________________________________________________________________________ - block_4_depthwise (DepthwiseCon (None, None, None, 1 1728 block_4_expand_relu[0][0] + block_4_depthwise (DepthwiseCon (None, None, None, 1 1728 block_4_expand_relu[0][0] 
__________________________________________________________________________________________________ - block_4_depthwise_BN (BatchNorm (None, None, None, 1 768 block_4_depthwise[0][0] + block_4_depthwise_BN (BatchNorm (None, None, None, 1 768 block_4_depthwise[0][0] __________________________________________________________________________________________________ - block_4_depthwise_relu (ReLU) (None, None, None, 1 0 block_4_depthwise_BN[0][0] + block_4_depthwise_relu (ReLU) (None, None, None, 1 0 block_4_depthwise_BN[0][0] __________________________________________________________________________________________________ - block_4_project (Conv2D) (None, None, None, 3 6144 block_4_depthwise_relu[0][0] + block_4_project (Conv2D) (None, None, None, 3 6144 block_4_depthwise_relu[0][0] __________________________________________________________________________________________________ - block_4_project_BN (BatchNormal (None, None, None, 3 128 block_4_project[0][0] + block_4_project_BN (BatchNormal (None, None, None, 3 128 block_4_project[0][0] __________________________________________________________________________________________________ - block_4_add (Add) (None, None, None, 3 0 block_3_project_BN[0][0] - block_4_project_BN[0][0] + block_4_add (Add) (None, None, None, 3 0 block_3_project_BN[0][0] + block_4_project_BN[0][0] __________________________________________________________________________________________________ - block_5_expand (Conv2D) (None, None, None, 1 6144 block_4_add[0][0] + block_5_expand (Conv2D) (None, None, None, 1 6144 block_4_add[0][0] __________________________________________________________________________________________________ - block_5_expand_BN (BatchNormali (None, None, None, 1 768 block_5_expand[0][0] + block_5_expand_BN (BatchNormali (None, None, None, 1 768 block_5_expand[0][0] __________________________________________________________________________________________________ - block_5_expand_relu (ReLU) (None, None, None, 1 0 block_5_expand_BN[0][0] + block_5_expand_relu (ReLU) (None, None, None, 1 0 block_5_expand_BN[0][0] __________________________________________________________________________________________________ - block_5_depthwise (DepthwiseCon (None, None, None, 1 1728 block_5_expand_relu[0][0] + block_5_depthwise (DepthwiseCon (None, None, None, 1 1728 block_5_expand_relu[0][0] __________________________________________________________________________________________________ - block_5_depthwise_BN (BatchNorm (None, None, None, 1 768 block_5_depthwise[0][0] + block_5_depthwise_BN (BatchNorm (None, None, None, 1 768 block_5_depthwise[0][0] __________________________________________________________________________________________________ - block_5_depthwise_relu (ReLU) (None, None, None, 1 0 block_5_depthwise_BN[0][0] + block_5_depthwise_relu (ReLU) (None, None, None, 1 0 block_5_depthwise_BN[0][0] __________________________________________________________________________________________________ - block_5_project (Conv2D) (None, None, None, 3 6144 block_5_depthwise_relu[0][0] + block_5_project (Conv2D) (None, None, None, 3 6144 block_5_depthwise_relu[0][0] __________________________________________________________________________________________________ - block_5_project_BN (BatchNormal (None, None, None, 3 128 block_5_project[0][0] + block_5_project_BN (BatchNormal (None, None, None, 3 128 block_5_project[0][0] __________________________________________________________________________________________________ - block_5_add (Add) (None, None, None, 3 0 
block_4_add[0][0]
                                                                  block_5_project_BN[0][0]
 __________________________________________________________________________________________________
 ...                                                              (MobileNetV2 blocks 6-16: expand,
                                                                   depthwise, project and add layers)
 __________________________________________________________________________________________________
 Conv_1 (Conv2D)                 (None, None, None, 1 409600      block_16_project_BN[0][0]
 __________________________________________________________________________________________________
 Conv_1_bn (BatchNormalization)  (None, None, None, 1 5120        Conv_1[0][0]
 __________________________________________________________________________________________________
 out_relu (ReLU)                 (None, None, None, 1 0           Conv_1_bn[0][0]
 __________________________________________________________________________________________________
 global_average_pooling2d (Globa (None, 1280)         0           out_relu[0][0]
 __________________________________________________________________________________________________
 dropout (Dropout)               (None, 1280)         0           global_average_pooling2d[0][0]
 __________________________________________________________________________________________________
 output (Dense)                  (None, 1)            1281        dropout[0][0]
 ==================================================================================================
 Total params: 2,259,265
 Trainable params: 2,225,153
 Non-trainable params: 34,112
 __________________________________________________________________________________________________
-
+
```python
@@ -433,7 +420,6 @@ trainer.compile2(batch_size=BS,
    Model compiled!

-
```python
trainer.cyclic_fit(10, batch_size=BS)
```
@@ -467,14 +453,9 @@ trainer.cyclic_fit(10, batch_size=BS)
    Returning the last set size which is: (224, 224)
    500/500 [==============================] - 79s 159ms/step - loss: 0.0172 - binary_accuracy: 0.9940
-
-
-
-
-
Trainer also supports the regular keras `model.fit` api using `trainer.fit`

Train the same model **without cyclic learning rate**:
@@ -498,6 +479,7 @@ trainer.fit(data,
            epochs=10)
```
    Training loop...
+
    Epoch 1/10
    500/500 [==============================] - 38s 77ms/step - loss: 0.4070 - binary_accuracy: 0.8026
    Epoch 2/10
@@ -528,13 +510,9 @@ trainer.fit(data,
- - - - # What does model focus on while making a prediction? `chitra.trainer.InterpretModel` class creates GradCAM and GradCAM++ visualization in no additional code! @@ -542,23 +520,13 @@ trainer.fit(data, ```python from chitra.trainer import InterpretModel import random -model_interpret = InterpretModel(True, trainer) -``` -```python +model_interpret = InterpretModel(True, trainer) image_tensor = random.choice(ds)[0] image = tensor_to_image(image_tensor) model_interpret(image, auto_resize=False) ``` - ![png](output_21_0.png) - - - - -```python - -``` diff --git a/docs/examples/image-classification/output_10_1.png b/docs/examples/image-classification/output_10_1.png index 279edead..30c0d40c 100644 Binary files a/docs/examples/image-classification/output_10_1.png and b/docs/examples/image-classification/output_10_1.png differ diff --git a/docs/examples/image-classification/output_21_0.png b/docs/examples/image-classification/output_21_0.png index f1b536a4..014503c3 100644 Binary files a/docs/examples/image-classification/output_21_0.png and b/docs/examples/image-classification/output_21_0.png differ diff --git a/docs/examples/model-server/model-server.md b/docs/examples/model-server/model-server.md new file mode 100644 index 00000000..4bf2c81d --- /dev/null +++ b/docs/examples/model-server/model-server.md @@ -0,0 +1,38 @@ +# Chitra Model Server + +Create API for Any Learning Model - ML, DL, Image Classification, NLP, Tensorflow or PyTorch. + +## What can it do? + +- Serve Any Learning Model +- Predefined processing functions for image classification (NLP processing functions coming soon) +- Override custom preprocessing and Postprocessing function with your own. + + +## Code + +```python +# install chitra +# pip install -U chitra + +from chitra.serve import create_api +from chitra.trainer import create_cnn + + +model = create_cnn('mobilenetv2', num_classes=2) + +create_api(model, run=True, api_type='image-classification') +``` + +Open `http://127.0.0.1:8000/docs` in your browser and try out the API. You can upload any image to try out the API. + + +If you want to try out *Text Classification* or *Question-Answering* task then all you have to do is change `api-type="text-classification"` or `api_type="question-ans"` then pass your model and you are all set. + + +> Request Response Schema (JSON body) will be changed based on the `api_type`. 
+
+
+#### Preview Question Answering API
+
+![png](preview-qna.png)
diff --git a/docs/examples/model-server/preview-qna.png b/docs/examples/model-server/preview-qna.png
new file mode 100644
index 00000000..bc0a3f43
Binary files /dev/null and b/docs/examples/model-server/preview-qna.png differ
diff --git a/docs/examples/model-server/preview.png b/docs/examples/model-server/preview.png
new file mode 100644
index 00000000..41cbf225
Binary files /dev/null and b/docs/examples/model-server/preview.png differ
diff --git a/docs/index.md b/docs/index.md
index c619f404..f4c6a5f6 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -1,88 +1,112 @@
# chitra
-![](https://www.code-inspector.com/project/16652/score/svg)
-![](https://www.code-inspector.com/project/16652/status/svg)
+
+[![CodeFactor](https://www.codefactor.io/repository/github/aniketmaurya/chitra/badge)](https://www.codefactor.io/repository/github/aniketmaurya/chitra)
+[![Maintainability Rating](https://sonarcloud.io/api/project_badges/measure?project=aniketmaurya_chitra&metric=sqale_rating)](https://sonarcloud.io/dashboard?id=aniketmaurya_chitra)
+[![Reliability Rating](https://sonarcloud.io/api/project_badges/measure?project=aniketmaurya_chitra&metric=reliability_rating)](https://sonarcloud.io/dashboard?id=aniketmaurya_chitra)
+[![Security Rating](https://sonarcloud.io/api/project_badges/measure?project=aniketmaurya_chitra&metric=security_rating)](https://sonarcloud.io/dashboard?id=aniketmaurya_chitra)
+[![Coverage](https://sonarcloud.io/api/project_badges/measure?project=aniketmaurya_chitra&metric=coverage)](https://sonarcloud.io/dashboard?id=aniketmaurya_chitra)
+![GitHub issues](https://img.shields.io/github/issues/aniketmaurya/chitra?style=flat)
+[![Documentation Status](https://readthedocs.org/projects/chitra/badge/?version=latest)](https://chitra.readthedocs.io/en/latest/?badge=latest)
+[![Discord](https://img.shields.io/discord/848469007443165184?style=flat)](https://discord.gg/TdnAfDw3kB)

## What is chitra?

-**chitra** (**चित्र**) is a Deep Learning Computer Vision library for easy data loading, model building and model interpretation with GradCAM/GradCAM++.
+**chitra** (**चित्र**) is a multi-functional library for full-stack Deep Learning. It simplifies model building, API development, and model deployment.

-Highlights:
-- Faster data loading without any boilerplate.
-- Progressive resizing of images.
-- Rapid experiments with different models using `chitra.trainer` module.
-- Train models with cyclic learning rate.
-- Model interpretation using GradCAM/GradCAM++ with no extra code.
+### Components
+arch

-If you have more use case please [**raise an issue/PR**](https://github.com/aniketmaurya/chitra/issues/new/choose) with the feature you want.
+Load an image from an internet URL, a filepath, or a `numpy` array, and easily plot bounding boxes on it.
+Model training and Explainable AI.
+Easily create a UI for machine learning models, or a REST API backend that can be deployed for serving ML models in production.
-> Join discord channel - https://discord.gg/TdnAfDw3kB
+### 📌 Highlights:
+- [New] [Auto Dockerization of Models](https://chitra.readthedocs.io/en/latest/source/cli/builder/builder-create/) 🐳
+- [New] [Framework Agnostic Model Serving & Interactive UI prototype app](https://chitra.readthedocs.io/en/latest/source/api/serve/model_server/) ✨🌟
+- [New] [Data Visualization, Bounding Box Visualization](https://chitra.readthedocs.io/en/latest/source/api/image/chitra-class/) 🐶🎨
+- Model interpretation using GradCAM/GradCAM++ with no extra code 🔥
+- Faster data loading without any boilerplate 🤺
+- Progressive resizing of images 🎨
+- Rapid experiments with different models using `chitra.trainer` module 🚀
+
+### 🚘 Implementation Roadmap
-## Installation
+- One click deployment to `serverless` platform.
-### Using pip (recommended)
+
+> If you have a use case that is not covered yet, please [**raise an issue/PR**](https://github.com/aniketmaurya/chitra/issues/new/choose) with the feature you want.
+> If you want to contribute, feel free to raise a PR. It doesn't need to be perfect.
+> We will help you get there.
+
+## 📀 Installation
+[![Downloads](https://pepy.tech/badge/chitra)](https://pepy.tech/project/chitra)
+[![Downloads](https://pepy.tech/badge/chitra/month)](https://pepy.tech/project/chitra)
+![GitHub License](https://img.shields.io/github/license/aniketmaurya/chitra?style=flat)
+
+### Using pip (recommended)
+1. Minimum installation `pip install -U chitra`
+1. Full installation
+`pip install -U 'chitra[all]'`
+
+1. Install for training
+`pip install -U 'chitra[nn]'`
+
+1. Install for serving
+`pip install -U 'chitra[serve]'`
+
### From source
```
-git clone https://github.com/aniketmaurya/chitra.git
-cd chitra
-pip install -e .
+pip install git+https://github.com/aniketmaurya/chitra@master
```
-### From GitHub
-```
-pip install git+https://github.com/aniketmaurya/chitra@master
+Or,
+```
+git clone https://github.com/aniketmaurya/chitra.git
+cd chitra
+pip install .
```

-## Usage
+## 🧑‍💻 Usage

### Loading data for image classification

-Chitra `dataloader` and `datagenerator` modules for loading data. `dataloader` is a minimal dataloader that returns `tf.data.Dataset` object. `datagenerator` provides flexibility to users on how they want to load and manipulate the data.
+Chitra provides `dataloader` and `datagenerator` modules for loading data. `dataloader` is a minimal dataloader that
+returns a `tf.data.Dataset` object. `datagenerator` gives users flexibility in how they want to load and manipulate
+the data.

-```
+```python
import numpy as np
-import tensorflow as tf
import chitra
-from chitra.dataloader import Clf, show_batch
+from chitra.dataloader import Clf
import matplotlib.pyplot as plt
-```
-```
+
clf_dl = Clf()
data = clf_dl.from_folder(cat_dog_path, target_shape=(224, 224))
-
-clf_dl.show_batch(8, figsize=(8,8))
-```
-
-```
-for e in data.take(1):
-    image = e[0].numpy().astype('uint8')
-    label = e[1].numpy()
-plt.imshow(image)
-plt.show()
+clf_dl.show_batch(8, figsize=(8, 8))
```
-
-![png]({{ nbs_image.base_url }}/output_6_0.png)
-
+![Show Batch](https://raw.githubusercontent.com/aniketmaurya/chitra/master/docs/assets/images/output_3_1.png)

## Image datagenerator
+
Dataset class provides the flexibility to load image dataset by updating components of the class.
Components of Dataset class are:
+
- image file generator
- resizer
- label generator
- image loader

-These components can be updated with custom function by the user according to their dataset structure. For example the Tiny Imagenet dataset is organized as-
+These components can be updated with custom functions by the user according to their dataset structure. For example, the
+Tiny ImageNet dataset is organized as-

```
train_folder/
@@ -95,74 +119,69 @@ train_folder/
.
.
......imageN.jpg
-
-
```

-The inbuilt file generator search for images on the `folder1`, now we can just update the `image file generator` and rest of the functionality will remain same.
+The inbuilt file generator searches for images in `folder1`; we can just update the `image file generator` and the
+rest of the functionality will remain the same.

**Dataset also support progressive resizing of images.**

### Updating component

-```
+```python
from chitra.datagenerator import Dataset
-from glob import glob

ds = Dataset(data_path)
# it will load the folders and NOT images
ds.filenames[:3]
```
-    No item present in the image size list
-
-
-
+<details>
+<summary>Output</summary>
+
    No item present in the image size list

    ['/Users/aniket/Pictures/data/tiny-imagenet-200/train/n02795169/n02795169_boxes.txt',
     '/Users/aniket/Pictures/data/tiny-imagenet-200/train/n02795169/images',
     '/Users/aniket/Pictures/data/tiny-imagenet-200/train/n02769748/images']
+
+</details>
-
-```
+```python
def load_files(path):
    return glob(f'{path}/*/images/*')

+
def get_label(path):
    return path.split('/')[-3]
-
+
+
ds.update_component('get_filenames', load_files)
ds.filenames[:3]
```
+<details>
+<summary>Output</summary>
+
    get_filenames updated with
    No item present in the image size list
-
-
-
-
    ['/Users/aniket/Pictures/data/tiny-imagenet-200/train/n02795169/images/n02795169_369.JPEG',
     '/Users/aniket/Pictures/data/tiny-imagenet-200/train/n02795169/images/n02795169_386.JPEG',
     '/Users/aniket/Pictures/data/tiny-imagenet-200/train/n02795169/images/n02795169_105.JPEG']
-
+</details>
### Progressive resizing
+
> It is the technique to sequentially resize all the images while training the CNNs on smaller to bigger image sizes. Progressive Resizing is described briefly in his terrific fastai course, “Practical Deep Learning for Coders”. A great way to use this technique is to train a model with smaller image size say 64x64, then use the weights of this model to train another model on images of size 128x128 and so on. Each larger-scale model incorporates the previous smaller-scale model layers and weights in its architecture. ~[KDnuggets](https://www.kdnuggets.com/2019/05/boost-your-image-classification-model.html)

-```
+```python
image_sz_list = [(28, 28), (32, 32), (64, 64)]

ds = Dataset(data_path, image_size=image_sz_list)
ds.update_component('get_filenames', load_files)
ds.update_component('get_label', get_label)
-
-print()
# first call to generator
for img, label in ds.generator():
    print('first call to generator:', img.shape)
@@ -177,21 +196,25 @@ for img, label in ds.generator():
for img, label in ds.generator():
    print('third call to generator:', img.shape)
    break
-
```
+<details>
+<summary>Output</summary>
+
    get_filenames updated with
    get_label updated with
-
+
    first call to generator: (28, 28, 3)
    seconds call to generator: (32, 32, 3)
    third call to generator: (64, 64, 3)
+
+</details>
### tf.data support

-Creating a `tf.data` dataloader was never as easy as this one liner. It converts the Python generator into `tf.data.Dataset` for a faster data loading, prefetching, caching and everything provided by tf.data.
-```
+Creating a `tf.data` dataloader was never as easy as this one-liner. It converts the Python generator
+into a `tf.data.Dataset` for faster data loading, prefetching, caching and everything else provided by tf.data.
+
+```python
image_sz_list = [(28, 28), (32, 32), (64, 64)]

ds = Dataset(data_path, image_size=image_sz_list)
@@ -210,52 +233,49 @@ for e in dl.take(1):
    print(e[0].shape)
```
+<details>
+<summary>Output</summary>
+
    get_filenames updated with
-    get_label updated with 
+    get_label updated with
    (28, 28, 3)
    (32, 32, 3)
    (64, 64, 3)
+
+</details>
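+
+Under the hood this is conceptually similar to wrapping a Python generator with
+`tf.data.Dataset.from_generator` (a generic TensorFlow sketch for illustration only, not chitra's
+actual implementation; the shapes, dtypes and dummy generator below are assumptions):
+
+```python
+import numpy as np
+import tensorflow as tf
+
+def gen():
+    # stand-in for ds.generator(): yields (image, label) pairs
+    for _ in range(4):
+        yield np.zeros((28, 28, 3), dtype="float32"), "cat"
+
+dl = tf.data.Dataset.from_generator(
+    gen,
+    output_signature=(
+        tf.TensorSpec(shape=(None, None, 3), dtype=tf.float32),
+        tf.TensorSpec(shape=(), dtype=tf.string),
+    ),
+).cache().prefetch(tf.data.AUTOTUNE)
+
+for image, label in dl.take(1):
+    print(image.shape)
+```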
## Trainer

-The Trainer class inherits from `tf.keras.Model`, it contains everything that is required for training.
-It exposes trainer.cyclic_fit method which trains the model using Cyclic Learning rate discovered by [Leslie Smith](https://arxiv.org/abs/1506.01186).
-```
+The Trainer class inherits from `tf.keras.Model` and contains everything that is required for training. It exposes
+the trainer.cyclic_fit method, which trains the model using the Cyclic Learning Rate discovered
+by [Leslie Smith](https://arxiv.org/abs/1506.01186).
+
+```python
from chitra.trainer import Trainer, create_cnn
from chitra.datagenerator import Dataset
-from PIL import Image
-```

-```
-ds = Dataset(cat_dog_path, image_size=(224,224))
+
+ds = Dataset(cat_dog_path, image_size=(224, 224))
model = create_cnn('mobilenetv2', num_classes=2, name='Cat_Dog_Model')
trainer = Trainer(ds, model)
# trainer.summary()
```

-    WARNING:tensorflow:`input_shape` is undefined or non-square, or `rows` is not in [96, 128, 160, 192, 224]. Weights for input shape (224, 224) will be loaded as the default.
-
-
-```
+```python
trainer.compile2(batch_size=8,
-        optimizer=tf.keras.optimizers.SGD(1e-3, momentum=0.9, nesterov=True),
-        lr_range=(1e-6, 1e-3),
-        loss='binary_crossentropy',
-        metrics=['binary_accuracy'])
-```
-
-    Model compiled!
+                 optimizer=tf.keras.optimizers.SGD(1e-3, momentum=0.9, nesterov=True),
+                 lr_range=(1e-6, 1e-3),
+                 loss='binary_crossentropy',
+                 metrics=['binary_accuracy'])
-
-```
trainer.cyclic_fit(epochs=5,
-        batch_size=8,
-        lr_range=(0.00001, 0.0001),
-        )
+                   batch_size=8,
+                   lr_range=(0.00001, 0.0001),
+)
```
+<details>
+<summary>Training Loop...</summary>
+
    cyclic learning rate already set!
+
    Epoch 1/5
    1/1 [==============================] - 0s 14ms/step - loss: 6.4702 - binary_accuracy: 0.2500
    Epoch 2/5
@@ -271,24 +291,22 @@ trainer.cyclic_fit(epochs=5,
    Returning the last set size which is: (224, 224)
    1/1 [==============================] - 0s 982us/step - loss: 1.9062 - binary_accuracy: 0.8750
-
-
-
-
+
+</details>
+## ✨ Model Interpretability

-## Model Visualization
-It is important to understand what is going inside the model. Techniques like GradCam and Saliency Maps can visualize what the Network is learning. `trainer` module has InterpretModel class which creates GradCam and GradCam++ visualization with almost no additional code.
+It is important to understand what is going on inside the model. Techniques like GradCAM and Saliency Maps can visualize
+what the network is learning. The `trainer` module has an InterpretModel class which creates GradCAM and GradCAM++
+visualizations with almost no additional code.

-```
+```python
from chitra.trainer import InterpretModel
+
trainer = Trainer(ds, create_cnn('mobilenetv2', num_classes=1000, keras_applications=False))
model_interpret = InterpretModel(True, trainer)
-```
-```
image = ds[1][0].numpy().astype('uint8')
image = Image.fromarray(image)
model_interpret(image)
@@ -297,57 +315,87 @@ print(IMAGENET_LABELS[285])
    Returning the last set size which is: (224, 224)
    index: 282
+
    Egyptian Mau

+![png](https://raw.githubusercontent.com/aniketmaurya/chitra/master/docs/assets/images/output_22_1.png)
+
+## 🎨 Data Visualization

-![png]({{ nbs_image.base_url }}/output_22_1.png)

### Image annotation

-    Egyptian Mau
+Bounding-box creation is built on top of the `imgaug` library.

+```python
+from chitra.image import Chitra
+import matplotlib.pyplot as plt

-## Data Visualization
+bbox = [70, 25, 190, 210]
+label = 'Dog'

-### Image annotation
+image = Chitra(image_path, bboxes=bbox, labels=label)
+plt.imshow(image.draw_boxes())
+```

-Thanks to [**fizyr**](https://github.com/fizyr/keras-retinanet) keras-retinanet.
+![png](https://raw.githubusercontent.com/aniketmaurya/chitra/master/docs/assets/images/preview-bounding-box.png)

-```
-from chitra.visualization import draw_annotations
+See [Play with Images](https://chitra.readthedocs.io/en/latest/examples/chitra-class/chitra-class.html) for a detailed
+example!
+
-labels = np.array([label])
-bbox = np.array([[30, 50, 170, 190]])
-label_to_name = lambda x: 'Cat' if x==0 else 'Dog'
+## 🚀 Model Serving (Framework Agnostic)
+
+Chitra can create a REST API or an interactive UI app for any learning model -
+ML, DL, image classification, NLP, TensorFlow, PyTorch or scikit-learn.

-draw_annotations(image, ({'bboxes': bbox, 'labels':labels,}), label_to_name=label_to_name)
-plt.imshow(image)
-plt.show()
+It provides `chitra.serve.GradioApp` for building an interactive UI app for ML/DL models
+and `chitra.serve.API` for building a REST API endpoint.
+
+```python
+from chitra.serve import create_api
+from chitra.trainer import create_cnn
+
+model = create_cnn('mobilenetv2', num_classes=2)
+create_api(model, run=True, api_type='image-classification')
```
+<details>
+<summary>API Docs Preview</summary>

-![png]({{ nbs_image.base_url }}/output_24_0.png)
+![Preview Model Server](https://raw.githubusercontent.com/aniketmaurya/chitra/master/docs/examples/model-server/preview.png)
+
+</details>
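+
+An interactive UI prototype can be launched the same way with `chitra.serve.GradioApp`. A minimal
+sketch (the constructor arguments shown below simply mirror `create_api` and are an assumption;
+check the serve docs for the exact signature):
+
+```python
+from chitra.serve import GradioApp
+from chitra.trainer import create_cnn
+
+model = create_cnn('mobilenetv2', num_classes=2)
+
+# assumed to take the task type and the model, analogous to create_api
+app = GradioApp('image-classification', model=model)
+app.run()
+```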
-## Utils +See [Example Section](https://chitra.readthedocs.io/en/latest/source/api/serve/model_server/) for detailed +explanation! -Limit GPU memory or enable dynamic GPU memory growth for Tensorflow +## 🛠 Utility -``` -from chitra.utils import limit_gpu, gpu_dynamic_mem_growth +Limit GPU memory or enable dynamic GPU memory growth for Tensorflow. + +```python +from chitra.utility.tf_utils import limit_gpu, gpu_dynamic_mem_growth # limit the amount of GPU required for your training -limit_gpu(gpu_id=0, memory_limit=1024*2) +limit_gpu(gpu_id=0, memory_limit=1024 * 2) ``` No GPU:0 found in your system! - -``` +```python gpu_dynamic_mem_growth() ``` No GPU found on the machine! +## 🤗 Contribute + +Contributions of any kind are welcome. Please check the [**Contributing +Guidelines**](https://github.com/aniketmaurya/chitra/blob/master/CONTRIBUTING.md) before contributing. + +## Code Of Conduct + +We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community. + +Read full [**Contributor Covenant Code of Conduct**](https://github.com/aniketmaurya/chitra/blob/master/CODE_OF_CONDUCT.md) -## Contributing +## Acknowledgement -Contributions of any kind are welcome. Please check the [**Contributing Guidelines**](https://github.com/aniketmaurya/chitra/blob/master/CONTRIBUTING.md) before contributing. +*chitra* is built with help of awesome libraries like [Tensorflow 2.x](https://github.com/tensorflow/tensorflow), +[imgaug](https://github.com/aleju/imgaug), [FastAPI](https://github.com/tiangolo/fastapi) and [Gradio](https://gradio.app). diff --git a/docs/license.md b/docs/license.md index a6b0363f..cc53ecc2 100644 --- a/docs/license.md +++ b/docs/license.md @@ -200,4 +200,4 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -``` \ No newline at end of file +``` diff --git a/docs/old_source/_data/alerts.yml b/docs/old_source/_data/alerts.yml deleted file mode 100644 index 157e1622..00000000 --- a/docs/old_source/_data/alerts.yml +++ /dev/null @@ -1,15 +0,0 @@ -tip: '