From 243cd4215fe7662d409596de335b89100baecad3 Mon Sep 17 00:00:00 2001 From: "Christoph.Heindl" Date: Fri, 8 Apr 2022 05:39:03 +0200 Subject: [PATCH 1/9] fixed version parsing --- setup.py | 31 ++++++++++++++++--------------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/setup.py b/setup.py index a933c91a..7682093a 100644 --- a/setup.py +++ b/setup.py @@ -5,36 +5,37 @@ """ import io import os + try: from setuptools import setup except ImportError: from distutils.core import setup -with io.open('requirements.txt') as f: +with io.open("requirements.txt") as f: required = f.read().splitlines() -with io.open('Readme.md', encoding='utf-8') as f: +with io.open("Readme.md", encoding="utf-8") as f: long_description = f.read() # Handle version number with optional .dev postfix when building a develop branch # on AppVeyor. -VERSION = io.open('motmetrics/__init__.py').readlines()[-1].split()[-1].strip('\'') -BUILD_NUMBER = os.environ.get('APPVEYOR_BUILD_NUMBER', None) -BRANCH_NAME = os.environ.get('APPVEYOR_REPO_BRANCH', 'develop') -if BUILD_NUMBER is not None and BRANCH_NAME != 'master': - VERSION = '{}.dev{}'.format(VERSION, BUILD_NUMBER) +VERSION = io.open("motmetrics/__init__.py").readlines()[-1].split()[-1].strip('"') +BUILD_NUMBER = os.environ.get("APPVEYOR_BUILD_NUMBER", None) +BRANCH_NAME = os.environ.get("APPVEYOR_REPO_BRANCH", "develop") +if BUILD_NUMBER is not None and BRANCH_NAME != "master": + VERSION = "{}.dev{}".format(VERSION, BUILD_NUMBER) setup( - name='motmetrics', + name="motmetrics", version=VERSION, - description='Metrics for multiple object tracker benchmarking.', - author='Christoph Heindl, Jack Valmadre', - url='https://github.com/cheind/py-motmetrics', - license='MIT', + description="Metrics for multiple object tracker benchmarking.", + author="Christoph Heindl, Jack Valmadre", + url="https://github.com/cheind/py-motmetrics", + license="MIT", install_requires=required, - packages=['motmetrics', 'motmetrics.tests', 'motmetrics.apps'], + packages=["motmetrics", "motmetrics.tests", "motmetrics.apps"], include_package_data=True, - keywords='tracker MOT evaluation metrics compare', + keywords="tracker MOT evaluation metrics compare", long_description=long_description, - long_description_content_type='text/markdown', + long_description_content_type="text/markdown", ) From 1ad168c1f194e2ae06102d368d561b2f4f82fda3 Mon Sep 17 00:00:00 2001 From: Christoph Heindl Date: Sun, 24 Apr 2022 18:41:11 +0200 Subject: [PATCH 2/9] Delete appveyor.yml --- appveyor.yml | 84 ---------------------------------------------------- 1 file changed, 84 deletions(-) delete mode 100644 appveyor.yml diff --git a/appveyor.yml b/appveyor.yml deleted file mode 100644 index 192ec127..00000000 --- a/appveyor.yml +++ /dev/null @@ -1,84 +0,0 @@ -environment: - TWINE_USERNAME: cheind - TWINE_PASSWORD: - secure: hnxMBvmJAGM1rQVOUbkGvQ== - - # http://www.appveyor.com/docs/installed-software#python - matrix: - - PYTHON: "C:\\Miniconda36-x64" - PYTHON_VERSION: "3.6" - PYTHON_ARCH: "64" - - PYTHON: "C:\\Miniconda36" - PYTHON_VERSION: "3.6" - PYTHON_ARCH: "32" - - PYTHON: "C:\\Miniconda35-x64" - PYTHON_VERSION: "3.5" - PYTHON_ARCH: "64" - - PYTHON: "C:\\Miniconda35" - PYTHON_VERSION: "3.5" - PYTHON_ARCH: "32" - -install: - - set "CONDA_ROOT=%PYTHON%" - - set "PATH=%CONDA_ROOT%;%CONDA_ROOT%\Scripts;%CONDA_ROOT%\Library\bin;%PATH%" - - conda config --set always_yes yes - - conda update -q conda - - conda config --set auto_update_conda no - - conda install -q pip pytest pytest-benchmark numpy cython - - 
python -m pip install -U pip - - pip install wheel - - pip install --upgrade --ignore-installed setuptools - # Install solvers for testing. - - pip install lap lapsolver munkres - # OR-Tools does not support 32-bit. - # https://developers.google.com/optimization/install/python/windows - - ps: >- - if ($env:PYTHON_ARCH -eq "64") { - cmd /c 'pip install ortools 2>&1' - } - -build_script: - - python setup.py sdist - - python setup.py bdist_wheel - -test_script: - # Try building source wheel and install - # Redirect stderr of pip within powershell. - - ps: >- - $wheel = cmd /r dir .\dist\*.tar.gz /b/s; - cmd /c "pip install $wheel 2>&1" - - pytest --pyargs motmetrics - -deploy_script: - ps: >- - if ($env:PYTHON_VERSION -eq "3.6") { - Write-Output("Deploying..."); - pip install --upgrade twine; - if ($env:APPVEYOR_REPO_BRANCH -eq "master") { - Write-Output("Deploying to PyPI") - # If powershell ever sees anything on stderr it thinks it's a fail. - # So we use cmd to redirect stderr to stdout before PS can see it. - cmd /c 'twine upload --skip-existing dist\* 2>&1' - } elseif ($env:APPVEYOR_REPO_BRANCH -eq "develop") { - Write-Output("Deploying to PyPI-Test") - cmd /c 'twine upload --skip-existing --verbose --repository testpypi dist\* 2>&1' - } - } - -artifacts: - - path: "dist\\*.whl" - - path: "dist\\*.tar.gz" - name: Wheels - -notifications: - - provider: Email - to: - - christoph.heindl@email.com - on_build_success: true - on_build_failure: true - -branches: - only: - - master - - develop - - /feature.*/ From 008f45c563289bf4daf93cc561e8279af73ad2b9 Mon Sep 17 00:00:00 2001 From: whizmo <6362306+whizmo@users.noreply.github.com> Date: Wed, 31 Aug 2022 18:28:47 +0200 Subject: [PATCH 3/9] Corrected motmetrics.utils.compare_to_groundtruth(,,dist='euc') to use euclidean distances. (#168) The squared distance can still be used with dist='seuc'. Co-authored-by: Angel Carro-Lagoa --- motmetrics/utils.py | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/motmetrics/utils.py b/motmetrics/utils.py index d0c64714..0b70a56a 100644 --- a/motmetrics/utils.py +++ b/motmetrics/utils.py @@ -37,7 +37,8 @@ def compare_to_groundtruth(gt, dt, dist='iou', distfields=None, distth=0.5): Kwargs ------ dist : str, optional - String identifying distance to be used. Defaults to intersection over union. + String identifying distance to be used. Defaults to intersection over union ('iou'). Euclidean + distance ('euc') and squared euclidean distance ('seuc') are also supported. distfields: array, optional Fields relevant for extracting distance information. Defaults to ['X', 'Y', 'Width', 'Height'] distth: float, optional @@ -51,9 +52,19 @@ def compute_iou(a, b): return iou_matrix(a, b, max_iou=distth) def compute_euc(a, b): + return np.sqrt(norm2squared_matrix(a, b, max_d2=distth**2)) + + def compute_seuc(a, b): return norm2squared_matrix(a, b, max_d2=distth) - compute_dist = compute_iou if dist.upper() == 'IOU' else compute_euc + if dist.upper() == 'IOU': + compute_dist = compute_iou + elif dist.upper() == 'EUC': + compute_dist = compute_euc + elif dist.upper() == 'SEUC': + compute_dist = compute_seuc + else: + raise f'Unknown distance metric {dist}. Use "IOU", "EUC" or "SEUC"' acc = MOTAccumulator() From c30f3fb5a0a77215efc4c7bcfe1ed09d1c12586f Mon Sep 17 00:00:00 2001 From: Ardeshir Shojaeinasab Date: Fri, 25 Nov 2022 06:43:34 -0800 Subject: [PATCH 4/9] Update eval_detrac.py (#171) Logging is never executed and always raises an error which is not correct. 
I made the change so that the log level is checked against the valid numeric values and then cast to an integer. Please accept the change; without it, debugging is very hard.
---
 motmetrics/apps/eval_detrac.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/motmetrics/apps/eval_detrac.py b/motmetrics/apps/eval_detrac.py
index f030f178..9da46764 100644
--- a/motmetrics/apps/eval_detrac.py
+++ b/motmetrics/apps/eval_detrac.py
@@ -87,8 +87,10 @@ def main():
     args = parse_args()

     loglevel = getattr(logging, args.loglevel.upper(), None)
-    if not isinstance(loglevel, int):
+
+    if loglevel not in ["0","10","20","30","40","50"]:  # validate against the numeric log levels; the previous check always raised an error [Ardeshir Shon]
         raise ValueError('Invalid log level: {} '.format(args.loglevel))
+    loglevel = int(loglevel)  # cast to int, since logging.basicConfig expects a numeric level
     logging.basicConfig(level=loglevel, format='%(asctime)s %(levelname)s - %(message)s', datefmt='%I:%M:%S')

     if args.solver:

From fa288b66424ac944e65fc18af2f9dc09416155d4 Mon Sep 17 00:00:00 2001
From: Alexander Litzenberger
Date: Fri, 23 Dec 2022 10:43:53 -0500
Subject: [PATCH 5/9] Get rid of np.bool deprecation warning. (#155)

* Get rid of DeprecationWarning

* Get rid of np.bool deprecation warning.
---
 motmetrics/mot.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/motmetrics/mot.py b/motmetrics/mot.py
index a6bf0e76..4ffc3adf 100644
--- a/motmetrics/mot.py
+++ b/motmetrics/mot.py
@@ -175,9 +175,9 @@ def update(self, oids, hids, dists, frameid=None, vf=''):
         self.dirty_events = True
         oids = np.asarray(oids)
-        oids_masked = np.zeros_like(oids, dtype=np.bool)
+        oids_masked = np.zeros_like(oids, dtype=np.bool_)
         hids = np.asarray(hids)
-        hids_masked = np.zeros_like(hids, dtype=np.bool)
+        hids_masked = np.zeros_like(hids, dtype=np.bool_)
         dists = np.atleast_2d(dists).astype(float).reshape(oids.shape[0], hids.shape[0]).copy()

         if frameid is None:

From 656079fab58bfbc32d88911e9ac03d6f666c4c49 Mon Sep 17 00:00:00 2001
From: Christoph Heindl
Date: Fri, 23 Dec 2022 16:52:33 +0100
Subject: [PATCH 6/9] downgraded to latest supported version of ortools
---
 .github/workflows/python-package.yml |  63 +++++++------
 Readme.md                            | 134 +++++++++++++++-------
 2 files changed, 107 insertions(+), 90 deletions(-)

diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml
index 208e3f41..fa80959f 100644
--- a/.github/workflows/python-package.yml
+++ b/.github/workflows/python-package.yml
@@ -4,38 +4,37 @@ name: Python package

 on:
-  push:
-    branches: [ develop ]
-  pull_request:
-    branches: [ develop ]
+  push:
+    branches: [develop]
+  pull_request:
+    branches: [develop]

 jobs:
-  build:
+  build:
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        python-version: ["3.8", "3.9", "3.10"]

-    runs-on: ubuntu-latest
-    strategy:
-      fail-fast: false
-      matrix:
-        python-version: ["3.8", "3.9", "3.10"]
-
-    steps:
-    - uses: actions/checkout@v3
-    - name: Set up Python ${{ matrix.python-version }}
-      uses: actions/setup-python@v3
-      with:
-        python-version: ${{ matrix.python-version }}
-    - name: Install dependencies
-      run: |
-        python -m pip install --upgrade pip
-        python -m pip install flake8 pytest pytest-benchmark
-        if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
-        pip install lap scipy ortools lapsolver munkres
-    - name: Lint with flake8
-      run: |
-        # stop the build if there are Python syntax errors or undefined names
-        flake8 .
--count --select=E9,F63,F7,F82 --show-source --statistics - # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide - flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics - - name: Test with pytest - run: | - pytest + steps: + - uses: actions/checkout@v3 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v3 + with: + python-version: ${{ matrix.python-version }} + - name: Install dependencies + run: | + python -m pip install --upgrade pip + python -m pip install flake8 pytest pytest-benchmark + if [ -f requirements.txt ]; then pip install -r requirements.txt; fi + pip install lap scipy "ortools<9.5" lapsolver munkres + - name: Lint with flake8 + run: | + # stop the build if there are Python syntax errors or undefined names + flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics + # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide + flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics + - name: Test with pytest + run: | + pytest diff --git a/Readme.md b/Readme.md index 0b603c90..f19571ad 100644 --- a/Readme.md +++ b/Readme.md @@ -10,27 +10,30 @@ While benchmarking single object trackers is rather straightforward, measuring t ![](./motmetrics/etc/mot.png)
-*Pictures courtesy of Bernardin, Keni, and Rainer Stiefelhagen [[1]](#References)* +_Pictures courtesy of Bernardin, Keni, and Rainer Stiefelhagen [[1]](#References)_ + In particular **py-motmetrics** supports `CLEAR-MOT`[[1,2]](#References) metrics and `ID`[[4]](#References) metrics. Both metrics attempt to find a minimum cost assignment between ground truth objects and predictions. However, while CLEAR-MOT solves the assignment problem on a local per-frame basis, `ID-MEASURE` solves the bipartite graph matching by finding the minimum cost of objects and predictions over all frames. This [blog-post](https://web.archive.org/web/20190413133409/http://vision.cs.duke.edu:80/DukeMTMC/IDmeasures.html) by Ergys illustrates the differences in more detail. ## Features at a glance -- *Variety of metrics*
-Provides MOTA, MOTP, track quality measures, global ID measures and more. The results are [comparable](#MOTChallengeCompatibility) with the popular [MOTChallenge][MOTChallenge] benchmarks [(*1)](#asterixcompare). -- *Distance agnostic*
-Supports Euclidean, Intersection over Union and other distances measures. -- *Complete event history*
-Tracks all relevant per-frame events suchs as correspondences, misses, false alarms and switches. -- *Flexible solver backend*
-Support for switching minimum assignment cost solvers. Supports `scipy`, `ortools`, `munkres` out of the box. Auto-tunes solver selection based on [availability and problem size](#SolverBackends). -- *Easy to extend*
-Events and summaries are utilizing [pandas][pandas] for data structures and analysis. New metrics can reuse already computed values from depending metrics. + +- _Variety of metrics_
+ Provides MOTA, MOTP, track quality measures, global ID measures and more. The results are [comparable](#MOTChallengeCompatibility) with the popular [MOTChallenge][motchallenge] benchmarks [(\*1)](#asterixcompare). +- _Distance agnostic_
  Supports Euclidean, Intersection over Union and other distance measures.
- _Complete event history_
  Tracks all relevant per-frame events such as correspondences, misses, false alarms and switches.
- _Flexible solver backend_
+ Support for switching minimum assignment cost solvers. Supports `scipy`, `ortools`, `munkres` out of the box. Auto-tunes solver selection based on [availability and problem size](#SolverBackends). +- _Easy to extend_
+ Events and summaries are utilizing [pandas][pandas] for data structures and analysis. New metrics can reuse already computed values from depending metrics. + ## Metrics -**py-motmetrics** implements the following metrics. The metrics have been aligned with what is reported by [MOTChallenge][MOTChallenge] benchmarks. +**py-motmetrics** implements the following metrics. The metrics have been aligned with what is reported by [MOTChallenge][motchallenge] benchmarks. ```python import motmetrics as mm @@ -39,42 +42,41 @@ mh = mm.metrics.create() print(mh.list_metrics_markdown()) ``` -Name|Description -:---|:--- -num_frames|Total number of frames. -num_matches|Total number matches. -num_switches|Total number of track switches. -num_false_positives|Total number of false positives (false-alarms). -num_misses|Total number of misses. -num_detections|Total number of detected objects including matches and switches. -num_objects|Total number of unique object appearances over all frames. -num_predictions|Total number of unique prediction appearances over all frames. -num_unique_objects|Total number of unique object ids encountered. -mostly_tracked|Number of objects tracked for at least 80 percent of lifespan. -partially_tracked|Number of objects tracked between 20 and 80 percent of lifespan. -mostly_lost|Number of objects tracked less than 20 percent of lifespan. -num_fragmentations|Total number of switches from tracked to not tracked. -motp|Multiple object tracker precision. -mota|Multiple object tracker accuracy. -precision|Number of detected objects over sum of detected and false positives. -recall|Number of detections over number of objects. -idfp|ID measures: Number of false positive matches after global min-cost matching. -idfn|ID measures: Number of false negatives matches after global min-cost matching. -idtp|ID measures: Number of true positives matches after global min-cost matching. -idp|ID measures: global min-cost precision. -idr|ID measures: global min-cost recall. -idf1|ID measures: global min-cost F1 score. -obj_frequencies|`pd.Series` Total number of occurrences of individual objects over all frames. -pred_frequencies|`pd.Series` Total number of occurrences of individual predictions over all frames. -track_ratios|`pd.Series` Ratio of assigned to total appearance count per unique object id. -id_global_assignment| `dict` ID measures: Global min-cost assignment for ID measures. - - +| Name | Description | +| :------------------- | :--------------------------------------------------------------------------------- | +| num_frames | Total number of frames. | +| num_matches | Total number matches. | +| num_switches | Total number of track switches. | +| num_false_positives | Total number of false positives (false-alarms). | +| num_misses | Total number of misses. | +| num_detections | Total number of detected objects including matches and switches. | +| num_objects | Total number of unique object appearances over all frames. | +| num_predictions | Total number of unique prediction appearances over all frames. | +| num_unique_objects | Total number of unique object ids encountered. | +| mostly_tracked | Number of objects tracked for at least 80 percent of lifespan. | +| partially_tracked | Number of objects tracked between 20 and 80 percent of lifespan. | +| mostly_lost | Number of objects tracked less than 20 percent of lifespan. | +| num_fragmentations | Total number of switches from tracked to not tracked. | +| motp | Multiple object tracker precision. 
| +| mota | Multiple object tracker accuracy. | +| precision | Number of detected objects over sum of detected and false positives. | +| recall | Number of detections over number of objects. | +| idfp | ID measures: Number of false positive matches after global min-cost matching. | +| idfn | ID measures: Number of false negatives matches after global min-cost matching. | +| idtp | ID measures: Number of true positives matches after global min-cost matching. | +| idp | ID measures: global min-cost precision. | +| idr | ID measures: global min-cost recall. | +| idf1 | ID measures: global min-cost F1 score. | +| obj_frequencies | `pd.Series` Total number of occurrences of individual objects over all frames. | +| pred_frequencies | `pd.Series` Total number of occurrences of individual predictions over all frames. | +| track_ratios | `pd.Series` Ratio of assigned to total appearance count per unique object id. | +| id_global_assignment | `dict` ID measures: Global min-cost assignment for ID measures. | + ## MOTChallenge compatibility -**py-motmetrics** produces results compatible with popular [MOTChallenge][MOTChallenge] benchmarks [(*1)](#asterixcompare). Below are two results taken from MOTChallenge [Matlab devkit][devkit] corresponding to the results of the CEM tracker on the training set of the 2015 MOT 2DMark. +**py-motmetrics** produces results compatible with popular [MOTChallenge][motchallenge] benchmarks [(\*1)](#asterixcompare). Below are two results taken from MOTChallenge [Matlab devkit][devkit] corresponding to the results of the CEM tracker on the training set of the 2015 MOT 2DMark. ``` @@ -96,15 +98,19 @@ TUD-Campus 55.8% 73.0% 45.1% 58.2% 94.1% 8 1 6 1 13 150 7 7 52.6% 0. TUD-Stadtmitte 64.5% 82.0% 53.1% 60.9% 94.0% 10 5 4 1 45 452 7 6 56.4% 0.346 ``` -(*1) Besides naming conventions, the only obvious differences are -- Metric `FAR` is missing. This metric is given implicitly and can be recovered by `FalsePos / Frames * 100`. -- Metric `MOTP` seems to be off. To convert compute `(1 - MOTP) * 100`. [MOTChallenge][MOTChallenge] benchmarks compute `MOTP` as percentage, while **py-motmetrics** sticks to the original definition of average distance over number of assigned objects [[1]](#References). +(\*1) Besides naming conventions, the only obvious differences are + +- Metric `FAR` is missing. This metric is given implicitly and can be recovered by `FalsePos / Frames * 100`. +- Metric `MOTP` seems to be off. To convert compute `(1 - MOTP) * 100`. [MOTChallenge][motchallenge] benchmarks compute `MOTP` as percentage, while **py-motmetrics** sticks to the original definition of average distance over number of assigned objects [[1]](#References). You can compare tracker results to ground truth in MOTChallenge format by + ``` python -m motmetrics.apps.eval_motchallenge --help ``` + For MOT16/17, you can run + ``` python -m motmetrics.apps.evaluateTracking --help ``` @@ -117,8 +123,8 @@ To install latest development version of **py-motmetrics** (usually a bit more r pip install git+https://github.com/cheind/py-motmetrics.git ``` - ### Install via PyPi + To install **py-motmetrics** use `pip` ``` @@ -134,6 +140,7 @@ pip install -e ``` ### Install via Conda + In case you are using Conda, a simple way to run **py-motmetrics** is to create a virtual environment with all the necessary dependencies ``` @@ -261,6 +268,7 @@ Event Object `2` is now tracked by hypothesis `3` leading to a track switch. 
Note, although a pairing `(1, 3)` with cost less than 0.6 is possible, the algorithm prefers to continue track assignments from past frames which is a property of MOT metrics.

### Computing metrics
+
Once the accumulator has been populated you can compute and display metrics. Continuing the example from above

```python
mh = mm.metrics.create()
summary = mh.compute(acc, metrics=['num_frames', 'mota', 'motp'], name='acc')
print(summary)
```

@@ -355,6 +363,7 @@ OVERALL 80.0% 80.0% 80.0% 80.0% 80.0% 4 2 2 0 2 2 1 1 50.0% 0.275
```

### Computing distances
+
Up until this point we assumed the pairwise object/hypothesis distances to be known. Usually this is not the case. You are mostly given either rectangles or points (centroids) of related objects. To compute a distance matrix from them you can use the `motmetrics.distances` module as shown below.

#### Euclidean norm squared on points

```python
a = np.array([
[1., 2],
[2., 2],
[3., 2],
])

b = np.array([
[0., 1],
[1., 1],
[2., 1],
[5., 1],
])
C = mm.distances.norm2squared_matrix(a, b, max_d2=5.)
```

#### Intersection over union norm for 2D rectangles
+
```python
a = np.array([
[0, 0, 1, 2], # Format X, Y, Width, Height
[0, 0, 0.8, 1.5],
])

b = np.array([
[0, 0, 1, 2],
[0, 0, 1, 1],
[0.1, 0.2, 2, 2],
])
mm.distances.iou_matrix(a, b, max_iou=0.5)
```

+
### Solver backends
+
For large datasets solving the minimum cost assignment becomes the dominant runtime part. **py-motmetrics** therefore supports these solvers out of the box

- - `lapsolver` - https://github.com/cheind/py-lapsolver
- - `lapjv` - https://github.com/gatagat/lap
- - `scipy` - https://github.com/scipy/scipy/tree/master/scipy
- - `ortools` - https://github.com/google/or-tools
- - `munkres` - http://software.clapper.org/munkres/
+
+- `lapsolver` - https://github.com/cheind/py-lapsolver
+- `lapjv` - https://github.com/gatagat/lap
+- `scipy` - https://github.com/scipy/scipy/tree/master/scipy
+- `ortools<9.5` - https://github.com/google/or-tools
+- `munkres` - http://software.clapper.org/munkres/

A comparison for different sized matrices is shown below (taken from [here](https://github.com/cheind/py-lapsolver#benchmarks))

@@ -427,20 +440,24 @@ with lap.set_default_solver(mysolver):
```

## Running tests
+
**py-motmetrics** uses the pytest framework. To run the tests, simply `cd` into the source directory and run `pytest`.
+
### References
+
1. Bernardin, Keni, and Rainer Stiefelhagen. "Evaluating multiple object tracking performance: the CLEAR MOT metrics."
-EURASIP Journal on Image and Video Processing 2008.1 (2008): 1-10.
+ EURASIP Journal on Image and Video Processing 2008.1 (2008): 1-10.
2. Milan, Anton, et al. "Mot16: A benchmark for multi-object tracking." arXiv preprint arXiv:1603.00831 (2016).
3. Li, Yuan, Chang Huang, and Ram Nevatia. "Learning to associate: Hybridboosted multi-target tracker for crowded scene."
-Computer Vision and Pattern Recognition, 2009. CVPR 2009. IEEE Conference on. IEEE, 2009.
+ Computer Vision and Pattern Recognition, 2009. CVPR 2009. IEEE Conference on. IEEE, 2009.
4. Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking. E. Ristani, F. Solera, R. S. Zou, R. Cucchiara and C. Tomasi. ECCV 2016 Workshop on Benchmarking Multi-Target Tracking.

## Docker

### Update ground truth and test data:
+
/data/train directory should contain MOT 2D 2015 Ground Truth files. /data/test directory should contain your results.

You can check usage and directory listing at
https://github.com/cheind/py-motmetrics/blob/master/motmetrics/apps/eval_motchallenge.py

### Build Image
+
docker build -t desired-image-name -f Dockerfile .
### Run Image + docker run desired-image-name (credits to [christosavg](https://github.com/christosavg)) @@ -483,7 +502,6 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ``` - -[Pandas]: http://pandas.pydata.org/ -[MOTChallenge]: https://motchallenge.net/ +[pandas]: http://pandas.pydata.org/ +[motchallenge]: https://motchallenge.net/ [devkit]: https://motchallenge.net/devkit/ From ea0a55e1e7c6cf9fe64aed3629b521665dc7f4ab Mon Sep 17 00:00:00 2001 From: Christoph Heindl Date: Fri, 23 Dec 2022 16:54:31 +0100 Subject: [PATCH 7/9] downgraded ortools package --- .github/workflows/python-package.yml | 2 +- Readme.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml index fa80959f..885cf854 100644 --- a/.github/workflows/python-package.yml +++ b/.github/workflows/python-package.yml @@ -28,7 +28,7 @@ jobs: python -m pip install --upgrade pip python -m pip install flake8 pytest pytest-benchmark if [ -f requirements.txt ]; then pip install -r requirements.txt; fi - pip install lap scipy "ortools<9.5" lapsolver munkres + pip install lap scipy "ortools<9.4" lapsolver munkres - name: Lint with flake8 run: | # stop the build if there are Python syntax errors or undefined names diff --git a/Readme.md b/Readme.md index f19571ad..cba693e2 100644 --- a/Readme.md +++ b/Readme.md @@ -421,7 +421,7 @@ For large datasets solving the minimum cost assignment becomes the dominant runt - `lapsolver` - https://github.com/cheind/py-lapsolver - `lapjv` - https://github.com/gatagat/lap - `scipy` - https://github.com/scipy/scipy/tree/master/scipy -- `ortools<9.5` - https://github.com/google/or-tools +- `ortools<9.4` - https://github.com/google/or-tools - `munkres` - http://software.clapper.org/munkres/ A comparison for different sized matrices is shown below (taken from [here](https://github.com/cheind/py-lapsolver#benchmarks)) From 9cb9bfe2cac16f71a1f8d2ba28c26314ff82dc65 Mon Sep 17 00:00:00 2001 From: Christoph Heindl Date: Mon, 26 Dec 2022 10:07:03 +0100 Subject: [PATCH 8/9] version bump --- motmetrics/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/motmetrics/__init__.py b/motmetrics/__init__.py index 9ad6aa5e..ff8eabbb 100644 --- a/motmetrics/__init__.py +++ b/motmetrics/__init__.py @@ -32,4 +32,4 @@ from motmetrics.mot import MOTAccumulator # Needs to be last line -__version__ = "1.2.5" +__version__ = "1.4.0" From b72d48a2becdbfef3e99673a1979739dee248d4e Mon Sep 17 00:00:00 2001 From: Christoph Heindl Date: Mon, 26 Dec 2022 10:07:19 +0100 Subject: [PATCH 9/9] ignoring .venv --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index d1e2109f..c06cbbb5 100644 --- a/.gitignore +++ b/.gitignore @@ -50,3 +50,4 @@ Temporary Items *.egg-info/ build/ dist/ +.venv/ \ No newline at end of file
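A quick way to see the effect of PATCH 3/9 is to compare the two distance modes directly. The sketch below is illustrative only: the point sets `o`, `h` and the threshold value are made up, not part of the patches. It mirrors the `compute_euc`/`compute_seuc` helpers added to `motmetrics/utils.py` above and uses only the existing `motmetrics.distances.norm2squared_matrix` API.

```python
import numpy as np
import motmetrics as mm

# Hypothetical ground-truth (o) and hypothesis (h) centroids, format X, Y.
o = np.array([[1.0, 2.0], [2.0, 2.0]])
h = np.array([[1.0, 1.0], [5.0, 5.0]])

distth = 2.0  # gating threshold, in the same unit as the coordinates

# dist='seuc' keeps the old behaviour: squared Euclidean distances,
# gated directly at max_d2=distth.
seuc = mm.distances.norm2squared_matrix(o, h, max_d2=distth)

# dist='euc' after the patch: gate the squared distances at distth**2,
# then take the square root to obtain plain Euclidean costs.
euc = np.sqrt(mm.distances.norm2squared_matrix(o, h, max_d2=distth**2))

print(seuc)  # squared costs; pairs beyond the gate become NaN
print(euc)   # plain Euclidean costs
```

Passing `dist='euc'` or `dist='seuc'` to `motmetrics.utils.compare_to_groundtruth` applies the same two computations internally.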