From 4dd99b7ee8934e584173a471f434298bb8b823c1 Mon Sep 17 00:00:00 2001 From: Leonardo Christino Date: Tue, 1 Jun 2021 12:45:36 -0300 Subject: [PATCH] First Commit --- .gitignore | 213 ++++ LICENCE | 21 + MANIFEST.in | 0 README.md | 71 ++ examples/.gitignore | 1 + examples/__init__.py | 0 examples/mammals.data | 100 ++ examples/mammmals_cpu.py | 43 + examples/mammmals_large_cpu.py | 44 + examples/mammmals_large_gpu.py | 44 + neo_force_scheme/__init__.py | 2 + neo_force_scheme/_neo_force_scheme.py | 317 ++++++ neo_force_scheme/cuda_utils.py | 56 ++ neo_force_scheme/distances.py | 1300 +++++++++++++++++++++++++ neo_force_scheme/kruskal_stress.py | 25 + neo_force_scheme/utils.py | 99 ++ requirements.txt | 5 + setup.py | 29 + 18 files changed, 2370 insertions(+) create mode 100644 .gitignore create mode 100644 LICENCE create mode 100644 MANIFEST.in create mode 100644 README.md create mode 100644 examples/.gitignore create mode 100644 examples/__init__.py create mode 100644 examples/mammals.data create mode 100644 examples/mammmals_cpu.py create mode 100644 examples/mammmals_large_cpu.py create mode 100644 examples/mammmals_large_gpu.py create mode 100644 neo_force_scheme/__init__.py create mode 100644 neo_force_scheme/_neo_force_scheme.py create mode 100644 neo_force_scheme/cuda_utils.py create mode 100644 neo_force_scheme/distances.py create mode 100644 neo_force_scheme/kruskal_stress.py create mode 100644 neo_force_scheme/utils.py create mode 100644 requirements.txt create mode 100644 setup.py diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..0810149 --- /dev/null +++ b/.gitignore @@ -0,0 +1,213 @@ +local_storage/ + +# Created by .ignore support plugin (hsz.mobi) +### Python template +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +env/ +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +*.egg-info/ +.installed.cfg +*.egg + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*,cover +.hypothesis/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# IPython Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# dotenv +.env + +# virtualenv +venv/ +ENV/ + +# Spyder project settings +.spyderproject + +# Rope project settings +.ropeproject +### VirtualEnv template +# Virtualenv +# http://iamzed.com/2009/05/07/a-primer-on-virtualenv/ +.Python +[Bb]in +[Ii]nclude +[Ll]ib +[Ll]ib64 +[Ll]ocal +[Ss]cripts +pyvenv.cfg +.venv +pip-selfcheck.json +### JetBrains template +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm +# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 + +# User-specific stuff: +.idea/workspace.xml +.idea/tasks.xml +.idea/dictionaries +.idea/vcs.xml +.idea/jsLibraryMappings.xml + +# Sensitive or high-churn files: +.idea/dataSources.ids +.idea/dataSources.xml +.idea/dataSources.local.xml +.idea/sqlDataSources.xml +.idea/dynamic.xml +.idea/uiDesigner.xml + +# Gradle: +.idea/gradle.xml +.idea/libraries + +# Mongo Explorer plugin: +.idea/mongoSettings.xml + +.idea/ + +## File-based project format: +*.iws + +## Plugin-specific files: + +# IntelliJ +/out/ + +# mpeltonen/sbt-idea plugin +.idea_modules/ + +# JIRA plugin +atlassian-ide-plugin.xml + +# Crashlytics plugin (for Android Studio and IntelliJ) +com_crashlytics_export_strings.xml +crashlytics.properties +crashlytics-build.properties +fabric.properties + +.nox/ +*.cover +*.py,cover +.pytest_cache/ +cover/ +db.sqlite3 +db.sqlite3-journal +.pybuilder/ + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow
+__pypackages__/
+
+# Celery stuff
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyproject
+
+# Rope project settings
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
\ No newline at end of file
diff --git a/LICENCE b/LICENCE
new file mode 100644
index 0000000..8a542a1
--- /dev/null
+++ b/LICENCE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2021 Fernando Paulovich, Leonardo Christino
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 0000000..e69de29
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..92e823a
--- /dev/null
+++ b/README.md
@@ -0,0 +1,71 @@
+# NeoForceScheme
+
+A new library providing an extended, performance-focused implementation of the ForceScheme projection technique.
+
+## QuickStart
+
+You can find examples for [simple data](./examples/mammmals_cpu.py), [large data with cpu](./examples/mammmals_large_cpu.py),
+and [large data with gpu](./examples/mammmals_large_gpu.py).
+
+To run the projection:
+```python
+import numpy as np
+from neo_force_scheme import NeoForceScheme
+
+dataset = np.random.random((100, 100))  # Some dataset
+
+nfs = NeoForceScheme()
+projection = nfs.fit_transform(dataset)
+```
+
+To use the GPU, be sure to have the CUDA toolkit installed.
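+
+A quick way to confirm that Numba can actually see a GPU before enabling it (a minimal, illustrative check; `numba.cuda.is_available()` is Numba's own API, not part of NeoForceScheme):
+
+```python
+from numba import cuda
+
+print(cuda.is_available())  # True when a usable device and CUDA toolkit are found
+```
+
+With that in place, enabling the GPU is a one-line change: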
+```python +import numpy as np +from neo_force_scheme import NeoForceScheme + +dataset = np.random.random((100, 100)) # Some dataset + +nfs = NeoForceScheme(cuda=True) +projection = nfs.fit_transform(dataset) +``` + +### Kruskal Stress +```python +import numpy as np +from neo_force_scheme import NeoForceScheme, kruskal_stress + +dataset = np.random.random((100, 100)) # Some dataset + +nfs = NeoForceScheme(cuda=True) +projection = nfs.fit_transform(dataset) + +stress = kruskal_stress(nfs.embedding_, projection) +``` + +### Plot with matplotlib +```python +import matplotlib.pyplot as plt +import numpy as np +from neo_force_scheme import NeoForceScheme +from matplotlib.colors import ListedColormap + +dataset = np.random.random((100, 100)) # Some dataset without labels +labels = np.random.random(100) # Per-row labels + +nfs = NeoForceScheme(cuda=True) +projection = nfs.fit_transform(dataset) + +plt.figure() +plt.scatter(projection[:, 0], + projection[:, 1], + c=labels, + cmap=ListedColormap(['blue', 'red', 'green']), + edgecolors='face', + linewidths=0.5, + s=4) +plt.grid(linestyle='dotted') +plt.show() +``` + +## API +More information can be found at [our documentation page](#) \ No newline at end of file diff --git a/examples/.gitignore b/examples/.gitignore new file mode 100644 index 0000000..489ce0f --- /dev/null +++ b/examples/.gitignore @@ -0,0 +1 @@ +old \ No newline at end of file diff --git a/examples/__init__.py b/examples/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/examples/mammals.data b/examples/mammals.data new file mode 100644 index 0000000..300b0b9 --- /dev/null +++ b/examples/mammals.data @@ -0,0 +1,100 @@ +0.00,0.00,0.00,1.00,0.00,0.00,17.74,3.43,175,8.87,3.43,-6.24,0.79,0.00,-1.00,5.63,0.60,173,8.87,-3.43,-6.24,0.00,0.00,-1.00,5.63,0.57,179,-8.87,3.43,-6.24,0.00,0.00,-1.00,5.63,0.64,175,-8.87,-3.43,-6.24,0.45,0.00,-1.00,5.63,0.77,175,10.30,0.00,3.43,1.00,0.00,1.00,2.86,2.27,177,11.73,0.00,3.43,1.00,0.19,0.00,4.08,2.37,175,-3.19,0.00,0.00,-1.00,-0.01,-0.04,14.55,0.47,171,2 +0.00,0.00,0.00,1.00,0.00,0.00,24.98,6.60,176,12.49,6.60,-12.71,0.48,0.00,-1.00,12.22,1.11,175,12.49,-6.60,-12.71,0.00,0.00,-1.00,12.22,0.93,172,-12.49,6.60,-12.71,0.00,0.00,-1.00,12.22,0.98,175,-12.49,-6.60,-12.71,0.45,0.00,-1.00,12.22,1.19,175,15.38,0.00,6.60,1.00,0.00,1.00,5.79,2.11,178,18.27,0.00,6.60,1.00,0.36,0.00,11.73,3.06,175,-18.47,0.00,0.00,-1.00,0.11,-0.04,6.51,0.29,174,1 +0.00,0.00,0.00,1.00,0.00,0.00,25.45,7.01,175,12.72,7.01,-13.40,0.26,0.00,-1.00,12.79,0.96,174,12.72,-7.01,-13.40,0.00,0.00,-1.00,12.79,1.01,175,-12.72,7.01,-13.40,0.00,0.00,-1.00,12.79,0.83,176,-12.72,-7.01,-13.40,0.44,0.00,-1.00,12.79,1.02,172,15.56,0.00,7.01,1.00,0.00,1.00,5.66,2.63,175,18.39,0.00,7.01,1.00,0.70,0.00,11.62,2.65,174,-18.55,0.00,0.00,-1.00,-0.27,-0.16,6.90,0.24,173,1 +0.00,0.00,0.00,1.00,0.00,0.00,20.70,3.32,175,10.35,3.32,-7.23,0.50,0.00,-1.00,7.82,0.74,177,10.35,-3.32,-7.23,0.00,0.00,-1.00,7.82,0.61,176,-10.35,3.32,-7.23,0.00,0.00,-1.00,7.82,0.63,171,-10.35,-3.32,-7.23,0.25,0.00,-1.00,7.82,0.58,174,11.88,0.00,3.32,1.00,0.00,1.00,3.06,2.19,173,13.41,0.00,3.32,1.00,-0.04,0.00,3.65,2.05,174,-6.62,0.00,0.00,-1.00,0.08,-0.27,14.09,0.51,173,2 
+0.00,0.00,0.00,1.00,0.00,0.00,64.89,14.91,172,32.44,14.91,-46.63,0.26,0.00,-1.00,63.44,2.97,173,32.44,-14.91,-46.63,0.00,0.00,-1.00,63.44,2.98,175,-32.44,14.91,-46.63,0.00,0.00,-1.00,63.44,3.10,174,-32.44,-14.91,-46.63,0.32,0.00,-1.00,63.44,3.26,174,51.03,0.00,33.50,1.00,0.00,1.00,52.56,7.22,173,69.62,0.00,52.08,1.00,-0.01,0.00,20.31,4.45,176,-41.57,0.00,0.00,-1.00,0.35,-0.48,23.32,0.75,176,4 +0.00,0.00,0.00,1.00,0.00,0.00,27.23,5.75,178,13.61,5.75,-12.53,0.59,0.00,-1.00,13.57,1.03,176,13.61,-5.75,-12.53,0.00,0.00,-1.00,13.57,0.94,174,-13.61,5.75,-12.53,0.00,0.00,-1.00,13.57,0.99,174,-13.61,-5.75,-12.53,0.65,0.00,-1.00,13.57,0.93,179,16.38,0.00,5.75,1.00,0.00,1.00,5.53,2.51,171,19.14,0.00,5.75,1.00,-0.85,0.00,12.52,3.28,173,-20.71,0.00,0.00,-1.00,-0.10,-0.42,6.52,0.28,172,1 +0.00,0.00,0.00,1.00,0.00,0.00,27.69,6.31,175,13.84,6.31,-12.66,0.75,0.00,-1.00,12.70,1.20,174,13.84,-6.31,-12.66,0.00,0.00,-1.00,12.70,1.14,174,-13.84,6.31,-12.66,0.00,0.00,-1.00,12.70,1.06,175,-13.84,-6.31,-12.66,0.38,0.00,-1.00,12.70,1.02,178,16.46,0.00,6.31,1.00,0.00,1.00,5.23,2.42,174,19.08,0.00,6.31,1.00,-0.12,0.00,11.22,2.95,175,-20.76,0.00,0.00,-1.00,0.49,1.00,6.93,0.36,175,1 +0.00,0.00,0.00,1.00,0.00,0.00,65.10,14.07,178,32.55,14.07,-45.32,0.22,0.00,-1.00,62.49,2.97,170,32.55,-14.07,-45.32,0.00,0.00,-1.00,62.49,2.92,175,-32.55,14.07,-45.32,0.00,0.00,-1.00,62.49,2.99,176,-32.55,-14.07,-45.32,0.09,0.00,-1.00,62.49,3.14,173,48.62,0.00,30.14,1.00,0.00,1.00,45.45,6.68,173,64.69,0.00,46.21,1.00,0.18,0.00,21.54,4.67,175,-41.72,0.00,0.00,-1.00,-0.39,-0.46,23.38,0.88,176,4 +0.00,0.00,0.00,1.00,0.00,0.00,69.09,14.85,170,34.55,14.85,-47.38,0.28,0.00,-1.00,65.05,3.27,177,34.55,-14.85,-47.38,0.00,0.00,-1.00,65.05,3.27,177,-34.55,14.85,-47.38,0.00,0.00,-1.00,65.05,3.12,172,-34.55,-14.85,-47.38,0.29,0.00,-1.00,65.05,2.95,172,52.68,0.00,32.98,1.00,0.00,1.00,51.28,6.96,172,70.81,0.00,51.12,1.00,-0.10,0.00,21.17,4.54,174,-46.45,0.00,0.00,-1.00,-0.85,-0.95,22.65,0.74,175,4 +0.00,0.00,0.00,1.00,0.00,0.00,19.10,3.48,174,9.55,3.48,-6.83,0.30,0.00,-1.00,6.69,0.80,175,9.55,-3.48,-6.83,0.00,0.00,-1.00,6.69,0.73,175,-9.55,3.48,-6.83,0.00,0.00,-1.00,6.69,0.78,174,-9.55,-3.48,-6.83,0.55,0.00,-1.00,6.69,0.51,179,11.41,0.00,3.48,1.00,0.00,1.00,3.72,1.82,175,13.27,0.00,3.48,1.00,0.02,0.00,4.32,1.76,177,-1.67,0.00,0.00,-1.00,-0.55,0.06,17.43,0.61,179,2 +0.00,0.00,0.00,1.00,0.00,0.00,18.28,3.97,176,9.14,3.97,-7.68,0.56,0.00,-1.00,7.41,0.70,174,9.14,-3.97,-7.68,0.00,0.00,-1.00,7.41,0.80,174,-9.14,3.97,-7.68,0.00,0.00,-1.00,7.41,0.59,171,-9.14,-3.97,-7.68,0.65,0.00,-1.00,7.41,0.63,172,10.72,0.00,3.97,1.00,0.00,1.00,3.15,2.04,174,12.29,0.00,3.97,1.00,-0.01,0.00,4.86,1.71,177,-4.71,0.00,0.00,-1.00,0.13,-0.34,13.57,0.47,172,2 +0.00,0.00,0.00,1.00,0.00,0.00,25.18,6.46,174,12.59,6.46,-12.99,0.65,0.00,-1.00,13.07,0.90,172,12.59,-6.46,-12.99,0.00,0.00,-1.00,13.07,0.90,175,-12.59,6.46,-12.99,0.00,0.00,-1.00,13.07,1.00,175,-12.59,-6.46,-12.99,0.70,0.00,-1.00,13.07,0.98,170,15.26,0.00,6.46,1.00,0.00,1.00,5.34,2.61,174,17.94,0.00,6.46,1.00,0.01,0.00,11.67,3.00,174,-18.03,0.00,0.00,-1.00,-0.11,-0.61,7.16,0.30,174,1 +0.00,0.00,0.00,1.00,0.00,0.00,53.04,11.85,174,26.52,11.85,-31.92,0.11,0.00,-1.00,40.13,3.33,176,26.52,-11.85,-31.92,0.00,0.00,-1.00,40.13,2.66,175,-26.52,11.85,-31.92,0.00,0.00,-1.00,40.13,3.14,172,-26.52,-11.85,-31.92,0.05,0.00,-1.00,40.13,2.50,178,31.47,0.00,16.80,1.00,0.00,1.00,14.00,6.39,177,36.42,0.00,21.75,1.00,0.01,0.00,20.32,5.02,175,-25.63,0.00,0.00,-1.00,-0.51,-0.48,27.41,1.49,174,3 
+0.00,0.00,0.00,1.00,0.00,0.00,53.51,12.50,176,26.75,12.50,-33.47,0.10,0.00,-1.00,41.94,2.58,171,26.75,-12.50,-33.47,0.00,0.00,-1.00,41.94,2.82,174,-26.75,12.50,-33.47,0.00,0.00,-1.00,41.94,2.68,175,-26.75,-12.50,-33.47,0.16,0.00,-1.00,41.94,2.78,172,32.37,0.00,18.11,1.00,0.00,1.00,15.88,6.08,175,37.98,0.00,23.73,1.00,-0.19,0.00,18.65,5.15,179,-23.78,0.00,0.00,-1.00,0.12,-0.53,29.72,1.40,173,3 +0.00,0.00,0.00,1.00,0.00,0.00,57.14,12.29,174,28.57,12.29,-32.33,0.30,0.00,-1.00,40.08,2.48,172,28.57,-12.29,-32.33,0.00,0.00,-1.00,40.08,2.46,175,-28.57,12.29,-32.33,0.00,0.00,-1.00,40.08,2.52,170,-28.57,-12.29,-32.33,0.14,0.00,-1.00,40.08,2.09,174,34.18,0.00,17.90,1.00,0.00,1.00,15.86,5.07,174,39.79,0.00,23.51,1.00,-0.16,0.00,19.62,4.59,175,-28.85,0.00,0.00,-1.00,0.04,-0.39,28.29,1.43,174,3 +0.00,0.00,0.00,1.00,0.00,0.00,26.37,6.64,174,13.19,6.64,-12.97,0.63,0.00,-1.00,12.65,0.97,175,13.19,-6.64,-12.97,0.00,0.00,-1.00,12.65,1.00,174,-13.19,6.64,-12.97,0.00,0.00,-1.00,12.65,0.99,175,-13.19,-6.64,-12.97,0.76,0.00,-1.00,12.65,1.04,177,15.98,0.00,6.64,1.00,0.00,1.00,5.59,2.58,173,18.77,0.00,6.64,1.00,-0.33,0.00,11.89,2.99,175,-19.22,0.00,0.00,-1.00,-0.11,0.18,7.15,0.33,178,1 +0.00,0.00,0.00,1.00,0.00,0.00,56.05,11.82,172,28.02,11.82,-32.10,0.37,0.00,-1.00,40.56,3.46,170,28.02,-11.82,-32.10,0.00,0.00,-1.00,40.56,2.84,175,-28.02,11.82,-32.10,0.00,0.00,-1.00,40.56,2.97,175,-28.02,-11.82,-32.10,0.12,0.00,-1.00,40.56,2.51,174,33.10,0.00,16.90,1.00,0.00,1.00,14.37,5.83,174,38.18,0.00,21.98,1.00,0.12,0.00,20.32,4.72,174,-25.64,0.00,0.00,-1.00,0.04,-0.12,30.41,1.93,173,3 +0.00,0.00,0.00,1.00,0.00,0.00,52.62,13.19,176,26.31,13.19,-33.24,0.26,0.00,-1.00,40.10,2.45,175,26.31,-13.19,-33.24,0.00,0.00,-1.00,40.10,2.37,172,-26.31,13.19,-33.24,0.00,0.00,-1.00,40.10,2.93,175,-26.31,-13.19,-33.24,0.24,0.00,-1.00,40.10,2.79,175,31.27,0.00,18.15,1.00,0.00,1.00,14.02,6.53,173,36.22,0.00,23.10,1.00,-0.05,0.00,19.67,4.12,172,-24.74,0.00,0.00,-1.00,0.30,-0.44,27.88,1.13,176,3 +0.00,0.00,0.00,1.00,0.00,0.00,23.20,7.01,177,11.60,7.01,-12.70,0.51,0.00,-1.00,11.37,1.03,171,11.60,-7.01,-12.70,0.00,0.00,-1.00,11.37,1.14,174,-11.60,7.01,-12.70,0.00,0.00,-1.00,11.37,0.91,173,-11.60,-7.01,-12.70,0.56,0.00,-1.00,11.37,0.88,170,14.46,0.00,7.01,1.00,0.00,1.00,5.72,2.37,174,17.32,0.00,7.01,1.00,-0.10,0.00,12.70,2.91,178,-16.14,0.00,0.00,-1.00,-0.00,0.54,7.06,0.29,173,1 +0.00,0.00,0.00,1.00,0.00,0.00,25.85,5.63,175,12.92,5.63,-11.87,0.27,0.00,-1.00,12.47,1.07,172,12.92,-5.63,-11.87,0.00,0.00,-1.00,12.47,1.01,174,-12.92,5.63,-11.87,0.00,0.00,-1.00,12.47,0.98,170,-12.92,-5.63,-11.87,0.06,0.00,-1.00,12.47,0.96,177,15.87,0.00,5.63,1.00,0.00,1.00,5.88,2.42,173,18.81,0.00,5.63,1.00,0.17,0.00,12.38,2.56,176,-18.76,0.00,0.00,-1.00,-0.03,0.02,7.09,0.28,174,1 +0.00,0.00,0.00,1.00,0.00,0.00,22.16,6.14,175,11.08,6.14,-12.62,0.05,0.00,-1.00,12.96,1.03,175,11.08,-6.14,-12.62,0.00,0.00,-1.00,12.96,1.15,174,-11.08,6.14,-12.62,0.00,0.00,-1.00,12.96,1.00,175,-11.08,-6.14,-12.62,0.60,0.00,-1.00,12.96,0.98,173,13.78,0.00,6.14,1.00,0.00,1.00,5.40,2.82,178,16.48,0.00,6.14,1.00,0.35,0.00,12.29,2.98,175,-14.98,0.00,0.00,-1.00,0.04,-0.05,7.18,0.34,175,1 +0.00,0.00,0.00,1.00,0.00,0.00,18.42,3.39,175,9.21,3.39,-6.90,0.37,0.00,-1.00,7.02,0.59,175,9.21,-3.39,-6.90,0.00,0.00,-1.00,7.02,0.63,174,-9.21,3.39,-6.90,0.00,0.00,-1.00,7.02,0.61,174,-9.21,-3.39,-6.90,0.60,0.00,-1.00,7.02,0.52,173,10.85,0.00,3.39,1.00,0.00,1.00,3.27,2.25,175,12.48,0.00,3.39,1.00,0.71,0.00,4.34,2.43,176,-4.00,0.00,0.00,-1.00,-0.18,-0.13,14.42,0.40,173,2 
+0.00,0.00,0.00,1.00,0.00,0.00,57.96,13.28,173,28.98,13.28,-32.96,0.13,0.00,-1.00,39.37,3.04,172,28.98,-13.28,-32.96,0.00,0.00,-1.00,39.37,2.82,175,-28.98,13.28,-32.96,0.00,0.00,-1.00,39.37,3.03,175,-28.98,-13.28,-32.96,0.28,0.00,-1.00,39.37,2.74,174,33.94,0.00,18.24,1.00,0.00,1.00,14.03,6.10,175,38.90,0.00,23.20,1.00,-0.85,0.00,21.44,5.05,174,-28.05,0.00,0.00,-1.00,-0.09,-0.24,29.91,1.55,172,3 +0.00,0.00,0.00,1.00,0.00,0.00,64.82,14.54,175,32.41,14.54,-45.27,0.18,0.00,-1.00,61.46,3.70,173,32.41,-14.54,-45.27,0.00,0.00,-1.00,61.46,2.72,174,-32.41,14.54,-45.27,0.00,0.00,-1.00,61.46,2.41,176,-32.41,-14.54,-45.27,0.23,0.00,-1.00,61.46,3.01,173,51.54,0.00,33.68,1.00,0.00,1.00,54.11,6.78,179,70.67,0.00,52.81,1.00,0.26,0.00,20.58,4.17,174,-42.35,0.00,0.00,-1.00,-0.08,-0.51,22.47,0.87,172,4 +0.00,0.00,0.00,1.00,0.00,0.00,67.87,15.63,174,33.94,15.63,-47.54,0.12,0.00,-1.00,63.82,2.72,174,33.94,-15.63,-47.54,0.00,0.00,-1.00,63.82,3.49,174,-33.94,15.63,-47.54,0.00,0.00,-1.00,63.82,2.85,175,-33.94,-15.63,-47.54,0.37,0.00,-1.00,63.82,3.62,170,51.44,0.00,33.14,1.00,0.00,1.00,49.51,7.04,174,68.95,0.00,50.64,1.00,0.64,0.00,19.90,3.98,174,-44.13,0.00,0.00,-1.00,-0.09,-0.53,23.74,0.86,177,4 +0.00,0.00,0.00,1.00,0.00,0.00,18.44,3.67,176,9.22,3.67,-6.74,0.54,0.00,-1.00,6.14,0.50,175,9.22,-3.67,-6.74,0.00,0.00,-1.00,6.14,0.74,172,-9.22,3.67,-6.74,0.00,0.00,-1.00,6.14,0.40,172,-9.22,-3.67,-6.74,0.36,0.00,-1.00,6.14,0.75,178,10.75,0.00,3.67,1.00,0.00,1.00,3.07,1.60,172,12.29,0.00,3.67,1.00,-0.13,0.00,3.82,1.78,174,-4.29,0.00,0.00,-1.00,0.64,0.57,14.15,0.38,174,2 +0.00,0.00,0.00,1.00,0.00,0.00,15.57,3.57,175,7.78,3.57,-7.59,0.69,0.00,-1.00,8.03,0.63,171,7.78,-3.57,-7.59,0.00,0.00,-1.00,8.03,0.60,177,-7.78,3.57,-7.59,0.00,0.00,-1.00,8.03,0.49,176,-7.78,-3.57,-7.59,0.02,0.00,-1.00,8.03,0.58,173,9.43,0.00,3.57,1.00,0.00,1.00,3.30,2.00,174,11.08,0.00,3.57,1.00,-0.14,0.00,4.53,2.10,174,0.69,0.00,0.00,-1.00,-0.41,0.29,16.26,0.52,173,2 +0.00,0.00,0.00,1.00,0.00,0.00,59.46,12.98,175,29.73,12.98,-33.51,0.13,0.00,-1.00,41.05,2.37,179,29.73,-12.98,-33.51,0.00,0.00,-1.00,41.05,2.89,174,-29.73,12.98,-33.51,0.00,0.00,-1.00,41.05,2.70,174,-29.73,-12.98,-33.51,0.24,0.00,-1.00,41.05,2.66,175,34.51,0.00,17.76,1.00,0.00,1.00,13.51,5.75,178,39.29,0.00,22.54,1.00,-0.74,0.00,19.47,5.42,170,-29.81,0.00,0.00,-1.00,-0.22,-0.50,29.65,1.43,175,3 +0.00,0.00,0.00,1.00,0.00,0.00,63.63,14.32,175,31.81,14.32,-46.34,0.32,0.00,-1.00,64.04,2.96,174,31.81,-14.32,-46.34,0.00,0.00,-1.00,64.04,2.47,176,-31.81,14.32,-46.34,0.00,0.00,-1.00,64.04,3.13,179,-31.81,-14.32,-46.34,0.04,0.00,-1.00,64.04,2.19,172,49.98,0.00,32.49,1.00,0.00,1.00,51.38,6.63,174,68.15,0.00,50.65,1.00,0.05,0.00,20.20,4.44,172,-41.43,0.00,0.00,-1.00,-0.05,-0.64,22.19,0.73,174,4 +0.00,0.00,0.00,1.00,0.00,0.00,55.50,11.29,176,27.75,11.29,-33.03,0.27,0.00,-1.00,43.48,2.60,174,27.75,-11.29,-33.03,0.00,0.00,-1.00,43.48,2.88,172,-27.75,11.29,-33.03,0.00,0.00,-1.00,43.48,2.83,175,-27.75,-11.29,-33.03,0.27,0.00,-1.00,43.48,2.72,170,32.76,0.00,16.30,1.00,0.00,1.00,14.16,6.04,174,37.77,0.00,21.30,1.00,-0.59,0.00,19.72,4.76,175,-24.62,0.00,0.00,-1.00,-0.09,-0.07,30.89,1.59,177,3 +0.00,0.00,0.00,1.00,0.00,0.00,24.62,5.73,171,12.31,5.73,-12.45,0.96,0.00,-1.00,13.44,1.02,174,12.31,-5.73,-12.45,0.00,0.00,-1.00,13.44,1.03,176,-12.31,5.73,-12.45,0.00,0.00,-1.00,13.44,0.98,174,-12.31,-5.73,-12.45,0.32,0.00,-1.00,13.44,0.98,174,15.02,0.00,5.73,1.00,0.00,1.00,5.42,2.41,175,17.73,0.00,5.73,1.00,-0.42,0.00,11.78,3.07,174,-17.45,0.00,0.00,-1.00,0.58,-0.53,7.17,0.32,173,1 
+0.00,0.00,0.00,1.00,0.00,0.00,56.34,12.29,174,28.17,12.29,-31.90,0.26,0.00,-1.00,39.22,2.63,174,28.17,-12.29,-31.90,0.00,0.00,-1.00,39.22,2.65,172,-28.17,12.29,-31.90,0.00,0.00,-1.00,39.22,3.11,177,-28.17,-12.29,-31.90,0.30,0.00,-1.00,39.22,2.63,174,32.98,0.00,17.10,1.00,0.00,1.00,13.60,5.91,174,37.79,0.00,21.91,1.00,-0.19,0.00,20.20,4.75,177,-27.56,0.00,0.00,-1.00,0.20,-0.75,28.78,1.47,175,3 +0.00,0.00,0.00,1.00,0.00,0.00,17.49,3.31,173,8.74,3.31,-6.81,0.65,0.00,-1.00,7.00,0.55,175,8.74,-3.31,-6.81,0.00,0.00,-1.00,7.00,0.68,175,-8.74,3.31,-6.81,0.00,0.00,-1.00,7.00,0.63,178,-8.74,-3.31,-6.81,0.08,0.00,-1.00,7.00,0.59,173,10.15,0.00,3.31,1.00,0.00,1.00,2.81,2.15,176,11.56,0.00,3.31,1.00,0.19,0.00,4.08,2.04,175,-1.27,0.00,0.00,-1.00,-0.42,-0.02,16.22,0.60,175,2 +0.00,0.00,0.00,1.00,0.00,0.00,54.26,13.15,177,27.13,13.15,-33.35,0.28,0.00,-1.00,40.40,2.70,174,27.13,-13.15,-33.35,0.00,0.00,-1.00,40.40,2.48,171,-27.13,13.15,-33.35,0.00,0.00,-1.00,40.40,2.34,177,-27.13,-13.15,-33.35,0.21,0.00,-1.00,40.40,2.61,170,32.44,0.00,18.47,1.00,0.00,1.00,15.03,6.81,174,37.76,0.00,23.78,1.00,-0.21,0.00,20.33,5.04,175,-26.62,0.00,0.00,-1.00,0.68,-0.15,27.64,1.55,175,3 +0.00,0.00,0.00,1.00,0.00,0.00,60.98,13.65,174,30.49,13.65,-43.34,0.18,0.00,-1.00,59.37,2.91,175,30.49,-13.65,-43.34,0.00,0.00,-1.00,59.37,3.04,177,-30.49,13.65,-43.34,0.00,0.00,-1.00,59.37,2.60,175,-30.49,-13.65,-43.34,0.18,0.00,-1.00,59.37,2.45,175,47.40,0.00,30.56,1.00,0.00,1.00,47.82,6.94,179,64.31,0.00,47.47,1.00,0.19,0.00,20.72,4.67,174,-39.96,0.00,0.00,-1.00,-0.01,-0.45,21.02,0.76,175,4 +0.00,0.00,0.00,1.00,0.00,0.00,24.69,6.57,171,12.34,6.57,-12.84,0.57,0.00,-1.00,12.53,0.98,172,12.34,-6.57,-12.84,0.00,0.00,-1.00,12.53,1.00,175,-12.34,6.57,-12.84,0.00,0.00,-1.00,12.53,1.00,178,-12.34,-6.57,-12.84,0.47,0.00,-1.00,12.53,1.00,175,15.14,0.00,6.57,1.00,0.00,1.00,5.60,2.84,175,17.94,0.00,6.57,1.00,-0.42,0.00,12.30,2.91,170,-17.98,0.00,0.00,-1.00,-0.37,-0.28,6.71,0.39,175,1 +0.00,0.00,0.00,1.00,0.00,0.00,23.89,6.29,177,11.94,6.29,-12.66,0.50,0.00,-1.00,12.74,1.04,179,11.94,-6.29,-12.66,0.00,0.00,-1.00,12.74,1.14,176,-11.94,6.29,-12.66,0.00,0.00,-1.00,12.74,0.94,175,-11.94,-6.29,-12.66,0.52,0.00,-1.00,12.74,1.10,172,14.68,0.00,6.29,1.00,0.00,1.00,5.46,2.53,176,17.41,0.00,6.29,1.00,0.19,0.00,11.34,2.97,172,-16.79,0.00,0.00,-1.00,0.98,0.18,7.10,0.26,175,1 +0.00,0.00,0.00,1.00,0.00,0.00,24.57,6.38,172,12.29,6.38,-12.41,0.65,0.00,-1.00,12.05,1.04,172,12.29,-6.38,-12.41,0.00,0.00,-1.00,12.05,1.00,172,-12.29,6.38,-12.41,0.00,0.00,-1.00,12.05,1.07,175,-12.29,-6.38,-12.41,0.52,0.00,-1.00,12.05,0.87,173,14.98,0.00,6.38,1.00,0.00,1.00,5.38,2.58,175,17.67,0.00,6.38,1.00,0.81,0.00,12.38,2.99,174,-17.31,0.00,0.00,-1.00,0.01,0.10,7.26,0.28,175,1 +0.00,0.00,0.00,1.00,0.00,0.00,24.66,6.46,173,12.33,6.46,-13.10,0.29,0.00,-1.00,13.27,0.97,175,12.33,-6.46,-13.10,0.00,0.00,-1.00,13.27,1.01,177,-12.33,6.46,-13.10,0.00,0.00,-1.00,13.27,1.03,174,-12.33,-6.46,-13.10,0.70,0.00,-1.00,13.27,0.97,172,15.08,0.00,6.46,1.00,0.00,1.00,5.51,2.64,179,17.84,0.00,6.46,1.00,-0.05,0.00,11.31,2.95,174,-18.53,0.00,0.00,-1.00,0.16,0.13,6.12,0.32,175,1 +0.00,0.00,0.00,1.00,0.00,0.00,18.04,3.81,175,9.02,3.81,-7.65,0.39,0.00,-1.00,7.68,0.61,172,9.02,-3.81,-7.65,0.00,0.00,-1.00,7.68,0.49,178,-9.02,3.81,-7.65,0.00,0.00,-1.00,7.68,0.61,174,-9.02,-3.81,-7.65,0.30,0.00,-1.00,7.68,0.65,174,10.48,0.00,3.81,1.00,0.00,1.00,2.92,1.89,174,11.93,0.00,3.81,1.00,-0.56,0.00,4.20,2.49,174,-5.70,0.00,0.00,-1.00,0.36,0.32,12.34,0.59,175,2 
+0.00,0.00,0.00,1.00,0.00,0.00,19.78,3.42,177,9.89,3.42,-7.31,0.37,0.00,-1.00,7.79,0.56,179,9.89,-3.42,-7.31,0.00,0.00,-1.00,7.79,0.51,177,-9.89,3.42,-7.31,0.00,0.00,-1.00,7.79,0.57,176,-9.89,-3.42,-7.31,0.53,0.00,-1.00,7.79,0.60,177,11.20,0.00,3.42,1.00,0.00,1.00,2.62,1.72,175,12.51,0.00,3.42,1.00,-0.18,0.00,3.87,2.42,171,-4.54,0.00,0.00,-1.00,-0.20,0.06,15.23,0.49,175,2 +0.00,0.00,0.00,1.00,0.00,0.00,59.72,11.40,172,29.86,11.40,-29.52,0.34,0.00,-1.00,36.23,2.64,176,29.86,-11.40,-29.52,0.00,0.00,-1.00,36.23,2.70,178,-29.86,11.40,-29.52,0.00,0.00,-1.00,36.23,2.37,174,-29.86,-11.40,-29.52,0.17,0.00,-1.00,36.23,2.06,173,34.82,0.00,16.36,1.00,0.00,1.00,14.03,6.03,174,39.78,0.00,21.32,1.00,-0.24,0.00,19.97,5.33,176,-31.40,0.00,0.00,-1.00,0.17,-0.65,28.33,1.97,171,3 +0.00,0.00,0.00,1.00,0.00,0.00,23.93,5.57,170,11.96,5.57,-11.48,0.56,0.00,-1.00,11.83,1.05,175,11.96,-5.57,-11.48,0.00,0.00,-1.00,11.83,1.09,173,-11.96,5.57,-11.48,0.00,0.00,-1.00,11.83,1.09,174,-11.96,-5.57,-11.48,0.74,0.00,-1.00,11.83,0.96,176,14.64,0.00,5.57,1.00,0.00,1.00,5.35,2.74,173,17.32,0.00,5.57,1.00,-0.45,0.00,12.03,3.32,175,-16.59,0.00,0.00,-1.00,-0.12,-0.08,7.34,0.25,174,1 +0.00,0.00,0.00,1.00,0.00,0.00,52.32,11.87,174,26.16,11.87,-31.51,0.23,0.00,-1.00,39.28,2.62,174,26.16,-11.87,-31.51,0.00,0.00,-1.00,39.28,2.63,174,-26.16,11.87,-31.51,0.00,0.00,-1.00,39.28,2.34,174,-26.16,-11.87,-31.51,0.02,0.00,-1.00,39.28,3.32,173,31.11,0.00,16.82,1.00,0.00,1.00,13.99,6.17,175,36.05,0.00,21.77,1.00,0.20,0.00,20.47,5.02,174,-20.34,0.00,0.00,-1.00,-0.33,-0.27,31.98,1.33,178,3 +0.00,0.00,0.00,1.00,0.00,0.00,64.93,15.11,174,32.46,15.11,-45.25,0.05,0.00,-1.00,60.27,3.65,171,32.46,-15.11,-45.25,0.00,0.00,-1.00,60.27,2.63,175,-32.46,15.11,-45.25,0.00,0.00,-1.00,60.27,3.19,175,-32.46,-15.11,-45.25,0.27,0.00,-1.00,60.27,2.43,174,51.06,0.00,33.71,1.00,0.00,1.00,52.60,8.52,175,69.66,0.00,52.31,1.00,0.74,0.00,18.85,4.84,172,-43.60,0.00,0.00,-1.00,0.53,-0.42,21.33,0.56,172,4 +0.00,0.00,0.00,1.00,0.00,0.00,18.61,3.92,174,9.31,3.92,-7.84,0.63,0.00,-1.00,7.84,0.64,173,9.31,-3.92,-7.84,0.00,0.00,-1.00,7.84,0.61,171,-9.31,3.92,-7.84,0.00,0.00,-1.00,7.84,0.51,175,-9.31,-3.92,-7.84,0.35,0.00,-1.00,7.84,0.63,174,11.21,0.00,3.92,1.00,0.00,1.00,3.81,1.94,174,13.12,0.00,3.92,1.00,-0.19,0.00,4.47,1.87,177,-3.95,0.00,0.00,-1.00,0.49,0.03,14.66,0.49,171,2 +0.00,0.00,0.00,1.00,0.00,0.00,55.95,12.29,176,27.98,12.29,-32.47,0.24,0.00,-1.00,40.35,2.67,174,27.98,-12.29,-32.47,0.00,0.00,-1.00,40.35,2.55,178,-27.98,12.29,-32.47,0.00,0.00,-1.00,40.35,3.14,174,-27.98,-12.29,-32.47,0.20,0.00,-1.00,40.35,2.60,172,32.70,0.00,17.02,1.00,0.00,1.00,13.36,6.09,175,37.43,0.00,21.75,1.00,0.21,0.00,19.93,5.53,176,-23.02,0.00,0.00,-1.00,0.59,-0.43,32.93,1.45,178,3 +0.00,0.00,0.00,1.00,0.00,0.00,17.91,3.98,175,8.95,3.98,-6.53,0.25,0.00,-1.00,5.11,0.67,178,8.95,-3.98,-6.53,0.00,0.00,-1.00,5.11,0.60,176,-8.95,3.98,-6.53,0.00,0.00,-1.00,5.11,0.59,175,-8.95,-3.98,-6.53,0.53,0.00,-1.00,5.11,0.46,179,10.18,0.00,3.98,1.00,0.00,1.00,2.46,1.77,174,11.41,0.00,3.98,1.00,-0.08,0.00,4.31,2.08,179,-4.57,0.00,0.00,-1.00,0.07,0.03,13.33,0.35,172,2 +0.00,0.00,0.00,1.00,0.00,0.00,25.58,6.87,176,12.79,6.87,-13.22,0.46,0.00,-1.00,12.70,1.03,173,12.79,-6.87,-13.22,0.00,0.00,-1.00,12.70,0.96,174,-12.79,6.87,-13.22,0.00,0.00,-1.00,12.70,0.98,172,-12.79,-6.87,-13.22,0.13,0.00,-1.00,12.70,1.04,174,15.68,0.00,6.87,1.00,0.00,1.00,5.78,2.59,178,18.57,0.00,6.87,1.00,-0.19,0.00,10.54,2.98,174,-18.38,0.00,0.00,-1.00,0.07,-0.11,7.19,0.29,174,1 
+0.00,0.00,0.00,1.00,0.00,0.00,23.93,6.34,176,11.96,6.34,-11.86,0.47,0.00,-1.00,11.05,1.10,174,11.96,-6.34,-11.86,0.00,0.00,-1.00,11.05,1.12,179,-11.96,6.34,-11.86,0.00,0.00,-1.00,11.05,0.91,174,-11.96,-6.34,-11.86,0.03,0.00,-1.00,11.05,0.85,175,14.52,0.00,6.34,1.00,0.00,1.00,5.11,2.41,170,17.07,0.00,6.34,1.00,-0.06,0.00,11.77,3.03,177,-17.03,0.00,0.00,-1.00,-0.59,0.18,6.89,0.34,174,1 +0.00,0.00,0.00,1.00,0.00,0.00,25.21,6.45,175,12.61,6.45,-12.45,0.33,0.00,-1.00,12.01,0.92,171,12.61,-6.45,-12.45,0.00,0.00,-1.00,12.01,1.01,176,-12.61,6.45,-12.45,0.00,0.00,-1.00,12.01,0.99,175,-12.61,-6.45,-12.45,0.48,0.00,-1.00,12.01,1.20,172,15.50,0.00,6.45,1.00,0.00,1.00,5.79,2.31,175,18.40,0.00,6.45,1.00,-0.10,0.00,11.10,2.51,179,-18.19,0.00,0.00,-1.00,-0.01,0.36,7.03,0.31,179,1 +0.00,0.00,0.00,1.00,0.00,0.00,24.87,6.89,173,12.44,6.89,-12.92,0.21,0.00,-1.00,12.07,0.99,175,12.44,-6.89,-12.92,0.00,0.00,-1.00,12.07,1.00,172,-12.44,6.89,-12.92,0.00,0.00,-1.00,12.07,1.12,172,-12.44,-6.89,-12.92,0.37,0.00,-1.00,12.07,1.01,174,15.28,0.00,6.89,1.00,0.00,1.00,5.69,2.51,175,18.13,0.00,6.89,1.00,0.08,0.00,11.57,2.93,177,-18.47,0.00,0.00,-1.00,0.16,-0.10,6.40,0.28,176,1 +0.00,0.00,0.00,1.00,0.00,0.00,24.95,6.56,174,12.47,6.56,-12.40,0.43,0.00,-1.00,11.67,0.95,174,12.47,-6.56,-12.40,0.00,0.00,-1.00,11.67,1.01,175,-12.47,6.56,-12.40,0.00,0.00,-1.00,11.67,0.95,172,-12.47,-6.56,-12.40,0.69,0.00,-1.00,11.67,1.00,174,15.35,0.00,6.56,1.00,0.00,1.00,5.76,2.29,177,18.23,0.00,6.56,1.00,0.35,0.00,11.97,3.14,177,-17.53,0.00,0.00,-1.00,-0.13,0.05,7.42,0.28,179,1 +0.00,0.00,0.00,1.00,0.00,0.00,17.81,3.32,175,8.91,3.32,-5.98,0.77,0.00,-1.00,5.33,0.60,172,8.91,-3.32,-5.98,0.00,0.00,-1.00,5.33,0.60,175,-8.91,3.32,-5.98,0.00,0.00,-1.00,5.33,0.68,175,-8.91,-3.32,-5.98,0.26,0.00,-1.00,5.33,0.61,176,10.22,0.00,3.32,1.00,0.00,1.00,2.63,2.42,173,11.54,0.00,3.32,1.00,-0.18,0.00,3.36,2.06,174,-5.99,0.00,0.00,-1.00,-0.20,0.45,11.82,0.44,172,2 +0.00,0.00,0.00,1.00,0.00,0.00,20.23,3.51,178,10.12,3.51,-5.93,0.38,0.00,-1.00,4.83,0.60,173,10.12,-3.51,-5.93,0.00,0.00,-1.00,4.83,0.53,172,-10.12,3.51,-5.93,0.00,0.00,-1.00,4.83,0.49,172,-10.12,-3.51,-5.93,0.28,0.00,-1.00,4.83,0.67,175,11.64,0.00,3.51,1.00,0.00,1.00,3.04,2.26,174,13.15,0.00,3.51,1.00,0.02,0.00,3.80,1.97,173,-8.36,0.00,0.00,-1.00,-0.05,-0.45,11.87,0.39,174,2 +0.00,0.00,0.00,1.00,0.00,0.00,19.77,3.38,179,9.89,3.38,-7.51,0.42,0.00,-1.00,8.27,0.57,174,9.89,-3.38,-7.51,0.00,0.00,-1.00,8.27,0.72,175,-9.89,3.38,-7.51,0.00,0.00,-1.00,8.27,0.63,175,-9.89,-3.38,-7.51,0.50,0.00,-1.00,8.27,0.52,175,11.26,0.00,3.38,1.00,0.00,1.00,2.75,1.97,174,12.64,0.00,3.38,1.00,0.40,0.00,3.95,1.98,174,-6.78,0.00,0.00,-1.00,-0.06,-0.92,12.99,0.41,174,2 +0.00,0.00,0.00,1.00,0.00,0.00,18.65,3.60,178,9.33,3.60,-6.45,0.75,0.00,-1.00,5.70,0.50,171,9.33,-3.60,-6.45,0.00,0.00,-1.00,5.70,0.47,175,-9.33,3.60,-6.45,0.00,0.00,-1.00,5.70,0.60,175,-9.33,-3.60,-6.45,0.39,0.00,-1.00,5.70,0.50,177,10.89,0.00,3.60,1.00,0.00,1.00,3.13,2.14,175,12.46,0.00,3.60,1.00,-0.21,0.00,4.87,2.42,174,-5.37,0.00,0.00,-1.00,-0.48,-0.61,13.29,0.57,175,2 +0.00,0.00,0.00,1.00,0.00,0.00,19.16,4.09,170,9.58,4.09,-6.98,0.78,0.00,-1.00,5.78,0.65,175,9.58,-4.09,-6.98,0.00,0.00,-1.00,5.78,0.64,173,-9.58,4.09,-6.98,0.00,0.00,-1.00,5.78,0.63,173,-9.58,-4.09,-6.98,0.29,0.00,-1.00,5.78,0.58,175,11.14,0.00,4.09,1.00,0.00,1.00,3.13,1.83,176,12.71,0.00,4.09,1.00,-0.39,0.00,4.38,2.02,175,-9.02,0.00,0.00,-1.00,0.51,-0.48,10.14,0.49,174,2 
+0.00,0.00,0.00,1.00,0.00,0.00,64.69,13.91,175,32.35,13.91,-47.36,0.23,0.00,-1.00,66.89,2.98,179,32.35,-13.91,-47.36,0.00,0.00,-1.00,66.89,3.05,174,-32.35,13.91,-47.36,0.00,0.00,-1.00,66.89,3.29,174,-32.35,-13.91,-47.36,0.24,0.00,-1.00,66.89,2.92,174,49.72,0.00,31.28,1.00,0.00,1.00,49.12,7.78,172,67.08,0.00,48.65,1.00,-0.12,0.00,19.86,5.12,172,-42.49,0.00,0.00,-1.00,0.34,-0.88,22.21,0.80,170,4 +0.00,0.00,0.00,1.00,0.00,0.00,19.62,3.84,174,9.81,3.84,-7.28,0.47,0.00,-1.00,6.89,0.70,176,9.81,-3.84,-7.28,0.00,0.00,-1.00,6.89,0.70,173,-9.81,3.84,-7.28,0.00,0.00,-1.00,6.89,0.62,174,-9.81,-3.84,-7.28,0.46,0.00,-1.00,6.89,0.62,173,11.71,0.00,3.84,1.00,0.00,1.00,3.80,1.59,175,13.61,0.00,3.84,1.00,0.09,0.00,4.15,1.84,171,-3.48,0.00,0.00,-1.00,0.16,-0.18,16.14,0.52,175,2 +0.00,0.00,0.00,1.00,0.00,0.00,68.23,15.14,175,34.11,15.14,-47.12,0.17,0.00,-1.00,63.95,2.01,174,34.11,-15.14,-47.12,0.00,0.00,-1.00,63.95,3.02,174,-34.11,15.14,-47.12,0.00,0.00,-1.00,63.95,3.03,172,-34.11,-15.14,-47.12,0.21,0.00,-1.00,63.95,2.49,177,52.13,0.00,33.16,1.00,0.00,1.00,50.95,5.76,177,70.14,0.00,51.17,1.00,-0.22,0.00,19.83,3.92,172,-44.44,0.00,0.00,-1.00,0.36,-0.68,23.78,0.74,175,4 +0.00,0.00,0.00,1.00,0.00,0.00,64.93,13.40,174,32.46,13.40,-44.94,0.09,0.00,-1.00,63.07,3.13,174,32.46,-13.40,-44.94,0.00,0.00,-1.00,63.07,3.47,172,-32.46,13.40,-44.94,0.00,0.00,-1.00,63.07,3.04,178,-32.46,-13.40,-44.94,0.26,0.00,-1.00,63.07,3.39,174,51.20,0.00,32.14,1.00,0.00,1.00,52.99,5.82,174,69.94,0.00,50.88,1.00,0.06,0.00,20.36,4.42,174,-42.00,0.00,0.00,-1.00,-0.89,-0.73,22.93,0.83,175,4 +0.00,0.00,0.00,1.00,0.00,0.00,63.98,14.49,170,31.99,14.49,-48.19,0.41,0.00,-1.00,67.39,3.60,175,31.99,-14.49,-48.19,0.00,0.00,-1.00,67.39,2.96,174,-31.99,14.49,-48.19,0.00,0.00,-1.00,67.39,3.08,177,-31.99,-14.49,-48.19,0.21,0.00,-1.00,67.39,3.52,174,49.07,0.00,31.57,1.00,0.00,1.00,48.31,6.60,177,66.15,0.00,48.65,1.00,-0.90,0.00,19.89,4.75,174,-41.15,0.00,0.00,-1.00,-0.50,-0.49,22.83,0.59,179,4 +0.00,0.00,0.00,1.00,0.00,0.00,68.92,14.15,176,34.46,14.15,-47.38,0.18,0.00,-1.00,66.45,2.89,172,34.46,-14.15,-47.38,0.00,0.00,-1.00,66.45,2.93,179,-34.46,14.15,-47.38,0.00,0.00,-1.00,66.45,3.18,173,-34.46,-14.15,-47.38,0.22,0.00,-1.00,66.45,2.84,173,53.01,0.00,32.70,1.00,0.00,1.00,52.46,6.63,172,71.56,0.00,51.25,1.00,0.09,0.00,19.98,4.60,170,-47.25,0.00,0.00,-1.00,-0.22,-0.55,21.67,0.77,171,4 +0.00,0.00,0.00,1.00,0.00,0.00,16.13,4.44,172,8.07,4.44,-7.89,0.18,0.00,-1.00,6.89,0.48,175,8.07,-4.44,-7.89,0.00,0.00,-1.00,6.89,0.66,177,-8.07,4.44,-7.89,0.00,0.00,-1.00,6.89,0.57,179,-8.07,-4.44,-7.89,0.88,0.00,-1.00,6.89,0.62,174,9.74,0.00,4.44,1.00,0.00,1.00,3.35,1.88,175,11.42,0.00,4.44,1.00,0.16,0.00,3.74,1.93,175,-1.86,0.00,0.00,-1.00,-0.46,0.18,14.27,0.49,174,2 +0.00,0.00,0.00,1.00,0.00,0.00,19.32,3.53,176,9.66,3.53,-5.55,0.69,0.00,-1.00,4.04,0.62,176,9.66,-3.53,-5.55,0.00,0.00,-1.00,4.04,0.63,177,-9.66,3.53,-5.55,0.00,0.00,-1.00,4.04,0.56,176,-9.66,-3.53,-5.55,0.70,0.00,-1.00,4.04,0.69,174,11.42,0.00,3.53,1.00,0.00,1.00,3.52,1.66,176,13.18,0.00,3.53,1.00,0.45,0.00,4.09,2.25,174,-6.77,0.00,0.00,-1.00,0.28,0.37,12.55,0.52,175,2 +0.00,0.00,0.00,1.00,0.00,0.00,55.50,10.82,170,27.75,10.82,-31.83,0.39,0.00,-1.00,42.01,2.39,175,27.75,-10.82,-31.83,0.00,0.00,-1.00,42.01,3.16,174,-27.75,10.82,-31.83,0.00,0.00,-1.00,42.01,2.64,179,-27.75,-10.82,-31.83,0.28,0.00,-1.00,42.01,2.69,175,32.80,0.00,15.87,1.00,0.00,1.00,14.29,5.82,175,37.85,0.00,20.93,1.00,0.16,0.00,19.75,4.61,172,-26.61,0.00,0.00,-1.00,0.48,-0.45,28.89,1.70,175,3 
+0.00,0.00,0.00,1.00,0.00,0.00,19.56,4.38,174,9.78,4.38,-7.58,0.41,0.00,-1.00,6.40,0.61,171,9.78,-4.38,-7.58,0.00,0.00,-1.00,6.40,0.51,177,-9.78,4.38,-7.58,0.00,0.00,-1.00,6.40,0.55,176,-9.78,-4.38,-7.58,0.03,0.00,-1.00,6.40,0.64,174,11.31,0.00,4.38,1.00,0.00,1.00,3.06,1.99,172,12.84,0.00,4.38,1.00,-0.53,0.00,4.52,1.91,174,-7.16,0.00,0.00,-1.00,0.30,-0.10,12.40,0.47,176,2 +0.00,0.00,0.00,1.00,0.00,0.00,55.24,11.70,174,27.62,11.70,-29.71,0.32,0.00,-1.00,36.02,2.63,174,27.62,-11.70,-29.71,0.00,0.00,-1.00,36.02,2.59,175,-27.62,11.70,-29.71,0.00,0.00,-1.00,36.02,2.32,174,-27.62,-11.70,-29.71,0.22,0.00,-1.00,36.02,2.65,172,33.01,0.00,17.10,1.00,0.00,1.00,15.25,6.10,174,38.41,0.00,22.49,1.00,-0.21,0.00,19.57,5.77,174,-24.70,0.00,0.00,-1.00,-0.80,-0.67,30.54,1.43,174,3 +0.00,0.00,0.00,1.00,0.00,0.00,23.53,6.30,174,11.76,6.30,-12.20,0.45,0.00,-1.00,11.79,1.01,174,11.76,-6.30,-12.20,0.00,0.00,-1.00,11.79,0.90,173,-11.76,6.30,-12.20,0.00,0.00,-1.00,11.79,0.82,173,-11.76,-6.30,-12.20,0.78,0.00,-1.00,11.79,1.02,173,14.63,0.00,6.30,1.00,0.00,1.00,5.73,2.34,173,17.49,0.00,6.30,1.00,-0.03,0.00,12.92,3.08,173,-16.56,0.00,0.00,-1.00,-0.95,0.40,6.97,0.29,177,1 +0.00,0.00,0.00,1.00,0.00,0.00,57.09,13.37,179,28.55,13.37,-34.07,0.34,0.00,-1.00,41.39,2.95,174,28.55,-13.37,-34.07,0.00,0.00,-1.00,41.39,2.65,177,-28.55,13.37,-34.07,0.00,0.00,-1.00,41.39,3.14,174,-28.55,-13.37,-34.07,0.38,0.00,-1.00,41.39,2.31,172,33.60,0.00,18.43,1.00,0.00,1.00,14.30,5.87,178,38.66,0.00,23.49,1.00,-0.34,0.00,21.26,4.80,177,-27.68,0.00,0.00,-1.00,0.04,-0.28,29.41,1.79,174,3 +0.00,0.00,0.00,1.00,0.00,0.00,55.53,12.59,175,27.76,12.59,-31.79,0.39,0.00,-1.00,38.40,2.63,175,27.76,-12.59,-31.79,0.00,0.00,-1.00,38.40,2.37,172,-27.76,12.59,-31.79,0.00,0.00,-1.00,38.40,2.87,177,-27.76,-12.59,-31.79,0.25,0.00,-1.00,38.40,2.79,172,32.74,0.00,17.57,1.00,0.00,1.00,14.08,6.84,174,37.72,0.00,22.55,1.00,-0.42,0.00,20.24,4.85,175,-24.51,0.00,0.00,-1.00,0.04,-0.99,31.02,1.21,172,3 +0.00,0.00,0.00,1.00,0.00,0.00,66.61,15.91,176,33.30,15.91,-49.14,0.20,0.00,-1.00,66.46,2.02,172,33.30,-15.91,-49.14,0.00,0.00,-1.00,66.46,2.88,176,-33.30,15.91,-49.14,0.00,0.00,-1.00,66.46,2.88,176,-33.30,-15.91,-49.14,0.10,0.00,-1.00,66.46,3.38,175,50.69,0.00,33.29,1.00,0.00,1.00,49.17,7.29,175,68.08,0.00,50.68,1.00,0.51,0.00,20.36,4.63,174,-43.96,0.00,0.00,-1.00,0.11,-0.53,22.64,0.78,172,4 +0.00,0.00,0.00,1.00,0.00,0.00,21.11,4.42,179,10.55,4.42,-7.53,0.54,0.00,-1.00,6.22,0.60,175,10.55,-4.42,-7.53,0.00,0.00,-1.00,6.22,0.63,172,-10.55,4.42,-7.53,0.00,0.00,-1.00,6.22,0.58,172,-10.55,-4.42,-7.53,0.59,0.00,-1.00,6.22,0.55,174,12.10,0.00,4.42,1.00,0.00,1.00,3.09,1.73,175,13.64,0.00,4.42,1.00,0.32,0.00,3.07,2.08,176,-6.75,0.00,0.00,-1.00,-0.04,-0.19,14.36,0.61,176,2 +0.00,0.00,0.00,1.00,0.00,0.00,67.01,13.55,178,33.50,13.55,-43.47,0.29,0.00,-1.00,59.84,3.27,175,33.50,-13.55,-43.47,0.00,0.00,-1.00,59.84,3.14,172,-33.50,13.55,-43.47,0.00,0.00,-1.00,59.84,2.63,175,-33.50,-13.55,-43.47,0.24,0.00,-1.00,59.84,2.57,174,51.06,0.00,31.11,1.00,0.00,1.00,49.66,5.21,174,68.62,0.00,48.67,1.00,-0.10,0.00,20.37,4.86,172,-44.29,0.00,0.00,-1.00,0.15,-0.35,22.72,0.62,175,4 +0.00,0.00,0.00,1.00,0.00,0.00,18.96,3.62,171,9.48,3.62,-6.27,0.53,0.00,-1.00,5.28,0.61,177,9.48,-3.62,-6.27,0.00,0.00,-1.00,5.28,0.55,172,-9.48,3.62,-6.27,0.00,0.00,-1.00,5.28,0.61,175,-9.48,-3.62,-6.27,0.05,0.00,-1.00,5.28,0.57,177,10.88,0.00,3.62,1.00,0.00,1.00,2.81,1.77,177,12.29,0.00,3.62,1.00,-0.15,0.00,4.56,1.85,173,-2.57,0.00,0.00,-1.00,-0.03,0.64,16.38,0.47,177,2 
+0.00,0.00,0.00,1.00,0.00,0.00,55.87,10.84,174,27.93,10.84,-31.00,0.45,0.00,-1.00,40.31,2.71,177,27.93,-10.84,-31.00,0.00,0.00,-1.00,40.31,3.15,174,-27.93,10.84,-31.00,0.00,0.00,-1.00,40.31,2.79,178,-27.93,-10.84,-31.00,0.38,0.00,-1.00,40.31,2.93,175,32.86,0.00,15.76,1.00,0.00,1.00,13.92,6.85,173,37.78,0.00,20.68,1.00,-0.57,0.00,18.86,4.57,172,-23.17,0.00,0.00,-1.00,0.56,-0.78,32.69,1.41,175,3 +0.00,0.00,0.00,1.00,0.00,0.00,19.15,2.53,176,9.57,2.53,-5.65,0.44,0.00,-1.00,6.24,0.68,170,9.57,-2.53,-5.65,0.00,0.00,-1.00,6.24,0.63,176,-9.57,2.53,-5.65,0.00,0.00,-1.00,6.24,0.64,173,-9.57,-2.53,-5.65,0.39,0.00,-1.00,6.24,0.60,177,10.94,0.00,2.53,1.00,0.00,1.00,2.73,1.98,177,12.30,0.00,2.53,1.00,-0.44,0.00,3.58,1.99,175,-6.55,0.00,0.00,-1.00,-0.56,-0.59,12.60,0.58,177,2 +0.00,0.00,0.00,1.00,0.00,0.00,18.97,4.06,171,9.49,4.06,-7.45,0.63,0.00,-1.00,6.79,0.69,179,9.49,-4.06,-7.45,0.00,0.00,-1.00,6.79,0.59,174,-9.49,4.06,-7.45,0.00,0.00,-1.00,6.79,0.63,172,-9.49,-4.06,-7.45,0.29,0.00,-1.00,6.79,0.44,171,11.00,0.00,4.06,1.00,0.00,1.00,3.04,2.03,175,12.52,0.00,4.06,1.00,-0.32,0.00,3.16,1.74,174,-5.34,0.00,0.00,-1.00,0.47,-0.05,13.63,0.43,174,2 +0.00,0.00,0.00,1.00,0.00,0.00,20.41,4.22,174,10.21,4.22,-7.43,0.77,0.00,-1.00,6.43,0.73,175,10.21,-4.22,-7.43,0.00,0.00,-1.00,6.43,0.60,172,-10.21,4.22,-7.43,0.00,0.00,-1.00,6.43,0.58,176,-10.21,-4.22,-7.43,0.55,0.00,-1.00,6.43,0.77,175,11.91,0.00,4.22,1.00,0.00,1.00,3.41,2.04,174,13.61,0.00,4.22,1.00,0.05,0.00,3.15,1.97,172,-7.03,0.00,0.00,-1.00,0.52,-0.17,13.38,0.48,175,2 +0.00,0.00,0.00,1.00,0.00,0.00,18.58,3.56,174,9.29,3.56,-6.41,0.47,0.00,-1.00,5.68,0.57,174,9.29,-3.56,-6.41,0.00,0.00,-1.00,5.68,0.61,175,-9.29,3.56,-6.41,0.00,0.00,-1.00,5.68,0.62,175,-9.29,-3.56,-6.41,0.48,0.00,-1.00,5.68,0.65,175,10.78,0.00,3.56,1.00,0.00,1.00,2.98,1.90,174,12.27,0.00,3.56,1.00,-0.10,0.00,4.76,1.94,171,-6.14,0.00,0.00,-1.00,-0.91,0.12,12.44,0.49,177,2 +0.00,0.00,0.00,1.00,0.00,0.00,23.07,6.91,176,11.54,6.91,-13.17,0.44,0.00,-1.00,12.52,1.03,172,11.54,-6.91,-13.17,0.00,0.00,-1.00,12.52,1.01,175,-11.54,6.91,-13.17,0.00,0.00,-1.00,12.52,0.93,173,-11.54,-6.91,-13.17,0.67,0.00,-1.00,12.52,1.01,174,14.27,0.00,6.91,1.00,0.00,1.00,5.48,2.23,176,17.01,0.00,6.91,1.00,-0.02,0.00,12.07,2.98,172,-15.99,0.00,0.00,-1.00,0.10,0.09,7.08,0.22,175,1 +0.00,0.00,0.00,1.00,0.00,0.00,25.09,6.81,175,12.55,6.81,-13.04,0.40,0.00,-1.00,12.46,1.02,173,12.55,-6.81,-13.04,0.00,0.00,-1.00,12.46,1.07,171,-12.55,6.81,-13.04,0.00,0.00,-1.00,12.46,0.96,175,-12.55,-6.81,-13.04,0.73,0.00,-1.00,12.46,1.06,175,15.24,0.00,6.81,1.00,0.00,1.00,5.40,2.42,175,17.94,0.00,6.81,1.00,-0.02,0.00,12.42,3.05,174,-18.04,0.00,0.00,-1.00,0.10,-0.02,7.05,0.29,177,1 +0.00,0.00,0.00,1.00,0.00,0.00,24.26,6.69,176,12.13,6.69,-12.87,0.34,0.00,-1.00,12.36,0.99,179,12.13,-6.69,-12.87,0.00,0.00,-1.00,12.36,0.85,177,-12.13,6.69,-12.87,0.00,0.00,-1.00,12.36,0.94,173,-12.13,-6.69,-12.87,0.63,0.00,-1.00,12.36,1.16,174,14.73,0.00,6.69,1.00,0.00,1.00,5.21,2.26,175,17.33,0.00,6.69,1.00,0.09,0.00,10.35,2.93,175,-16.79,0.00,0.00,-1.00,-0.55,-0.51,7.47,0.30,175,1 +0.00,0.00,0.00,1.00,0.00,0.00,70.15,15.33,177,35.08,15.33,-47.03,0.19,0.00,-1.00,63.40,2.87,170,35.08,-15.33,-47.03,0.00,0.00,-1.00,63.40,2.82,172,-35.08,15.33,-47.03,0.00,0.00,-1.00,63.40,2.63,176,-35.08,-15.33,-47.03,0.29,0.00,-1.00,63.40,3.06,173,52.40,0.00,32.65,1.00,0.00,1.00,48.99,6.67,175,69.72,0.00,49.97,1.00,0.10,0.00,19.38,5.20,177,-47.94,0.00,0.00,-1.00,-0.07,-0.52,22.21,0.78,170,4 
+0.00,0.00,0.00,1.00,0.00,0.00,54.47,10.84,175,27.23,10.84,-30.91,0.25,0.00,-1.00,40.14,3.20,173,27.23,-10.84,-30.91,0.00,0.00,-1.00,40.14,2.34,176,-27.23,10.84,-30.91,0.00,0.00,-1.00,40.14,2.83,178,-27.23,-10.84,-30.91,0.10,0.00,-1.00,40.14,2.83,172,32.25,0.00,15.85,1.00,0.00,1.00,14.18,5.81,175,37.26,0.00,20.87,1.00,-0.07,0.00,18.44,4.87,174,-26.25,0.00,0.00,-1.00,0.10,-0.46,28.21,1.97,175,3 +0.00,0.00,0.00,1.00,0.00,0.00,23.63,6.34,177,11.82,6.34,-12.63,0.32,0.00,-1.00,12.58,0.96,174,11.82,-6.34,-12.63,0.00,0.00,-1.00,12.58,1.11,172,-11.82,6.34,-12.63,0.00,0.00,-1.00,12.58,1.01,174,-11.82,-6.34,-12.63,0.55,0.00,-1.00,12.58,0.99,173,14.56,0.00,6.34,1.00,0.00,1.00,5.48,2.24,175,17.30,0.00,6.34,1.00,-0.18,0.00,10.80,2.67,172,-17.09,0.00,0.00,-1.00,-0.14,0.18,6.54,0.29,171,1 +0.00,0.00,0.00,1.00,0.00,0.00,65.84,14.96,175,32.92,14.96,-46.22,0.17,0.00,-1.00,62.51,2.61,174,32.92,-14.96,-46.22,0.00,0.00,-1.00,62.51,3.48,174,-32.92,14.96,-46.22,0.00,0.00,-1.00,62.51,3.16,176,-32.92,-14.96,-46.22,0.29,0.00,-1.00,62.51,2.77,175,50.55,0.00,32.59,1.00,0.00,1.00,49.86,6.89,174,68.18,0.00,50.22,1.00,-0.43,0.00,19.96,4.41,177,-42.33,0.00,0.00,-1.00,-0.15,-0.28,23.50,0.62,172,4 +0.00,0.00,0.00,1.00,0.00,0.00,55.87,12.20,170,27.94,12.20,-31.55,0.14,0.00,-1.00,38.71,2.40,174,27.94,-12.20,-31.55,0.00,0.00,-1.00,38.71,2.84,173,-27.94,12.20,-31.55,0.00,0.00,-1.00,38.71,2.73,177,-27.94,-12.20,-31.55,0.24,0.00,-1.00,38.71,2.79,174,32.48,0.00,16.75,1.00,0.00,1.00,12.85,6.04,176,37.03,0.00,21.29,1.00,-0.17,0.00,20.27,5.11,175,-27.59,0.00,0.00,-1.00,-0.35,-0.60,28.29,1.29,174,3 +0.00,0.00,0.00,1.00,0.00,0.00,54.55,12.10,174,27.27,12.10,-31.75,0.46,0.00,-1.00,39.30,3.16,178,27.27,-12.10,-31.75,0.00,0.00,-1.00,39.30,2.27,174,-27.27,12.10,-31.75,0.00,0.00,-1.00,39.30,2.70,178,-27.27,-12.10,-31.75,0.26,0.00,-1.00,39.30,2.69,175,32.32,0.00,17.14,1.00,0.00,1.00,14.27,5.65,174,37.37,0.00,22.19,1.00,-0.41,0.00,20.96,5.65,175,-24.51,0.00,0.00,-1.00,0.42,-0.56,30.03,1.29,173,3 +0.00,0.00,0.00,1.00,0.00,0.00,24.78,6.60,174,12.39,6.60,-12.35,0.52,0.00,-1.00,11.49,1.04,176,12.39,-6.60,-12.35,0.00,0.00,-1.00,11.49,1.01,173,-12.39,6.60,-12.35,0.00,0.00,-1.00,11.49,1.00,174,-12.39,-6.60,-12.35,0.52,0.00,-1.00,11.49,0.97,175,15.29,0.00,6.60,1.00,0.00,1.00,5.80,2.58,172,18.19,0.00,6.60,1.00,-0.94,0.00,11.23,3.02,172,-17.73,0.00,0.00,-1.00,0.54,0.18,7.05,0.38,175,1 +0.00,0.00,0.00,1.00,0.00,0.00,69.38,13.96,174,34.69,13.96,-47.59,0.19,0.00,-1.00,67.25,3.83,174,34.69,-13.96,-47.59,0.00,0.00,-1.00,67.25,3.80,177,-34.69,13.96,-47.59,0.00,0.00,-1.00,67.25,2.86,174,-34.69,-13.96,-47.59,0.24,0.00,-1.00,67.25,2.08,174,51.93,0.00,31.20,1.00,0.00,1.00,48.76,8.52,175,69.17,0.00,48.45,1.00,-0.02,0.00,19.01,4.54,176,-46.94,0.00,0.00,-1.00,0.14,-0.30,22.44,0.72,178,4 +0.00,0.00,0.00,1.00,0.00,0.00,51.81,11.65,174,25.91,11.65,-31.65,0.30,0.00,-1.00,40.01,2.77,174,25.91,-11.65,-31.65,0.00,0.00,-1.00,40.01,2.40,174,-25.91,11.65,-31.65,0.00,0.00,-1.00,40.01,2.91,174,-25.91,-11.65,-31.65,0.23,0.00,-1.00,40.01,2.60,171,30.61,0.00,16.35,1.00,0.00,1.00,13.30,5.90,172,35.31,0.00,21.05,1.00,-0.54,0.00,19.13,4.11,175,-23.55,0.00,0.00,-1.00,-0.19,-0.45,28.27,1.20,175,3 +0.00,0.00,0.00,1.00,0.00,0.00,19.17,2.91,172,9.58,2.91,-6.03,0.80,0.00,-1.00,6.24,0.61,175,9.58,-2.91,-6.03,0.00,0.00,-1.00,6.24,0.54,172,-9.58,2.91,-6.03,0.00,0.00,-1.00,6.24,0.41,178,-9.58,-2.91,-6.03,0.60,0.00,-1.00,6.24,0.57,174,11.08,0.00,2.91,1.00,0.00,1.00,2.99,1.92,175,12.57,0.00,2.91,1.00,0.13,0.00,4.03,2.30,176,-5.73,0.00,0.00,-1.00,0.14,-0.05,13.44,0.57,176,2 
+0.00,0.00,0.00,1.00,0.00,0.00,55.36,12.25,178,27.68,12.25,-32.04,0.33,0.00,-1.00,39.58,2.72,173,27.68,-12.25,-32.04,0.00,0.00,-1.00,39.58,3.00,179,-27.68,12.25,-32.04,0.00,0.00,-1.00,39.58,2.80,178,-27.68,-12.25,-32.04,0.20,0.00,-1.00,39.58,3.00,179,31.97,0.00,16.54,1.00,0.00,1.00,12.13,6.14,175,36.26,0.00,20.83,1.00,0.27,0.00,18.39,5.24,175,-24.56,0.00,0.00,-1.00,-0.17,-0.99,30.79,1.39,174,3 +0.00,0.00,0.00,1.00,0.00,0.00,26.06,6.21,174,13.03,6.21,-12.44,0.24,0.00,-1.00,12.47,0.98,172,13.03,-6.21,-12.44,0.00,0.00,-1.00,12.47,1.02,172,-13.03,6.21,-12.44,0.00,0.00,-1.00,12.47,1.03,175,-13.03,-6.21,-12.44,0.51,0.00,-1.00,12.47,0.97,174,15.91,0.00,6.21,1.00,0.00,1.00,5.76,2.81,175,18.79,0.00,6.21,1.00,0.17,0.00,12.15,2.88,175,-19.19,0.00,0.00,-1.00,0.97,0.70,6.87,0.28,174,1 +0.00,0.00,0.00,1.00,0.00,0.00,24.68,6.66,175,12.34,6.66,-13.36,0.93,0.00,-1.00,13.39,0.86,174,12.34,-6.66,-13.36,0.00,0.00,-1.00,13.39,1.02,175,-12.34,6.66,-13.36,0.00,0.00,-1.00,13.39,0.95,175,-12.34,-6.66,-13.36,0.26,0.00,-1.00,13.39,0.98,172,15.02,0.00,6.66,1.00,0.00,1.00,5.36,2.41,176,17.70,0.00,6.66,1.00,0.01,0.00,11.51,3.06,177,-17.54,0.00,0.00,-1.00,-0.49,-0.13,7.14,0.22,175,1 +0.00,0.00,0.00,1.00,0.00,0.00,55.57,12.52,173,27.78,12.52,-32.80,0.36,0.00,-1.00,40.55,3.00,174,27.78,-12.52,-32.80,0.00,0.00,-1.00,40.55,2.72,173,-27.78,12.52,-32.80,0.00,0.00,-1.00,40.55,2.10,177,-27.78,-12.52,-32.80,0.14,0.00,-1.00,40.55,2.05,174,32.61,0.00,17.34,1.00,0.00,1.00,13.64,6.18,173,37.43,0.00,22.16,1.00,0.07,0.00,19.68,5.03,178,-25.42,0.00,0.00,-1.00,0.51,-0.52,30.15,1.45,170,3 +0.00,0.00,0.00,1.00,0.00,0.00,18.07,3.03,174,9.03,3.03,-6.33,0.44,0.00,-1.00,6.60,0.63,176,9.03,-3.03,-6.33,0.00,0.00,-1.00,6.60,0.49,173,-9.03,3.03,-6.33,0.00,0.00,-1.00,6.60,0.57,173,-9.03,-3.03,-6.33,0.65,0.00,-1.00,6.60,0.56,175,10.43,0.00,3.03,1.00,0.00,1.00,2.78,1.92,173,11.82,0.00,3.03,1.00,-0.05,0.00,4.37,2.48,177,-3.73,0.00,0.00,-1.00,0.75,-0.07,14.33,0.50,177,2 +0.00,0.00,0.00,1.00,0.00,0.00,18.07,3.62,176,9.04,3.62,-6.69,0.50,0.00,-1.00,6.14,0.56,174,9.04,-3.62,-6.69,0.00,0.00,-1.00,6.14,0.60,175,-9.04,3.62,-6.69,0.00,0.00,-1.00,6.14,0.58,172,-9.04,-3.62,-6.69,0.36,0.00,-1.00,6.14,0.61,179,10.36,0.00,3.62,1.00,0.00,1.00,2.65,2.21,174,11.68,0.00,3.62,1.00,0.13,0.00,3.70,2.03,175,-4.06,0.00,0.00,-1.00,-0.55,-0.04,14.02,0.70,172,2 \ No newline at end of file diff --git a/examples/mammmals_cpu.py b/examples/mammmals_cpu.py new file mode 100644 index 0000000..a1800d6 --- /dev/null +++ b/examples/mammmals_cpu.py @@ -0,0 +1,43 @@ +from datetime import timedelta +from timeit import default_timer as timer + +import matplotlib.pyplot as plt +import numpy as np +from matplotlib.colors import ListedColormap + +from neo_force_scheme import NeoForceScheme, kruskal_stress + +data = np.loadtxt("./mammals.data", delimiter=",") +n, d = data.shape[0], data.shape[1] +x = data[:, range(d - 1)] +t = data[:, d - 1] + +# read the distance matrix +nfs = NeoForceScheme(verbose=True, learning_rate0=0.2, decay=0.95) + +# execute force scheme +start = timer() +projection = nfs.fit_transform(x, random_state=1) +error = nfs.projection_error_ +end = timer() + +print(f'ForceScheme took {timedelta(seconds=end - start)} to execute with error {error}') + +# calculate stress +stress = kruskal_stress(nfs.embedding_, projection) +print('Kruskal stress {0}:'.format(stress)) + +# save projection +# np.savetxt(input_file + "_projection.txt", projection, delimiter=" ", fmt="%s") + +# show projection +plt.figure() +plt.scatter(projection[:, 0], + projection[:, 1], + c=t, + 
cmap=ListedColormap(['blue', 'red', 'green']), + edgecolors='face', + linewidths=0.5, + s=4) +plt.grid(linestyle='dotted') +plt.show() diff --git a/examples/mammmals_large_cpu.py b/examples/mammmals_large_cpu.py new file mode 100644 index 0000000..05b9cf9 --- /dev/null +++ b/examples/mammmals_large_cpu.py @@ -0,0 +1,44 @@ +from datetime import timedelta +from timeit import default_timer as timer + +import matplotlib.pyplot as plt +import numpy as np +from matplotlib.colors import ListedColormap + +from neo_force_scheme import NeoForceScheme, kruskal_stress + +data = np.loadtxt("./mammals.data", delimiter=",") +data = np.tile(data, (100, 1)) +n, d = data.shape[0], data.shape[1] +x = data[:, range(d - 1)] +t = data[:, d - 1] + +# read the distance matrix +nfs = NeoForceScheme(verbose=True, learning_rate0=0.2, decay=0.95) + +# execute force scheme +start = timer() +projection = nfs.fit_transform(x, random_state=1) +error = nfs.projection_error_ +end = timer() + +print(f'ForceScheme took {timedelta(seconds=end - start)} to execute with error {error}') + +# calculate stress +stress = kruskal_stress(nfs.embedding_, projection) +print('Kruskal stress {0}:'.format(stress)) + +# save projection +# np.savetxt(input_file + "_projection.txt", projection, delimiter=" ", fmt="%s") + +# show projection +plt.figure() +plt.scatter(projection[:, 0], + projection[:, 1], + c=t, + cmap=ListedColormap(['blue', 'red', 'green']), + edgecolors='face', + linewidths=0.5, + s=4) +plt.grid(linestyle='dotted') +plt.show() diff --git a/examples/mammmals_large_gpu.py b/examples/mammmals_large_gpu.py new file mode 100644 index 0000000..76c771e --- /dev/null +++ b/examples/mammmals_large_gpu.py @@ -0,0 +1,44 @@ +from datetime import timedelta +from timeit import default_timer as timer + +import matplotlib.pyplot as plt +import numpy as np +from matplotlib.colors import ListedColormap + +from neo_force_scheme import NeoForceScheme, kruskal_stress + +data = np.loadtxt("./mammals.data", delimiter=",") +data = np.tile(data, (100, 1)) +n, d = data.shape[0], data.shape[1] +x = data[:, range(d - 1)] +t = data[:, d - 1] + +# read the distance matrix +nfs = NeoForceScheme(verbose=True, learning_rate0=0.2, decay=0.95, cuda=True) + +# execute force scheme +start = timer() +projection = nfs.fit_transform(x, random_state=1) +error = nfs.projection_error_ +end = timer() + +print(f'ForceScheme took {timedelta(seconds=end - start)} to execute with error {error}') + +# calculate stress +stress = kruskal_stress(nfs.embedding_, projection) +print('Kruskal stress {0}:'.format(stress)) + +# save projection +# np.savetxt(input_file + "_projection.txt", projection, delimiter=" ", fmt="%s") + +# show projection +plt.figure() +plt.scatter(projection[:, 0], + projection[:, 1], + c=t, + cmap=ListedColormap(['blue', 'red', 'green']), + edgecolors='face', + linewidths=0.5, + s=4) +plt.grid(linestyle='dotted') +plt.show() diff --git a/neo_force_scheme/__init__.py b/neo_force_scheme/__init__.py new file mode 100644 index 0000000..5fd00be --- /dev/null +++ b/neo_force_scheme/__init__.py @@ -0,0 +1,2 @@ +from ._neo_force_scheme import NeoForceScheme +from .kruskal_stress import kruskal_stress \ No newline at end of file diff --git a/neo_force_scheme/_neo_force_scheme.py b/neo_force_scheme/_neo_force_scheme.py new file mode 100644 index 0000000..4fb94a1 --- /dev/null +++ b/neo_force_scheme/_neo_force_scheme.py @@ -0,0 +1,317 @@ +import math +import traceback +from enum import Enum +from sys import getsizeof +from typing import Optional + +import numpy as 
np
+from sklearn.base import BaseEstimator
+from sklearn.utils.validation import check_is_fitted
+
+from . import utils, distances
+
+# edge of the default square CUDA thread block (TPB x TPB threads)
+TPB = 16
+
+
+class ProjectionMode(Enum):
+    RANDOM = 1
+
+
+class NeoForceScheme(BaseEstimator):
+    """ForceScheme projection technique.
+
+    ForceScheme is a force-based dimensionality reduction technique: starting
+    from an initial (by default random) 2D placement, it iteratively moves
+    each point towards or away from the others so that the pairwise distances
+    of the projection approximate the pairwise distances of the original data.
+
+    It is highly recommended to use another dimensionality reduction
+    method (e.g. PCA for dense data or TruncatedSVD for sparse data)
+    to reduce the number of dimensions to a reasonable amount (e.g. 50)
+    if the number of features is very high. This will suppress some
+    noise and speed up the computation of pairwise distances between
+    samples.
+
+    Parameters
+    ----------
+    metric : str, default='euclidean'
+        The metric to use when calculating distance between instances in a
+        feature array. Must be the name of one of the functions implemented
+        in neo_force_scheme.distances.
+    metric_args : list, default=None
+        Extra arguments forwarded to the metric function.
+    max_it : int, default=100
+        Maximum number of iterations for the optimization.
+    learning_rate0 : float, default=0.5
+        Initial learning rate; at iteration k it decays to
+        learning_rate0 * (1 - k / max_it) ** decay.
+    decay : float, default=0.95
+        Exponent of the learning-rate decay schedule.
+    tolerance : float, default=0.00001
+        If the absolute change of the projection error between two
+        consecutive iterations falls below this threshold, the
+        optimization stops.
+    n_jobs : int, default=None
+        Number of parallel jobs to run.
+    cuda : bool, default=False
+        If True, run the optimization on the GPU through Numba's CUDA
+        backend, falling back to the CPU implementation when the GPU
+        cannot be used.
+    cuda_threads_per_block : int or tuple, optional
+        CUDA launch configuration; defaults to a (TPB, TPB) block.
+    cuda_blocks_per_grid : int or tuple, optional
+        CUDA launch configuration; derived from the data size when omitted.
+    cuda_profile : bool, default=False
+        If True, wrap the GPU iterations in cuda.profile_start()/stop().
+    verbose : bool, default=False
+        If True, print progress information.
+    Attributes
+    ----------
+    embedding_ : array-like
+        The flattened triangular pairwise distance matrix computed by fit
+        (note: not the projected coordinates, which are returned by
+        transform).
+    projection_error_ : float
+        Projection error after the optimization.
+    Examples
+    --------
+    # >>> import numpy as np
+    # >>> from neo_force_scheme import NeoForceScheme
+    # >>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
+    # >>> X_embedded = NeoForceScheme().fit_transform(X)
+    # >>> X_embedded.shape
+    (4, 2)
+    References
+    ----------
+    [1] ...
+    """
+
+    def __init__(
+        self,
+        *,
+        metric="euclidean",
+        metric_args: list = None,
+        max_it: int = 100,
+        learning_rate0: float = 0.5,
+        decay: float = 0.95,
+        tolerance: float = 0.00001,
+        n_jobs: int = None,
+        cuda: bool = False,
+        cuda_threads_per_block: Optional[int] = None,
+        cuda_blocks_per_grid: Optional[int] = None,
+        cuda_profile: bool = False,
+        verbose: bool = False,
+    ):
+        try:
+            self.metric = getattr(distances, metric)
+        except Exception as e:
+            # raise the error class, not the NotImplemented constant
+            raise NotImplementedError(f'Metric {metric} is not implemented') from e
+
+        self.metric_args = metric_args
+        self.max_it = max_it
+        self.learning_rate0 = learning_rate0
+        self.decay = decay
+        self.tolerance = tolerance
+        self.n_jobs = n_jobs
+        self.cuda = cuda
+        self.cuda_profile = cuda_profile
+        self.cuda_threads_per_block = cuda_threads_per_block
+        self.cuda_blocks_per_grid = cuda_blocks_per_grid
+        self.print = print if verbose else lambda *a, **k: None
+
+    def save(self, filename, *, use_pickle=True):
+        if use_pickle:
+            utils.pickle_save_matrix(filename, self.embedding_, self.embedding_size_)
+        else:
+            raise NotImplementedError('Only the pickle save method is currently implemented')
+
+    def load(self, filename, *, use_pickle=True):
+        if use_pickle:
+            self.embedding_, self.embedding_size_ = utils.pickle_load_matrix(filename)
+        else:
+            self.embedding_, self.embedding_size_ = utils.read_distance_matrix(filename)
+
+    def _fit(self, X, skip_num_points=0):
+        self.embedding_ = utils.create_triangular_distance_matrix(X, self.metric)
+        self.print('Distance matrix size in memory:', round(getsizeof(self.embedding_) / 1024 / 1024, 2), 'MB')
+
+    def _transform(self, X, *, index, total, inplace):
+        # iterate until max_it or until the error changes less than the tolerance
+        error = math.inf
+        for k in range(self.max_it):
+            learning_rate = self.learning_rate0 * math.pow((1 - k / self.max_it), self.decay)
+            new_error = utils.iteration(index, self.embedding_, X, learning_rate)
+
+            if math.fabs(new_error - error) < self.tolerance:
+                break
+
+            error = new_error
+        return X, error
+
+    def transform(
+        self,
+        Xd: Optional[np.ndarray] = None,
+        *,
+        starting_projection_mode: Optional[ProjectionMode] = ProjectionMode.RANDOM,
+        inplace: bool = True,  # TODO: implement False
+        random_state: Optional[int] = None,
+    ):
+        """Transform X into the existing embedded space and return that
+        transformed output.
+        Parameters
+        ----------
+        Xd : array, shape (n_samples, n_features)
+            New data to be transformed.
+        starting_projection_mode : one of [RANDOM]
+            Specifies the starting values of the projection.
+            Used only if Xd is None.
+        inplace : boolean
+            Specifies whether Xd will be changed inplace during the
+            ForceScheme projection.
+        random_state : int
+            Seed for the random starting projection and traversal order.
+
+        Returns
+        -------
+        X_new : array, shape (n_samples, n_components)
+            Embedding of the new data in low-dimensional space.
+        """
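+        # NOTE: embedding_ appears to store the upper triangle of the distance
+        # matrix including the diagonal (see the indexing in cuda_utils.move),
+        # i.e. total = n * (n + 1) / 2 entries; since
+        # n ** 2 < 2 * total + 1 < (n + 1) ** 2, the int(math.sqrt(2 * total + 1))
+        # below recovers the sample count n exactly.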
+ """ + check_is_fitted(self) + total = len(self.embedding_) + size = int(math.sqrt(2 * total + 1)) + + # set the random seed + if random_state is not None: + np.random.seed(random_state) + + # randomly initialize the projection + if starting_projection_mode is not None: + if starting_projection_mode == ProjectionMode.RANDOM: + Xd = np.random.random((size, 2)) + + # create random index TODO: other than random + index = np.random.permutation(size) + + if self.cuda: + Xd, self.projection_error_ = self._gpu_transform(Xd, index=index, total=total, inplace=inpalce) + else: + Xd, self.projection_error_ = self._transform(Xd, index=index, total=total, inplace=inpalce) + + # setting the min to (0,0) + min_x = min(Xd[:, 0]) + min_y = min(Xd[:, 1]) + for i in range(size): + Xd[i][0] -= min_x + Xd[i][1] -= min_y + + return Xd + + def _gpu_transform(self, X, *, index, total, inplace): + try: + from numba import cuda + from .cuda_utils import calc_error, iteration + # iterate until max_it or if the error does not change more than the tolerance + error = math.inf + + d_distance_matrix = cuda.to_device(self.embedding_) + d_index = cuda.to_device(index) + d_projection = cuda.to_device(X) + + size = int(math.sqrt(2 * total + 1)) + threadsperblock = self.cuda_threads_per_block if self.cuda_threads_per_block else (TPB, TPB) + if self.cuda_blocks_per_grid is not None: + blockspergrid = self.cuda_blocks_per_grid + elif isinstance(threadsperblock, int): + blockspergrid = (size) // threadsperblock + else: + blockspergrid = ((size) // threadsperblock[0], (size) // threadsperblock[1]) + self.print(f'Dist: threads:{threadsperblock}, blocks:{blockspergrid}') + + if self.cuda_profile: + cuda.profile_start() + + ref_new_error = np.zeros(shape=(size, size), dtype=np.float64) + d_new_error = cuda.to_device(ref_new_error) + for k in range(self.max_it): + learning_rate = self.learning_rate0 * math.pow((1 - k / self.max_it), self.decay) + iteration[blockspergrid, threadsperblock](d_index, d_distance_matrix, d_projection, learning_rate, d_new_error) + new_error = calc_error(d_new_error.reshape(size * size)) / (size) + # d_new_error.copy_to_host(ref_new_error) + # new_error = ref_new_error.sum()/size + # self.print(new_error, error, size, np.isinf(ref_new_error).sum(), np.isnan(ref_new_error).sum()) + if math.fabs(new_error - error) < self.tolerance: + break + + error = new_error + + if self.cuda_profile: + cuda.profile_stop() + self.print(f'Iter: {k} error: {error}') + + if inplace: + d_projection.copy_to_host(X) + return X, error + else: + tmp = None + d_projection.copy_to_host(tmp) + return tmp, error + except Exception as e: + print(f'Unable to use GPU due to exception {e}. Defaulting to CPU') + traceback.print_stack() + return self._transform(X, index=index, total=total, inplace=inplace) + + def fit_transform(self, X, Xd=None, **kwargs): + """Fit X into an embedded space and return that transformed + output. + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) or (n_samples, n_samples) + If the metric is 'precomputed' X must be a square distance + matrix. Otherwise it contains a sample per row. + Xd : ndarray of shape (n_samples, 2) + Starting configuration of the projection result. By default it is ignored, + and the starting projection is randomized using starting_projection_mode and random_state. + If specified, this must match n_samples. + starting_projection_mode: one of [RANDOM] + Specifies the starting values of the projection. 
+
+    def fit_transform(self, X, Xd=None, **kwargs):
+        """Fit X into an embedded space and return that transformed
+        output.
+        Parameters
+        ----------
+        X : ndarray of shape (n_samples, n_features)
+            The data to be projected, one sample per row.
+        Xd : ndarray of shape (n_samples, 2)
+            Starting configuration of the projection result. By default it is ignored,
+            and the starting projection is randomized using starting_projection_mode and random_state.
+            If specified, this must match n_samples.
+        starting_projection_mode : one of [RANDOM]
+            Specifies how the starting values of the projection are generated.
+            Used when Xd is None.
+        inplace : bool
+            Specifies whether Xd will be changed inplace during the ForceScheme projection.
+        random_state : int, optional
+            Specifies the starting random state used for randomization.
+
+        Returns
+        -------
+        X_new : ndarray of shape (n_samples, 2)
+            Embedding of the training data in low-dimensional space.
+        """
+        self._fit(X)
+        return self.transform(Xd, **kwargs)
+
+    def fit(self, X, y=None):
+        """Fit X into an embedded space.
+        Parameters
+        ----------
+        X : ndarray of shape (n_samples, n_features)
+            The data to be projected, one sample per row.
+        y : Ignored
+        """
+        self._fit(X)
+        return self
diff --git a/neo_force_scheme/cuda_utils.py b/neo_force_scheme/cuda_utils.py
new file mode 100644
index 0000000..5503760
--- /dev/null
+++ b/neo_force_scheme/cuda_utils.py
@@ -0,0 +1,56 @@
+import math
+
+from numba import cuda
+
+
+@cuda.reduce
+def calc_error(a, b):
+    return a + b
+
+
+@cuda.jit(device=True)
+def move(ins1, ins2, distance_matrix, projection, learning_rate, _error):
+    size = len(projection)
+    total = len(distance_matrix)
+
+    x1x2 = projection[ins2][0] - projection[ins1][0]
+    y1y2 = projection[ins2][1] - projection[ins1][1]
+    dr2 = max(math.sqrt(x1x2 * x1x2 + y1y2 * y1y2), 0.0001)
+
+    # getting the index in the distance matrix and getting the value
+    r = (ins1 + ins2 - math.fabs(ins1 - ins2)) / 2  # min(i,j)
+    s = (ins1 + ins2 + math.fabs(ins1 - ins2)) / 2  # max(i,j)
+    drn = distance_matrix[int(total - ((size - r) * (size - r + 1) / 2) + (s - r))]
+
+    # calculate the movement
+    delta = (drn - dr2)
+    _error = math.fabs(delta)
+
+    # moving
+    projection[ins2][0] += learning_rate * delta * (x1x2 / dr2)
+    projection[ins2][1] += learning_rate * delta * (y1y2 / dr2)
+
+    return _error / size
+
+
+@cuda.jit
+def iteration(index, distance_matrix, projection, learning_rate, g_error):
+    tx = cuda.threadIdx.x
+    ty = cuda.threadIdx.y
+
+    # Block id in the 2D grid
+    bx = cuda.blockIdx.x
+    by = cuda.blockIdx.y
+    # Block width, i.e. number of threads per block
+    bw = cuda.blockDim.x
+    bh = cuda.blockDim.y
+
+    i = tx + bx * bw
+    j = ty + by * bh
+    # the grid size is rounded up, so guard against out-of-range indices
+    if i >= index.shape[0] or j >= index.shape[0] or i == j:
+        return
+
+    ins1 = index[i]
+    ins2 = index[j]
+
+    error = 0
+    error = move(ins1, ins2, distance_matrix, projection, learning_rate, error)
+    g_error[i, j] = error
diff --git a/neo_force_scheme/distances.py b/neo_force_scheme/distances.py
new file mode 100644
index 0000000..f4159e6
--- /dev/null
+++ b/neo_force_scheme/distances.py
@@ -0,0 +1,1300 @@
+# Author: Leland McInnes
+#
+# License: BSD 3 clause
+import numba
+import numpy as np
+import scipy.stats
+from sklearn.metrics import pairwise_distances
+
+_mock_identity = np.eye(2, dtype=np.float64)
+_mock_cost = 1.0 - _mock_identity
+_mock_ones = np.ones(2, dtype=np.float64)
+
+
+@numba.njit()
+def sign(a):
+    if a < 0:
+        return -1
+    else:
+        return 1
+
+
+@numba.njit(fastmath=True)
+def euclidean(x, y):
+    """Standard euclidean distance.
+
+    ..math::
+        D(x, y) = \sqrt{\sum_i (x_i - y_i)^2}
+    """
+    result = 0.0
+    for i in range(x.shape[0]):
+        result += (x[i] - y[i]) ** 2
+    return np.sqrt(result)
+
+
+@numba.njit(fastmath=True)
+def euclidean_grad(x, y):
+    """Standard euclidean distance and its gradient.
+ + ..math:: + D(x, y) = \sqrt{\sum_i (x_i - y_i)^2} + \frac{dD(x, y)}{dx} = (x_i - y_i)/D(x,y) + """ + result = 0.0 + for i in range(x.shape[0]): + result += (x[i] - y[i]) ** 2 + d = np.sqrt(result) + grad = (x - y) / (1e-6 + d) + return d, grad + + +@numba.njit() +def standardised_euclidean(x, y, sigma=_mock_ones): + """Euclidean distance standardised against a vector of standard + deviations per coordinate. + + ..math:: + D(x, y) = \sqrt{\sum_i \frac{(x_i - y_i)**2}{v_i}} + """ + result = 0.0 + for i in range(x.shape[0]): + result += ((x[i] - y[i]) ** 2) / sigma[i] + + return np.sqrt(result) + + +@numba.njit(fastmath=True) +def standardised_euclidean_grad(x, y, sigma=_mock_ones): + """Euclidean distance standardised against a vector of standard + deviations per coordinate with gradient. + + ..math:: + D(x, y) = \sqrt{\sum_i \frac{(x_i - y_i)**2}{v_i}} + """ + result = 0.0 + for i in range(x.shape[0]): + result += (x[i] - y[i]) ** 2 / sigma[i] + d = np.sqrt(result) + grad = (x - y) / (1e-6 + d * sigma) + return d, grad + + +@numba.njit() +def manhattan(x, y): + """Manhattan, taxicab, or l1 distance. + + ..math:: + D(x, y) = \sum_i |x_i - y_i| + """ + result = 0.0 + for i in range(x.shape[0]): + result += np.abs(x[i] - y[i]) + + return result + + +@numba.njit() +def manhattan_grad(x, y): + """Manhattan, taxicab, or l1 distance with gradient. + + ..math:: + D(x, y) = \sum_i |x_i - y_i| + """ + result = 0.0 + grad = np.zeros(x.shape) + for i in range(x.shape[0]): + result += np.abs(x[i] - y[i]) + grad[i] = np.sign(x[i] - y[i]) + return result, grad + + +@numba.njit() +def chebyshev(x, y): + """Chebyshev or l-infinity distance. + + ..math:: + D(x, y) = \max_i |x_i - y_i| + """ + result = 0.0 + for i in range(x.shape[0]): + result = max(result, np.abs(x[i] - y[i])) + + return result + + +@numba.njit() +def chebyshev_grad(x, y): + """Chebyshev or l-infinity distance with gradient. + + ..math:: + D(x, y) = \max_i |x_i - y_i| + """ + result = 0.0 + max_i = 0 + for i in range(x.shape[0]): + v = np.abs(x[i] - y[i]) + if v > result: + result = v + max_i = i + grad = np.zeros(x.shape) + grad[max_i] = np.sign(x[max_i] - y[max_i]) + + return result, grad + + +@numba.njit() +def minkowski(x, y, p=2): + """Minkowski distance. + + ..math:: + D(x, y) = \left(\sum_i |x_i - y_i|^p\right)^{\frac{1}{p}} + + This is a general distance. For p=1 it is equivalent to + manhattan distance, for p=2 it is Euclidean distance, and + for p=infinity it is Chebyshev distance. In general it is better + to use the more specialised functions for those distances. + """ + result = 0.0 + for i in range(x.shape[0]): + result += (np.abs(x[i] - y[i])) ** p + + return result ** (1.0 / p) + + +@numba.njit() +def minkowski_grad(x, y, p=2): + """Minkowski distance with gradient. + + ..math:: + D(x, y) = \left(\sum_i |x_i - y_i|^p\right)^{\frac{1}{p}} + + This is a general distance. For p=1 it is equivalent to + manhattan distance, for p=2 it is Euclidean distance, and + for p=infinity it is Chebyshev distance. In general it is better + to use the more specialised functions for those distances. + """ + result = 0.0 + for i in range(x.shape[0]): + result += (np.abs(x[i] - y[i])) ** p + + grad = np.empty(x.shape[0], dtype=np.float32) + for i in range(x.shape[0]): + grad[i] = ( + pow(np.abs(x[i] - y[i]), (p - 1.0)) + * sign(x[i] - y[i]) + * pow(result, (1.0 / (p - 1))) + ) + + return result ** (1.0 / p), grad + + +@numba.njit() +def poincare(u, v): + """Poincare distance. 
+ + ..math:: + \delta (u, v) = 2 \frac{ \lVert u - v \rVert ^2 }{ ( 1 - \lVert u \rVert ^2 ) ( 1 - \lVert v \rVert ^2 ) } + D(x, y) = \operatorname{arcosh} (1+\delta (u,v)) + """ + sq_u_norm = np.sum(u * u) + sq_v_norm = np.sum(v * v) + sq_dist = np.sum(np.power(u - v, 2)) + return np.arccosh(1 + 2 * (sq_dist / ((1 - sq_u_norm) * (1 - sq_v_norm)))) + + +@numba.njit() +def hyperboloid_grad(x, y): + s = np.sqrt(1 + np.sum(x ** 2)) + t = np.sqrt(1 + np.sum(y ** 2)) + + B = s * t + for i in range(x.shape[0]): + B -= x[i] * y[i] + + if B <= 1: + B = 1.0 + 1e-8 + + grad_coeff = 1.0 / (np.sqrt(B - 1) * np.sqrt(B + 1)) + + # return np.arccosh(B), np.zeros(x.shape[0]) + + grad = np.zeros(x.shape[0]) + for i in range(x.shape[0]): + grad[i] = grad_coeff * (((x[i] * t) / s) - y[i]) + + return np.arccosh(B), grad + + +@numba.njit() +def weighted_minkowski(x, y, w=_mock_ones, p=2): + """A weighted version of Minkowski distance. + + ..math:: + D(x, y) = \left(\sum_i w_i |x_i - y_i|^p\right)^{\frac{1}{p}} + + If weights w_i are inverse standard deviations of data in each dimension + then this represented a standardised Minkowski distance (and is + equivalent to standardised Euclidean distance for p=1). + """ + result = 0.0 + for i in range(x.shape[0]): + result += (w[i] * np.abs(x[i] - y[i])) ** p + + return result ** (1.0 / p) + + +@numba.njit() +def weighted_minkowski_grad(x, y, w=_mock_ones, p=2): + """A weighted version of Minkowski distance with gradient. + + ..math:: + D(x, y) = \left(\sum_i w_i |x_i - y_i|^p\right)^{\frac{1}{p}} + + If weights w_i are inverse standard deviations of data in each dimension + then this represented a standardised Minkowski distance (and is + equivalent to standardised Euclidean distance for p=1). + """ + result = 0.0 + for i in range(x.shape[0]): + result += (w[i] * np.abs(x[i] - y[i])) ** p + + grad = np.empty(x.shape[0], dtype=np.float32) + for i in range(x.shape[0]): + grad[i] = ( + w[i] ** p + * pow(np.abs(x[i] - y[i]), (p - 1.0)) + * sign(x[i] - y[i]) + * pow(result, (1.0 / (p - 1))) + ) + + return result ** (1.0 / p), grad + + +@numba.njit() +def mahalanobis(x, y, vinv=_mock_identity): + result = 0.0 + + diff = np.empty(x.shape[0], dtype=np.float32) + + for i in range(x.shape[0]): + diff[i] = x[i] - y[i] + + for i in range(x.shape[0]): + tmp = 0.0 + for j in range(x.shape[0]): + tmp += vinv[i, j] * diff[j] + result += tmp * diff[i] + + return np.sqrt(result) + + +@numba.njit() +def mahalanobis_grad(x, y, vinv=_mock_identity): + result = 0.0 + + diff = np.empty(x.shape[0], dtype=np.float32) + + for i in range(x.shape[0]): + diff[i] = x[i] - y[i] + + grad_tmp = np.zeros(x.shape) + for i in range(x.shape[0]): + tmp = 0.0 + for j in range(x.shape[0]): + tmp += vinv[i, j] * diff[j] + grad_tmp[i] += vinv[i, j] * diff[j] + result += tmp * diff[i] + dist = np.sqrt(result) + grad = grad_tmp / (1e-6 + dist) + return dist, grad + + +@numba.njit() +def hamming(x, y): + result = 0.0 + for i in range(x.shape[0]): + if x[i] != y[i]: + result += 1.0 + + return float(result) / x.shape[0] + + +@numba.njit() +def canberra(x, y): + result = 0.0 + for i in range(x.shape[0]): + denominator = np.abs(x[i]) + np.abs(y[i]) + if denominator > 0: + result += np.abs(x[i] - y[i]) / denominator + + return result + + +@numba.njit() +def canberra_grad(x, y): + result = 0.0 + grad = np.zeros(x.shape) + for i in range(x.shape[0]): + denominator = np.abs(x[i]) + np.abs(y[i]) + if denominator > 0: + result += np.abs(x[i] - y[i]) / denominator + grad[i] = ( + np.sign(x[i] - y[i]) / denominator + - 
np.abs(x[i] - y[i]) * np.sign(x[i]) / denominator ** 2 + ) + + return result, grad + + +@numba.njit() +def bray_curtis(x, y): + numerator = 0.0 + denominator = 0.0 + for i in range(x.shape[0]): + numerator += np.abs(x[i] - y[i]) + denominator += np.abs(x[i] + y[i]) + + if denominator > 0.0: + return float(numerator) / denominator + else: + return 0.0 + + +@numba.njit() +def bray_curtis_grad(x, y): + numerator = 0.0 + denominator = 0.0 + for i in range(x.shape[0]): + numerator += np.abs(x[i] - y[i]) + denominator += np.abs(x[i] + y[i]) + + if denominator > 0.0: + dist = float(numerator) / denominator + grad = (np.sign(x - y) - dist) / denominator + else: + dist = 0.0 + grad = np.zeros(x.shape) + + return dist, grad + + +@numba.njit() +def jaccard(x, y): + num_non_zero = 0.0 + num_equal = 0.0 + for i in range(x.shape[0]): + x_true = x[i] != 0 + y_true = y[i] != 0 + num_non_zero += x_true or y_true + num_equal += x_true and y_true + + if num_non_zero == 0.0: + return 0.0 + else: + return float(num_non_zero - num_equal) / num_non_zero + + +@numba.njit() +def matching(x, y): + num_not_equal = 0.0 + for i in range(x.shape[0]): + x_true = x[i] != 0 + y_true = y[i] != 0 + num_not_equal += x_true != y_true + + return float(num_not_equal) / x.shape[0] + + +@numba.njit() +def dice(x, y): + num_true_true = 0.0 + num_not_equal = 0.0 + for i in range(x.shape[0]): + x_true = x[i] != 0 + y_true = y[i] != 0 + num_true_true += x_true and y_true + num_not_equal += x_true != y_true + + if num_not_equal == 0.0: + return 0.0 + else: + return num_not_equal / (2.0 * num_true_true + num_not_equal) + + +@numba.njit() +def kulsinski(x, y): + num_true_true = 0.0 + num_not_equal = 0.0 + for i in range(x.shape[0]): + x_true = x[i] != 0 + y_true = y[i] != 0 + num_true_true += x_true and y_true + num_not_equal += x_true != y_true + + if num_not_equal == 0: + return 0.0 + else: + return float(num_not_equal - num_true_true + x.shape[0]) / ( + num_not_equal + x.shape[0] + ) + + +@numba.njit() +def rogers_tanimoto(x, y): + num_not_equal = 0.0 + for i in range(x.shape[0]): + x_true = x[i] != 0 + y_true = y[i] != 0 + num_not_equal += x_true != y_true + + return (2.0 * num_not_equal) / (x.shape[0] + num_not_equal) + + +@numba.njit() +def russellrao(x, y): + num_true_true = 0.0 + for i in range(x.shape[0]): + x_true = x[i] != 0 + y_true = y[i] != 0 + num_true_true += x_true and y_true + + if num_true_true == np.sum(x != 0) and num_true_true == np.sum(y != 0): + return 0.0 + else: + return float(x.shape[0] - num_true_true) / (x.shape[0]) + + +@numba.njit() +def sokal_michener(x, y): + num_not_equal = 0.0 + for i in range(x.shape[0]): + x_true = x[i] != 0 + y_true = y[i] != 0 + num_not_equal += x_true != y_true + + return (2.0 * num_not_equal) / (x.shape[0] + num_not_equal) + + +@numba.njit() +def sokal_sneath(x, y): + num_true_true = 0.0 + num_not_equal = 0.0 + for i in range(x.shape[0]): + x_true = x[i] != 0 + y_true = y[i] != 0 + num_true_true += x_true and y_true + num_not_equal += x_true != y_true + + if num_not_equal == 0.0: + return 0.0 + else: + return num_not_equal / (0.5 * num_true_true + num_not_equal) + + +@numba.njit() +def haversine(x, y): + if x.shape[0] != 2: + raise ValueError("haversine is only defined for 2 dimensional data") + sin_lat = np.sin(0.5 * (x[0] - y[0])) + sin_long = np.sin(0.5 * (x[1] - y[1])) + result = np.sqrt(sin_lat ** 2 + np.cos(x[0]) * np.cos(y[0]) * sin_long ** 2) + return 2.0 * np.arcsin(result) + + +@numba.njit() +def haversine_grad(x, y): + # spectral initialization puts many points near 
the poles + # currently, adding pi/2 to the latitude avoids problems + # TODO: reimplement with quaternions to avoid singularity + + if x.shape[0] != 2: + raise ValueError("haversine is only defined for 2 dimensional data") + sin_lat = np.sin(0.5 * (x[0] - y[0])) + cos_lat = np.cos(0.5 * (x[0] - y[0])) + sin_long = np.sin(0.5 * (x[1] - y[1])) + cos_long = np.cos(0.5 * (x[1] - y[1])) + + a_0 = np.cos(x[0] + np.pi / 2) * np.cos(y[0] + np.pi / 2) * sin_long ** 2 + a_1 = a_0 + sin_lat ** 2 + + d = 2.0 * np.arcsin(np.sqrt(min(max(abs(a_1), 0), 1))) + denom = np.sqrt(abs(a_1 - 1)) * np.sqrt(abs(a_1)) + grad = ( + np.array( + [ + ( + sin_lat * cos_lat + - np.sin(x[0] + np.pi / 2) + * np.cos(y[0] + np.pi / 2) + * sin_long ** 2 + ), + ( + np.cos(x[0] + np.pi / 2) + * np.cos(y[0] + np.pi / 2) + * sin_long + * cos_long + ), + ] + ) + / (denom + 1e-6) + ) + return d, grad + + +@numba.njit() +def yule(x, y): + num_true_true = 0.0 + num_true_false = 0.0 + num_false_true = 0.0 + for i in range(x.shape[0]): + x_true = x[i] != 0 + y_true = y[i] != 0 + num_true_true += x_true and y_true + num_true_false += x_true and (not y_true) + num_false_true += (not x_true) and y_true + + num_false_false = x.shape[0] - num_true_true - num_true_false - num_false_true + + if num_true_false == 0.0 or num_false_true == 0.0: + return 0.0 + else: + return (2.0 * num_true_false * num_false_true) / ( + num_true_true * num_false_false + num_true_false * num_false_true + ) + + +@numba.njit() +def cosine(x, y): + result = 0.0 + norm_x = 0.0 + norm_y = 0.0 + for i in range(x.shape[0]): + result += x[i] * y[i] + norm_x += x[i] ** 2 + norm_y += y[i] ** 2 + + if norm_x == 0.0 and norm_y == 0.0: + return 0.0 + elif norm_x == 0.0 or norm_y == 0.0: + return 1.0 + else: + return 1.0 - (result / np.sqrt(norm_x * norm_y)) + + +@numba.njit(fastmath=True) +def cosine_grad(x, y): + result = 0.0 + norm_x = 0.0 + norm_y = 0.0 + for i in range(x.shape[0]): + result += x[i] * y[i] + norm_x += x[i] ** 2 + norm_y += y[i] ** 2 + + if norm_x == 0.0 and norm_y == 0.0: + dist = 0.0 + grad = np.zeros(x.shape) + elif norm_x == 0.0 or norm_y == 0.0: + dist = 1.0 + grad = np.zeros(x.shape) + else: + grad = -(x * result - y * norm_x) / np.sqrt(norm_x ** 3 * norm_y) + dist = 1.0 - (result / np.sqrt(norm_x * norm_y)) + + return dist, grad + + +@numba.njit() +def correlation(x, y): + mu_x = 0.0 + mu_y = 0.0 + norm_x = 0.0 + norm_y = 0.0 + dot_product = 0.0 + + for i in range(x.shape[0]): + mu_x += x[i] + mu_y += y[i] + + mu_x /= x.shape[0] + mu_y /= x.shape[0] + + for i in range(x.shape[0]): + shifted_x = x[i] - mu_x + shifted_y = y[i] - mu_y + norm_x += shifted_x ** 2 + norm_y += shifted_y ** 2 + dot_product += shifted_x * shifted_y + + if norm_x == 0.0 and norm_y == 0.0: + return 0.0 + elif dot_product == 0.0: + return 1.0 + else: + return 1.0 - (dot_product / np.sqrt(norm_x * norm_y)) + + +@numba.njit() +def hellinger(x, y): + result = 0.0 + l1_norm_x = 0.0 + l1_norm_y = 0.0 + + for i in range(x.shape[0]): + result += np.sqrt(x[i] * y[i]) + l1_norm_x += x[i] + l1_norm_y += y[i] + + if l1_norm_x == 0 and l1_norm_y == 0: + return 0.0 + elif l1_norm_x == 0 or l1_norm_y == 0: + return 1.0 + else: + return np.sqrt(1 - result / np.sqrt(l1_norm_x * l1_norm_y)) + + +@numba.njit() +def hellinger_grad(x, y): + result = 0.0 + l1_norm_x = 0.0 + l1_norm_y = 0.0 + + grad_term = np.empty(x.shape[0]) + + for i in range(x.shape[0]): + grad_term[i] = np.sqrt(x[i] * y[i]) + result += grad_term[i] + l1_norm_x += x[i] + l1_norm_y += y[i] + + if l1_norm_x == 0 and l1_norm_y == 
0: + dist = 0.0 + grad = np.zeros(x.shape) + elif l1_norm_x == 0 or l1_norm_y == 0: + dist = 1.0 + grad = np.zeros(x.shape) + else: + dist_denom = np.sqrt(l1_norm_x * l1_norm_y) + dist = np.sqrt(1 - result / dist_denom) + grad_denom = 2 * dist + grad_numer_const = (l1_norm_y * result) / (2 * dist_denom ** 3) + + grad = (grad_numer_const - (y / grad_term * dist_denom)) / grad_denom + + return dist, grad + + +@numba.njit() +def approx_log_Gamma(x): + if x == 1: + return 0 + # x2= 1/(x*x); + return x * np.log(x) - x + 0.5 * np.log(2.0 * np.pi / x) + 1.0 / (x * 12.0) + # + x2*(-1.0/360.0) + x2* (1.0/1260.0 + x2*(-1.0/(1680.0) +\ + # x2*(1.0/1188.0 + x2*(-691.0/360360.0 + x2*(1.0/156.0 +\ + # x2*(-3617.0/122400.0 + x2*(43687.0/244188.0 + x2*(-174611.0/125400.0) +\ + # x2*(77683.0/5796.0 + x2*(-236364091.0/1506960.0 + x2*(657931.0/300.0)))))))))))) + + +@numba.njit() +def log_beta(x, y): + a = min(x, y) + b = max(x, y) + if b < 5: + value = -np.log(b) + for i in range(1, int(a)): + value += np.log(i) - np.log(b + i) + return value + else: + return approx_log_Gamma(x) + approx_log_Gamma(y) - approx_log_Gamma(x + y) + + +@numba.njit() +def log_single_beta(x): + return np.log(2.0) * (-2.0 * x + 0.5) + 0.5 * np.log(2.0 * np.pi / x) + 0.125 / x + + +# + x2*(-1.0/192.0 + x2* (1.0/640.0 + x2*(-17.0/(14336.0) +\ +# x2*(31.0/18432.0 + x2*(-691.0/180224.0 +\ +# x2*(5461.0/425984.0 + x2*(-929569.0/15728640.0 +\ +# x2*(3189151.0/8912896.0 + x2*(-221930581.0/79691776.0) +\ +# x2*(4722116521.0/176160768.0 + x2*(-968383680827.0/3087007744.0 +\ +# x2*(14717667114151.0/3355443200.0 )))))))))))) + + +@numba.njit() +def ll_dirichlet(data1, data2): + """The symmetric relative log likelihood of rolling data2 vs data1 + in n trials on a die that rolled data1 in sum(data1) trials. 
+ + ..math:: + D(data1, data2) = DirichletMultinomail(data2 | data1) + """ + + n1 = np.sum(data1) + n2 = np.sum(data2) + + log_b = 0.0 + self_denom1 = 0.0 + self_denom2 = 0.0 + + for i in range(data1.shape[0]): + if data1[i] * data2[i] > 0.9: + log_b += log_beta(data1[i], data2[i]) + self_denom1 += log_single_beta(data1[i]) + self_denom2 += log_single_beta(data2[i]) + + else: + if data1[i] > 0.9: + self_denom1 += log_single_beta(data1[i]) + + if data2[i] > 0.9: + self_denom2 += log_single_beta(data2[i]) + + return np.sqrt( + 1.0 / n2 * (log_b - log_beta(n1, n2) - (self_denom2 - log_single_beta(n2))) + + 1.0 / n1 * (log_b - log_beta(n2, n1) - (self_denom1 - log_single_beta(n1))) + ) + + +@numba.njit(fastmath=True) +def symmetric_kl(x, y, z=1e-11): # pragma: no cover + """ + symmetrized KL divergence between two probability distributions + + ..math:: + D(x, y) = \frac{D_{KL}\left(x \Vert y\right) + D_{KL}\left(y \Vert x\right)}{2} + """ + n = x.shape[0] + x_sum = 0.0 + y_sum = 0.0 + kl1 = 0.0 + kl2 = 0.0 + + for i in range(n): + x[i] += z + x_sum += x[i] + y[i] += z + y_sum += y[i] + + for i in range(n): + x[i] /= x_sum + y[i] /= y_sum + + for i in range(n): + kl1 += x[i] * np.log(x[i] / y[i]) + kl2 += y[i] * np.log(y[i] / x[i]) + + return (kl1 + kl2) / 2 + + +@numba.njit(fastmath=True) +def symmetric_kl_grad(x, y, z=1e-11): # pragma: no cover + """ + symmetrized KL divergence and its gradient + + """ + n = x.shape[0] + x_sum = 0.0 + y_sum = 0.0 + kl1 = 0.0 + kl2 = 0.0 + + for i in range(n): + x[i] += z + x_sum += x[i] + y[i] += z + y_sum += y[i] + + for i in range(n): + x[i] /= x_sum + y[i] /= y_sum + + for i in range(n): + kl1 += x[i] * np.log(x[i] / y[i]) + kl2 += y[i] * np.log(y[i] / x[i]) + + dist = (kl1 + kl2) / 2 + grad = (np.log(y / x) - (x / y) + 1) / 2 + + return dist, grad + + +@numba.njit() +def correlation_grad(x, y): + mu_x = 0.0 + mu_y = 0.0 + norm_x = 0.0 + norm_y = 0.0 + dot_product = 0.0 + + for i in range(x.shape[0]): + mu_x += x[i] + mu_y += y[i] + + mu_x /= x.shape[0] + mu_y /= x.shape[0] + + for i in range(x.shape[0]): + shifted_x = x[i] - mu_x + shifted_y = y[i] - mu_y + norm_x += shifted_x ** 2 + norm_y += shifted_y ** 2 + dot_product += shifted_x * shifted_y + + if norm_x == 0.0 and norm_y == 0.0: + dist = 0.0 + grad = np.zeros(x.shape) + elif dot_product == 0.0: + dist = 1.0 + grad = np.zeros(x.shape) + else: + dist = 1.0 - (dot_product / np.sqrt(norm_x * norm_y)) + grad = ((x - mu_x) / norm_x - (y - mu_y) / dot_product) * dist + + return dist, grad + + +@numba.njit(fastmath=True) +def sinkhorn_distance( + x, y, M=_mock_identity, cost=_mock_cost, maxiter=64 +): # pragma: no cover + p = (x / x.sum()).astype(np.float32) + q = (y / y.sum()).astype(np.float32) + + u = np.ones(p.shape, dtype=np.float32) + v = np.ones(q.shape, dtype=np.float32) + + for n in range(maxiter): + t = M @ v + u[t > 0] = p[t > 0] / t[t > 0] + t = M.T @ u + v[t > 0] = q[t > 0] / t[t > 0] + + pi = np.diag(v) @ M @ np.diag(u) + result = 0.0 + for i in range(pi.shape[0]): + for j in range(pi.shape[1]): + if pi[i, j] > 0: + result += pi[i, j] * cost[i, j] + + return result + + +@numba.njit(fastmath=True) +def spherical_gaussian_energy_grad(x, y): # pragma: no cover + mu_1 = x[0] - y[0] + mu_2 = x[1] - y[1] + + sigma = np.abs(x[2]) + np.abs(y[2]) + sign_sigma = np.sign(x[2]) + + dist = (mu_1 ** 2 + mu_2 ** 2) / (2 * sigma) + np.log(sigma) + np.log(2 * np.pi) + grad = np.empty(3, np.float32) + + grad[0] = mu_1 / sigma + grad[1] = mu_2 / sigma + grad[2] = sign_sigma * (1.0 / sigma - (mu_1 ** 2 + mu_2 ** 
2) / (2 * sigma ** 2)) + + return dist, grad + + +@numba.njit(fastmath=True) +def diagonal_gaussian_energy_grad(x, y): # pragma: no cover + mu_1 = x[0] - y[0] + mu_2 = x[1] - y[1] + + sigma_11 = np.abs(x[2]) + np.abs(y[2]) + sigma_12 = 0.0 + sigma_22 = np.abs(x[3]) + np.abs(y[3]) + + det = sigma_11 * sigma_22 + sign_s1 = np.sign(x[2]) + sign_s2 = np.sign(x[3]) + + if det == 0.0: + # TODO: figure out the right thing to do here + return mu_1 ** 2 + mu_2 ** 2, np.array([0.0, 0.0, 1.0, 1.0], dtype=np.float32) + + cross_term = 2 * sigma_12 + m_dist = ( + np.abs(sigma_22) * (mu_1 ** 2) + - cross_term * mu_1 * mu_2 + + np.abs(sigma_11) * (mu_2 ** 2) + ) + + dist = (m_dist / det + np.log(np.abs(det))) / 2.0 + np.log(2 * np.pi) + grad = np.empty(6, dtype=np.float32) + + grad[0] = (2 * sigma_22 * mu_1 - cross_term * mu_2) / (2 * det) + grad[1] = (2 * sigma_11 * mu_2 - cross_term * mu_1) / (2 * det) + grad[2] = sign_s1 * (sigma_22 * (det - m_dist) + det * mu_2 ** 2) / (2 * det ** 2) + grad[3] = sign_s2 * (sigma_11 * (det - m_dist) + det * mu_1 ** 2) / (2 * det ** 2) + + return dist, grad + + +@numba.njit(fastmath=True) +def gaussian_energy_grad(x, y): # pragma: no cover + mu_1 = x[0] - y[0] + mu_2 = x[1] - y[1] + + # Ensure width are positive + x[2] = np.abs(x[2]) + y[2] = np.abs(y[2]) + + # Ensure heights are positive + x[3] = np.abs(x[3]) + y[3] = np.abs(y[3]) + + # Ensure angle is in range -pi,pi + x[4] = np.arcsin(np.sin(x[4])) + y[4] = np.arcsin(np.sin(y[4])) + + # Covariance entries for y + a = y[2] * np.cos(y[4]) ** 2 + y[3] * np.sin(y[4]) ** 2 + b = (y[2] - y[3]) * np.sin(y[4]) * np.cos(y[4]) + c = y[3] * np.cos(y[4]) ** 2 + y[2] * np.sin(y[4]) ** 2 + + # Sum of covariance matrices + sigma_11 = x[2] * np.cos(x[4]) ** 2 + x[3] * np.sin(x[4]) ** 2 + a + sigma_12 = (x[2] - x[3]) * np.sin(x[4]) * np.cos(x[4]) + b + sigma_22 = x[2] * np.sin(x[4]) ** 2 + x[3] * np.cos(x[4]) ** 2 + c + + # Determinant of the sum of covariances + det_sigma = np.abs(sigma_11 * sigma_22 - sigma_12 ** 2) + x_inv_sigma_y_numerator = ( + sigma_22 * mu_1 ** 2 - 2 * sigma_12 * mu_1 * mu_2 + sigma_11 * mu_2 ** 2 + ) + + if det_sigma < 1e-32: + return ( + mu_1 ** 2 + mu_2 ** 2, + np.array([0.0, 0.0, 1.0, 1.0, 0.0], dtype=np.float32), + ) + + dist = x_inv_sigma_y_numerator / det_sigma + np.log(det_sigma) + np.log(2 * np.pi) + + grad = np.zeros(5, np.float32) + grad[0] = (2 * sigma_22 * mu_1 - 2 * sigma_12 * mu_2) / det_sigma + grad[1] = (2 * sigma_11 * mu_2 - 2 * sigma_12 * mu_1) / det_sigma + + grad[2] = mu_2 * (mu_2 * np.cos(x[4]) ** 2 - mu_1 * np.cos(x[4]) * np.sin(x[4])) + grad[2] += mu_1 * (mu_1 * np.sin(x[4]) ** 2 - mu_2 * np.cos(x[4]) * np.sin(x[4])) + grad[2] *= det_sigma + grad[2] -= x_inv_sigma_y_numerator * np.cos(x[4]) ** 2 * sigma_22 + grad[2] -= x_inv_sigma_y_numerator * np.sin(x[4]) ** 2 * sigma_11 + grad[2] += x_inv_sigma_y_numerator * 2 * sigma_12 * np.sin(x[4]) * np.cos(x[4]) + grad[2] /= det_sigma ** 2 + 1e-8 + + grad[3] = mu_1 * (mu_1 * np.cos(x[4]) ** 2 - mu_2 * np.cos(x[4]) * np.sin(x[4])) + grad[3] += mu_2 * (mu_2 * np.sin(x[4]) ** 2 - mu_1 * np.cos(x[4]) * np.sin(x[4])) + grad[3] *= det_sigma + grad[3] -= x_inv_sigma_y_numerator * np.sin(x[4]) ** 2 * sigma_22 + grad[3] -= x_inv_sigma_y_numerator * np.cos(x[4]) ** 2 * sigma_11 + grad[3] -= x_inv_sigma_y_numerator * 2 * sigma_12 * np.sin(x[4]) * np.cos(x[4]) + grad[3] /= det_sigma ** 2 + 1e-8 + + grad[4] = (x[3] - x[2]) * ( + 2 * mu_1 * mu_2 * np.cos(2 * x[4]) - (mu_1 ** 2 - mu_2 ** 2) * np.sin(2 * x[4]) + ) + grad[4] *= det_sigma + grad[4] -= 
x_inv_sigma_y_numerator * (x[3] - x[2]) * np.sin(2 * x[4]) * sigma_22 + grad[4] -= x_inv_sigma_y_numerator * (x[2] - x[3]) * np.sin(2 * x[4]) * sigma_11 + grad[4] -= x_inv_sigma_y_numerator * 2 * sigma_12 * (x[2] - x[3]) * np.cos(2 * x[4]) + grad[4] /= det_sigma ** 2 + 1e-8 + + return dist, grad + + +@numba.njit(fastmath=True) +def spherical_gaussian_grad(x, y): # pragma: no cover + mu_1 = x[0] - y[0] + mu_2 = x[1] - y[1] + + sigma = x[2] + y[2] + sigma_sign = np.sign(sigma) + + if sigma == 0: + return 10.0, np.array([0.0, 0.0, -1.0], dtype=np.float32) + + dist = ( + (mu_1 ** 2 + mu_2 ** 2) / np.abs(sigma) + + 2 * np.log(np.abs(sigma)) + + np.log(2 * np.pi) + ) + grad = np.empty(3, dtype=np.float32) + + grad[0] = (2 * mu_1) / np.abs(sigma) + grad[1] = (2 * mu_2) / np.abs(sigma) + grad[2] = sigma_sign * ( + -(mu_1 ** 2 + mu_2 ** 2) / (sigma ** 2) + (2 / np.abs(sigma)) + ) + + return dist, grad + + +# Special discrete distances -- where x and y are objects, not vectors + + +def get_discrete_params(data, metric): + if metric == "ordinal": + return {"support_size": float(data.max() - data.min()) / 2.0} + elif metric == "count": + min_count = scipy.stats.tmin(data) + max_count = scipy.stats.tmax(data) + lambda_ = scipy.stats.tmean(data) + normalisation = count_distance(min_count, max_count, poisson_lambda=lambda_) + return { + "poisson_lambda": lambda_, + "normalisation": normalisation / 2.0, # heuristic + } + elif metric == "string": + lengths = np.array([len(x) for x in data]) + max_length = scipy.stats.tmax(lengths) + max_dist = max_length / 1.5 # heuristic + normalisation = max_dist / 2.0 # heuristic + return {"normalisation": normalisation, "max_dist": max_dist / 2.0} # heuristic + + else: + return {} + + +@numba.jit() +def categorical_distance(x, y): + if x == y: + return 0.0 + else: + return 1.0 + + +@numba.jit() +def hierarchical_categorical_distance(x, y, cat_hierarchy=[{}]): + n_levels = float(len(cat_hierarchy)) + for level, cats in enumerate(cat_hierarchy): + if cats[x] == cats[y]: + return float(level) / n_levels + else: + return 1.0 + + +@numba.njit() +def ordinal_distance(x, y, support_size=1.0): + return abs(x - y) / support_size + + +@numba.jit() +def count_distance(x, y, poisson_lambda=1.0, normalisation=1.0): + lo = int(min(x, y)) + hi = int(max(x, y)) + + log_lambda = np.log(poisson_lambda) + + if lo < 2: + log_k_factorial = 0.0 + elif lo < 10: + log_k_factorial = 0.0 + for k in range(2, lo): + log_k_factorial += np.log(k) + else: + log_k_factorial = approx_log_Gamma(lo + 1) + + result = 0.0 + + for k in range(lo, hi): + result += k * log_lambda - poisson_lambda - log_k_factorial + log_k_factorial += np.log(k) + + return result / normalisation + + +@numba.njit() +def levenshtein(x, y, normalisation=1.0, max_distance=20): + x_len, y_len = len(x), len(y) + + # Opt out of some comparisons + if abs(x_len - y_len) > max_distance: + return abs(x_len - y_len) / normalisation + + v0 = np.arange(y_len + 1).astype(np.float64) + v1 = np.zeros(y_len + 1) + + for i in range(x_len): + + v1[i] = i + 1 + + for j in range(y_len): + deletion_cost = v0[j + 1] + 1 + insertion_cost = v1[j] + 1 + substitution_cost = int(x[i] == y[j]) + + v1[j + 1] = min(deletion_cost, insertion_cost, substitution_cost) + + v0 = v1 + + # Abort early if we've already exceeded max_dist + if np.min(v0) > max_distance: + return max_distance / normalisation + + return v0[y_len] / normalisation + + +named_distances = { + # general minkowski distances + "euclidean": euclidean, + "l2": euclidean, + "manhattan": manhattan, 
+ "taxicab": manhattan, + "l1": manhattan, + "chebyshev": chebyshev, + "linfinity": chebyshev, + "linfty": chebyshev, + "linf": chebyshev, + "minkowski": minkowski, + "poincare": poincare, + # Standardised/weighted distances + "seuclidean": standardised_euclidean, + "standardised_euclidean": standardised_euclidean, + "wminkowski": weighted_minkowski, + "weighted_minkowski": weighted_minkowski, + "mahalanobis": mahalanobis, + # Other distances + "canberra": canberra, + "cosine": cosine, + "correlation": correlation, + "hellinger": hellinger, + "haversine": haversine, + "braycurtis": bray_curtis, + "ll_dirichlet": ll_dirichlet, + "symmetric_kl": symmetric_kl, + # Binary distances + "hamming": hamming, + "jaccard": jaccard, + "dice": dice, + "matching": matching, + "kulsinski": kulsinski, + "rogerstanimoto": rogers_tanimoto, + "russellrao": russellrao, + "sokalsneath": sokal_sneath, + "sokalmichener": sokal_michener, + "yule": yule, + # Special discrete distances + "categorical": categorical_distance, + "ordinal": ordinal_distance, + "hierarchical_categorical": hierarchical_categorical_distance, + "count": count_distance, + "string": levenshtein, +} + +named_distances_with_gradients = { + # general minkowski distances + "euclidean": euclidean_grad, + "l2": euclidean_grad, + "manhattan": manhattan_grad, + "taxicab": manhattan_grad, + "l1": manhattan_grad, + "chebyshev": chebyshev_grad, + "linfinity": chebyshev_grad, + "linfty": chebyshev_grad, + "linf": chebyshev_grad, + "minkowski": minkowski_grad, + # Standardised/weighted distances + "seuclidean": standardised_euclidean_grad, + "standardised_euclidean": standardised_euclidean_grad, + "wminkowski": weighted_minkowski_grad, + "weighted_minkowski": weighted_minkowski_grad, + "mahalanobis": mahalanobis_grad, + # Other distances + "canberra": canberra_grad, + "cosine": cosine_grad, + "correlation": correlation_grad, + "hellinger": hellinger_grad, + "haversine": haversine_grad, + "braycurtis": bray_curtis_grad, + "symmetric_kl": symmetric_kl_grad, + # Special embeddings + "spherical_gaussian_energy": spherical_gaussian_energy_grad, + "diagonal_gaussian_energy": diagonal_gaussian_energy_grad, + "gaussian_energy": gaussian_energy_grad, + "hyperboloid": hyperboloid_grad, +} + +DISCRETE_METRICS = ( + "categorical", + "hierarchical_categorical", + "ordinal", + "count", + "string", +) + +SPECIAL_METRICS = ( + "hellinger", + "ll_dirichlet", + "symmetric_kl", + "poincare", + hellinger, + ll_dirichlet, + symmetric_kl, + poincare, +) + + +@numba.njit(parallel=True) +def parallel_special_metric(X, Y=None, metric=hellinger): + if Y is None: + result = np.zeros((X.shape[0], X.shape[0])) + + for i in range(X.shape[0]): + for j in range(i + 1, X.shape[0]): + result[i, j] = metric(X[i], X[j]) + result[j, i] = result[i, j] + else: + result = np.zeros((X.shape[0], Y.shape[0])) + + for i in range(X.shape[0]): + for j in range(Y.shape[0]): + result[i, j] = metric(X[i], Y[j]) + + return result + + +# We can gain efficiency by chunking the matrix into blocks; +# this keeps data vectors in cache better +@numba.njit(parallel=True, nogil=True) +def chunked_parallel_special_metric(X, Y=None, metric=hellinger, chunk_size=16): + if Y is None: + XX, symmetrical = X, True + row_size = col_size = X.shape[0] + else: + XX, symmetrical = Y, False + row_size, col_size = X.shape[0], Y.shape[0] + + result = np.zeros((row_size, col_size), dtype=np.float32) + n_row_chunks = (row_size // chunk_size) + 1 + for chunk_idx in numba.prange(n_row_chunks): + n = chunk_idx * chunk_size + 
chunk_end_n = min(n + chunk_size, row_size)
+            m_start = n if symmetrical else 0
+            for m in range(m_start, col_size, chunk_size):
+                chunk_end_m = min(m + chunk_size, col_size)
+                for i in range(n, chunk_end_n):
+                    for j in range(m, chunk_end_m):
+                        result[i, j] = metric(X[i], XX[j])
+    return result
+
+
+def pairwise_special_metric(X, Y=None, metric="hellinger", kwds=None):
+    if callable(metric):
+        if kwds is not None:
+            kwd_vals = tuple(kwds.values())
+        else:
+            kwd_vals = ()
+
+        @numba.njit(fastmath=True)
+        def _partial_metric(_X, _Y=None):
+            return metric(_X, _Y, *kwd_vals)
+
+        return pairwise_distances(X, Y, metric=_partial_metric)
+    else:
+        special_metric_func = named_distances[metric]
+        return parallel_special_metric(X, Y, metric=special_metric_func)
\ No newline at end of file
diff --git a/neo_force_scheme/kruskal_stress.py b/neo_force_scheme/kruskal_stress.py
new file mode 100644
index 0000000..31dae60
--- /dev/null
+++ b/neo_force_scheme/kruskal_stress.py
@@ -0,0 +1,25 @@
+import math
+from numba import njit, prange
+
+
+@njit(parallel=True)
+def kruskal_stress(distance_matrix, projection):
+    """Kruskal stress between a condensed distance matrix and a 2D projection.
+
+    The lower the value, the better the projection preserves the original
+    pairwise distances; 0 means a perfect match.
+    """
+    size = len(projection)
+    total = len(distance_matrix)
+
+    den = 0
+    num = 0
+
+    # only the outermost loop is parallelized by numba
+    for i in prange(size):
+        for j in range(size):
+            dr2 = math.sqrt((projection[i][0] - projection[j][0]) * (projection[i][0] - projection[j][0])
+                            + (projection[i][1] - projection[j][1]) * (projection[i][1] - projection[j][1]))
+
+            r = (i + j - math.fabs(i - j)) / 2  # min(i,j)
+            s = (i + j + math.fabs(i - j)) / 2  # max(i,j)
+            drn = distance_matrix[int(total - ((size - r) * (size - r + 1) / 2) + (s - r))]
+
+            num += (drn - dr2) * (drn - dr2)
+            den += drn * drn
+
+    return math.sqrt(num / den)
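+
+
+# Minimal usage sketch (illustrative only; `data` and `nfs` are assumptions,
+# not exports of this module). After fitting, the `embedding_` attribute of
+# NeoForceScheme holds the condensed distance matrix expected as the first
+# argument:
+#
+#   nfs = NeoForceScheme()
+#   projection = nfs.fit_transform(data)
+#   stress = kruskal_stress(nfs.embedding_, projection)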
diff --git a/neo_force_scheme/utils.py b/neo_force_scheme/utils.py
new file mode 100644
index 0000000..d8e6994
--- /dev/null
+++ b/neo_force_scheme/utils.py
@@ -0,0 +1,99 @@
+import math
+import pickle
+from typing import Tuple
+
+import numba
+import numpy as np
+
+from . import distances
+
+MACHINE_EPSILON = np.finfo(np.double).eps
+
+
+def read_distance_matrix(filename: str) -> Tuple[int, np.ndarray]:
+    # calculating the size
+    with open(filename) as fp:
+        line = fp.readline()
+        tokens = line.strip().split(' ')
+
+    size = len(tokens)
+    distance_matrix = np.zeros(int(size * (size + 1) / 2), dtype=np.float32)
+
+    # reading line per line
+    row = 0
+    k = 0
+    with open(filename) as fp:
+        line = fp.readline()
+        while line:
+            tokens = line.strip().split(' ')
+            for column in range(row, size):
+                distance_matrix[k] = float(tokens[column])
+                k = k + 1
+            line = fp.readline()
+            row = row + 1
+    return size, distance_matrix
+
+
+def pickle_load_matrix(filename):
+    with open(filename, 'rb') as f:
+        data = pickle.load(f)
+    return data[0], data[1]
+
+
+def pickle_save_matrix(filename, distance_matrix, size):
+    with open(filename, 'wb') as f:
+        pickle.dump([distance_matrix, size], f)
+
+
+@numba.njit(parallel=True, fastmath=True)
+def move(ins1, distance_matrix, projection, learning_rate):
+    size = len(projection)
+    total = len(distance_matrix)
+    error = 0
+
+    for ins2 in numba.prange(size):
+        if ins1 != ins2:
+            x1x2 = projection[ins2][0] - projection[ins1][0]
+            y1y2 = projection[ins2][1] - projection[ins1][1]
+            dr2 = max(math.sqrt(x1x2 * x1x2 + y1y2 * y1y2), 0.0001)
+
+            # getting the index in the distance matrix and getting the value
+            r = (ins1 + ins2 - math.fabs(ins1 - ins2)) / 2  # min(i,j)
+            s = (ins1 + ins2 + math.fabs(ins1 - ins2)) / 2  # max(i,j)
+            drn = distance_matrix[int(total - ((size - r) * (size - r + 1) / 2) + (s - r))]
+
+            # calculate the movement
+            delta = (drn - dr2)
+            error += math.fabs(delta)
+
+            # moving
+            projection[ins2][0] += learning_rate * delta * (x1x2 / dr2)
+            projection[ins2][1] += learning_rate * delta * (y1y2 / dr2)
+
+    return error / size
+
+
+@numba.njit(parallel=True, fastmath=True)
+def iteration(index, distance_matrix, projection, learning_rate):
+    size = len(projection)
+    error = 0
+
+    for i in numba.prange(size):
+        ins1 = index[i]
+        error += move(ins1, distance_matrix, projection, learning_rate)
+
+    return error / size
+
+
+@numba.njit(parallel=True, fastmath=True)
+def create_triangular_distance_matrix(
+        data,
+        metric):
+    # condensed upper-triangular matrix, including the diagonal: entry (i, j)
+    # with i <= j lives at index total - (size - i) * (size - i + 1) / 2 + (j - i)
+    distance_matrix = np.zeros(int((data.shape[0] + 1) * data.shape[0] / 2))
+    size = len(data)
+
+    k = 0
+    for i in range(size):
+        for j in range(i, size):
+            distance_matrix[k] = metric(data[i], data[j])
+            k = k + 1
+    return distance_matrix
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..b02a8e0
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,5 @@
+matplotlib~=3.4.2
+numba~=0.53.1
+numpy~=1.20.3
+pandas~=1.2.4
+scikit-learn~=0.24.2
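+# Note: GPU mode additionally requires an NVIDIA CUDA driver/toolkit
+# supported by the pinned numba version; it cannot be installed from PyPI.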
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..ecf810f
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,29 @@
+import os
+
+from setuptools import setup
+
+
+def read(fname):
+    return open(os.path.join(os.path.dirname(__file__), fname)).read()
+
+
+setup(
+    name="neo_force_scheme",
+    version="0.0.1",
+    author="Leonardo Christino",
+    author_email="christinoleo@dal.ca",
+    description="NeoForceScheme, an extension of the original ForceScheme with performance improvements",
+    license="MIT",
+    keywords="gpu numba forcescheme projection dimensionality reduction",
+    url="https://github.com/visml/neo_force_scheme",
+    packages=['neo_force_scheme'],
+    long_description=read('README.md'),
+    classifiers=[
+        "Development Status :: 2 - Pre-Alpha",
+        "Environment :: GPU :: NVIDIA CUDA",
+        "Programming Language :: Python",
+        "Topic :: Scientific/Engineering :: Artificial Intelligence",
+        "Topic :: Software Development :: Libraries :: Python Modules",
+        "License :: OSI Approved :: MIT License",
+    ],
+)