From 4cdceaab5271a5b51463ec562c8eb55f96b771c5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 16 Aug 2022 08:43:38 +0000 Subject: [PATCH] Bump numpy from 1.19.5 to 1.21.6 (#11078) * Bump numpy from 1.19.5 to 1.21.6 Bumps [numpy](https://github.com/numpy/numpy) from 1.19.5 to 1.21.6. - [Release notes](https://github.com/numpy/numpy/releases) - [Changelog](https://github.com/numpy/numpy/blob/main/doc/HOWTO_RELEASE.rst.txt) - [Commits](https://github.com/numpy/numpy/compare/v1.19.5...v1.21.6) --- updated-dependencies: - dependency-name: numpy dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * fixed mypy errors for numpy 1.21.6 upgrade * removed duplicate np.array call Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Thomas Werkmeister Co-authored-by: melindaloubser1 --- poetry.lock | 339 ++++++++++-------- pyproject.toml | 2 +- rasa/core/evaluation/marker_stats.py | 16 +- .../featurizers/single_state_featurizer.py | 3 +- rasa/core/policies/ted_policy.py | 39 +- .../core/policies/unexpected_intent_policy.py | 34 +- rasa/nlu/classifiers/diet_classifier.py | 34 +- .../classifiers/sklearn_intent_classifier.py | 11 +- .../dense_featurizer/dense_featurizer.py | 5 +- .../dense_featurizer/lm_featurizer.py | 9 +- .../dense_featurizer/mitie_featurizer.py | 7 +- .../lexical_syntactic_featurizer.py | 6 +- rasa/shared/nlu/training_data/features.py | 11 +- rasa/utils/common.py | 11 +- rasa/utils/plotting.py | 24 +- rasa/utils/tensorflow/data_generator.py | 5 +- rasa/utils/tensorflow/model_data.py | 33 +- rasa/utils/tensorflow/model_data_utils.py | 12 +- rasa/utils/tensorflow/models.py | 36 +- rasa/utils/tensorflow/types.py | 6 + rasa/utils/train_utils.py | 2 +- 21 files changed, 399 insertions(+), 246 deletions(-) create mode 100644 rasa/utils/tensorflow/types.py diff --git a/poetry.lock b/poetry.lock index 65f13338da8f..d4bc5a28833e 100644 --- a/poetry.lock +++ b/poetry.lock @@ -206,10 +206,10 @@ optional = false python-versions = ">=3.5" [package.extras] -tests_no_zope = ["cloudpickle", "pytest-mypy-plugins", "mypy (>=0.900,!=0.940)", "pytest (>=4.3.0)", "pympler", "hypothesis", "coverage[toml] (>=5.0.2)"] -tests = ["cloudpickle", "zope.interface", "pytest-mypy-plugins", "mypy (>=0.900,!=0.940)", "pytest (>=4.3.0)", "pympler", "hypothesis", "coverage[toml] (>=5.0.2)"] -docs = ["sphinx-notfound-page", "zope.interface", "sphinx", "furo"] -dev = ["cloudpickle", "pre-commit", "sphinx-notfound-page", "sphinx", "furo", "zope.interface", "pytest-mypy-plugins", "mypy (>=0.900,!=0.940)", "pytest (>=4.3.0)", "pympler", "hypothesis", "coverage[toml] (>=5.0.2)"] +dev = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "mypy (>=0.900,!=0.940)", "pytest-mypy-plugins", "zope.interface", "furo", "sphinx", "sphinx-notfound-page", "pre-commit", "cloudpickle"] +docs = ["furo", "sphinx", "zope.interface", "sphinx-notfound-page"] +tests = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "mypy (>=0.900,!=0.940)", "pytest-mypy-plugins", "zope.interface", "cloudpickle"] +tests_no_zope = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "mypy (>=0.900,!=0.940)", "pytest-mypy-plugins", "cloudpickle"] [[package]] name = "azure-core" @@ -318,14 +318,14 @@ numpy = ">=1.15.0" [[package]] name = "boto3" -version = "1.24.50" +version = "1.24.51" description = 
"The AWS SDK for Python" category = "main" optional = false python-versions = ">= 3.7" [package.dependencies] -botocore = ">=1.27.50,<1.28.0" +botocore = ">=1.27.51,<1.28.0" jmespath = ">=0.7.1,<2.0.0" s3transfer = ">=0.6.0,<0.7.0" @@ -334,7 +334,7 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] [[package]] name = "botocore" -version = "1.27.50" +version = "1.27.51" description = "Low-level, data-driven core of boto 3." category = "main" optional = false @@ -735,8 +735,8 @@ six = ">=1.16.0,<2.0.0" sortedcontainers = ">=2.4.0,<3.0.0" [package.extras] -aioredis = ["aioredis (>=2.0.1,<3.0.0)"] lua = ["lupa (>=1.13,<2.0)"] +aioredis = ["aioredis (>=2.0.1,<3.0.0)"] [[package]] name = "fbmessenger" @@ -809,7 +809,7 @@ python-versions = "*" [[package]] name = "freezegun" -version = "1.2.1" +version = "1.2.2" description = "Let your Python tests travel through time" category = "dev" optional = false @@ -835,27 +835,27 @@ optional = false python-versions = ">=3.7" [package.extras] -tqdm = ["tqdm"] -ssh = ["paramiko"] -smb = ["smbprotocol"] -sftp = ["paramiko"] -s3 = ["s3fs"] -oci = ["ocifs"] -libarchive = ["libarchive-c"] -http = ["aiohttp", "requests"] -hdfs = ["pyarrow (>=1)"] -gui = ["panel"] -gs = ["gcsfs"] -github = ["requests"] -git = ["pygit2"] -gcs = ["gcsfs"] -fuse = ["fusepy"] -entrypoints = ["importlib-metadata"] -dropbox = ["dropbox", "requests", "dropboxdrivefs"] -dask = ["distributed", "dask"] -arrow = ["pyarrow (>=1)"] -adl = ["adlfs"] abfs = ["adlfs"] +adl = ["adlfs"] +arrow = ["pyarrow (>=1)"] +dask = ["dask", "distributed"] +dropbox = ["dropboxdrivefs", "requests", "dropbox"] +entrypoints = ["importlib-metadata"] +fuse = ["fusepy"] +gcs = ["gcsfs"] +git = ["pygit2"] +github = ["requests"] +gs = ["gcsfs"] +gui = ["panel"] +hdfs = ["pyarrow (>=1)"] +http = ["requests", "aiohttp"] +libarchive = ["libarchive-c"] +oci = ["ocifs"] +s3 = ["s3fs"] +sftp = ["paramiko"] +smb = ["smbprotocol"] +ssh = ["paramiko"] +tqdm = ["tqdm"] [[package]] name = "future" @@ -1217,8 +1217,8 @@ python-versions = ">=3.7" zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""} [package.extras] -testing = ["pytest-mypy (>=0.9.1)", "pytest-black (>=0.3.7)", "pytest-enabler (>=1.3)", "pytest-cov", "pytest-flake8", "pytest-checkdocs (>=2.4)", "pytest (>=6)"] -docs = ["jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "jaraco.packaging (>=9)", "sphinx"] +docs = ["sphinx", "jaraco.packaging (>=9)", "rst.linker (>=1.9)", "jaraco.tidelift (>=1.4)"] +testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-black (>=0.3.7)", "pytest-mypy (>=0.9.1)"] [[package]] name = "incremental" @@ -1308,9 +1308,9 @@ python-versions = ">=2.7" importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} [package.extras] -testing = ["pytest-flake8 (>=1.1.1)", "jsonlib", "enum34", "pytest-flake8 (<1.1.0)", "sqlalchemy", "scikit-learn", "pymongo", "pandas", "numpy", "feedparser", "ecdsa", "pytest-cov", "pytest-black-multipy", "pytest-checkdocs (>=1.2.3)", "pytest (>=3.5,!=3.7.3)"] -"testing.libs" = ["yajl", "ujson", "simplejson"] -docs = ["rst.linker (>=1.9)", "jaraco.packaging (>=3.2)", "sphinx"] +docs = ["sphinx", "jaraco.packaging (>=3.2)", "rst.linker (>=1.9)"] +testing = ["pytest (>=3.5,!=3.7.3)", "pytest-checkdocs (>=1.2.3)", "pytest-black-multipy", "pytest-cov", "ecdsa", "feedparser", "numpy", "pandas", "pymongo", "scikit-learn", "sqlalchemy", "pytest-flake8 (<1.1.0)", "enum34", "jsonlib", "pytest-flake8 (>=1.1.1)"] +"testing.libs" = 
["simplejson", "ujson", "yajl"] [[package]] name = "jsonschema" @@ -1599,9 +1599,9 @@ typed-ast = {version = ">=1.4.0,<2", markers = "python_version < \"3.8\""} typing-extensions = ">=3.10" [package.extras] -reports = ["lxml"] -python2 = ["typed-ast (>=1.4.0,<2)"] dmypy = ["psutil (>=4.0)"] +python2 = ["typed-ast (>=1.4.0,<2)"] +reports = ["lxml"] [[package]] name = "mypy-extensions" @@ -1640,11 +1640,11 @@ typing-extensions = ">=3.0.0" [[package]] name = "numpy" -version = "1.19.5" +version = "1.21.6" description = "NumPy is the fundamental package for array computing with Python." category = "main" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7,<3.11" [[package]] name = "oauthlib" @@ -2047,7 +2047,7 @@ python-versions = ">=3.7" [[package]] name = "pytelegrambotapi" -version = "4.6.1" +version = "4.7.0" description = "Python Telegram bot api." category = "main" optional = false @@ -2114,7 +2114,7 @@ coverage = {version = ">=5.2.1", extras = ["toml"]} pytest = ">=4.6" [package.extras] -testing = ["virtualenv", "pytest-xdist", "six", "process-tests", "hunter", "fields"] +testing = ["fields", "hunter", "process-tests", "six", "pytest-xdist", "virtualenv"] [[package]] name = "pytest-forked" @@ -2227,7 +2227,7 @@ client = ["requests (>=2.21.0)", "websocket-client (>=0.54.0)"] [[package]] name = "pytz" -version = "2022.2" +version = "2022.2.1" description = "World timezone definitions, modern and historical" category = "main" optional = false @@ -2596,29 +2596,29 @@ python-versions = "*" [package.dependencies] certifi = "*" urllib3 = [ - {version = ">=1.26.11", markers = "python_version >= \"3.6\""}, {version = ">=1.26.9", markers = "python_version >= \"3.5\""}, + {version = ">=1.26.11", markers = "python_version >= \"3.6\""}, ] [package.extras] -tornado = ["tornado (>=5)"] -starlette = ["starlette (>=0.19.1)"] -sqlalchemy = ["sqlalchemy (>=1.2)"] -sanic = ["sanic (>=0.8)"] -rq = ["rq (>=0.6)"] -quart = ["blinker (>=1.1)", "quart (>=0.16.1)"] -pyspark = ["pyspark (>=2.4.4)"] -pure_eval = ["asttokens", "executing", "pure-eval"] -httpx = ["httpx (>=0.16.0)"] -flask = ["blinker (>=1.1)", "flask (>=0.11)"] -fastapi = ["fastapi (>=0.79.0)"] -falcon = ["falcon (>=1.4)"] -django = ["django (>=1.8)"] -chalice = ["chalice (>=1.16.0)"] -celery = ["celery (>=3)"] -bottle = ["bottle (>=0.12.13)"] -beam = ["apache-beam (>=2.12)"] aiohttp = ["aiohttp (>=3.5)"] +beam = ["apache-beam (>=2.12)"] +bottle = ["bottle (>=0.12.13)"] +celery = ["celery (>=3)"] +chalice = ["chalice (>=1.16.0)"] +django = ["django (>=1.8)"] +falcon = ["falcon (>=1.4)"] +fastapi = ["fastapi (>=0.79.0)"] +flask = ["flask (>=0.11)", "blinker (>=1.1)"] +httpx = ["httpx (>=0.16.0)"] +pure_eval = ["pure-eval", "executing", "asttokens"] +pyspark = ["pyspark (>=2.4.4)"] +quart = ["quart (>=0.16.1)", "blinker (>=1.1)"] +rq = ["rq (>=0.6)"] +sanic = ["sanic (>=0.8)"] +sqlalchemy = ["sqlalchemy (>=1.2)"] +starlette = ["starlette (>=0.19.1)"] +tornado = ["tornado (>=5)"] [[package]] name = "six" @@ -3322,7 +3322,7 @@ test = ["pytest"] [[package]] name = "tzdata" -version = "2022.1" +version = "2022.2" description = "Provider of IANA time zone data" category = "main" optional = false @@ -3370,9 +3370,9 @@ optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, <4" [package.extras] +brotli = ["brotlicffi (>=0.8.0)", "brotli (>=1.0.9)", "brotlipy (>=0.6.0)"] +secure = ["pyOpenSSL (>=0.14)", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "certifi", "ipaddress"] socks = ["PySocks 
(>=1.5.6,!=1.5.7,<2.0)"] -secure = ["ipaddress", "certifi", "idna (>=2.0.0)", "cryptography (>=1.3.4)", "pyOpenSSL (>=0.14)"] -brotli = ["brotlipy (>=0.6.0)", "brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] [[package]] name = "uvloop" @@ -3509,7 +3509,7 @@ transformers = ["transformers", "sentencepiece"] [metadata] lock-version = "1.1" python-versions = ">=3.7,<3.10" -content-hash = "45df423d304e37e671c18921e579149cef61e9b3f7ce68a6475cb69f2ba37200" +content-hash = "cdede530105e32f23b4e95f0f4994e183656e1558ba4de493477e9aa4e61248f" [metadata.files] absl-py = [] @@ -3518,13 +3518,89 @@ aiofiles = [ {file = "aiofiles-0.8.0-py3-none-any.whl", hash = "sha256:7a973fc22b29e9962d0897805ace5856e6a566ab1f0c8e5c91ff6c866519c937"}, {file = "aiofiles-0.8.0.tar.gz", hash = "sha256:8334f23235248a3b2e83b2c3a78a22674f39969b96397126cc93664d9a901e59"}, ] -aiohttp = [] +aiohttp = [ + {file = "aiohttp-3.8.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1ed0b6477896559f17b9eaeb6d38e07f7f9ffe40b9f0f9627ae8b9926ae260a8"}, + {file = "aiohttp-3.8.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7dadf3c307b31e0e61689cbf9e06be7a867c563d5a63ce9dca578f956609abf8"}, + {file = "aiohttp-3.8.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a79004bb58748f31ae1cbe9fa891054baaa46fb106c2dc7af9f8e3304dc30316"}, + {file = "aiohttp-3.8.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:12de6add4038df8f72fac606dff775791a60f113a725c960f2bab01d8b8e6b15"}, + {file = "aiohttp-3.8.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6f0d5f33feb5f69ddd57a4a4bd3d56c719a141080b445cbf18f238973c5c9923"}, + {file = "aiohttp-3.8.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eaba923151d9deea315be1f3e2b31cc39a6d1d2f682f942905951f4e40200922"}, + {file = "aiohttp-3.8.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:099ebd2c37ac74cce10a3527d2b49af80243e2a4fa39e7bce41617fbc35fa3c1"}, + {file = "aiohttp-3.8.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:2e5d962cf7e1d426aa0e528a7e198658cdc8aa4fe87f781d039ad75dcd52c516"}, + {file = "aiohttp-3.8.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:fa0ffcace9b3aa34d205d8130f7873fcfefcb6a4dd3dd705b0dab69af6712642"}, + {file = "aiohttp-3.8.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:61bfc23df345d8c9716d03717c2ed5e27374e0fe6f659ea64edcd27b4b044cf7"}, + {file = "aiohttp-3.8.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:31560d268ff62143e92423ef183680b9829b1b482c011713ae941997921eebc8"}, + {file = "aiohttp-3.8.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:01d7bdb774a9acc838e6b8f1d114f45303841b89b95984cbb7d80ea41172a9e3"}, + {file = "aiohttp-3.8.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:97ef77eb6b044134c0b3a96e16abcb05ecce892965a2124c566af0fd60f717e2"}, + {file = "aiohttp-3.8.1-cp310-cp310-win32.whl", hash = "sha256:c2aef4703f1f2ddc6df17519885dbfa3514929149d3ff900b73f45998f2532fa"}, + {file = "aiohttp-3.8.1-cp310-cp310-win_amd64.whl", hash = "sha256:713ac174a629d39b7c6a3aa757b337599798da4c1157114a314e4e391cd28e32"}, + {file = "aiohttp-3.8.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:473d93d4450880fe278696549f2e7aed8cd23708c3c1997981464475f32137db"}, + {file = "aiohttp-3.8.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:99b5eeae8e019e7aad8af8bb314fb908dd2e028b3cdaad87ec05095394cce632"}, + {file = 
"aiohttp-3.8.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3af642b43ce56c24d063325dd2cf20ee012d2b9ba4c3c008755a301aaea720ad"}, + {file = "aiohttp-3.8.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c3630c3ef435c0a7c549ba170a0633a56e92629aeed0e707fec832dee313fb7a"}, + {file = "aiohttp-3.8.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:4a4a4e30bf1edcad13fb0804300557aedd07a92cabc74382fdd0ba6ca2661091"}, + {file = "aiohttp-3.8.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:6f8b01295e26c68b3a1b90efb7a89029110d3a4139270b24fda961893216c440"}, + {file = "aiohttp-3.8.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:a25fa703a527158aaf10dafd956f7d42ac6d30ec80e9a70846253dd13e2f067b"}, + {file = "aiohttp-3.8.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:5bfde62d1d2641a1f5173b8c8c2d96ceb4854f54a44c23102e2ccc7e02f003ec"}, + {file = "aiohttp-3.8.1-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:51467000f3647d519272392f484126aa716f747859794ac9924a7aafa86cd411"}, + {file = "aiohttp-3.8.1-cp36-cp36m-musllinux_1_1_s390x.whl", hash = "sha256:03a6d5349c9ee8f79ab3ff3694d6ce1cfc3ced1c9d36200cb8f08ba06bd3b782"}, + {file = "aiohttp-3.8.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:102e487eeb82afac440581e5d7f8f44560b36cf0bdd11abc51a46c1cd88914d4"}, + {file = "aiohttp-3.8.1-cp36-cp36m-win32.whl", hash = "sha256:4aed991a28ea3ce320dc8ce655875e1e00a11bdd29fe9444dd4f88c30d558602"}, + {file = "aiohttp-3.8.1-cp36-cp36m-win_amd64.whl", hash = "sha256:b0e20cddbd676ab8a64c774fefa0ad787cc506afd844de95da56060348021e96"}, + {file = "aiohttp-3.8.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:37951ad2f4a6df6506750a23f7cbabad24c73c65f23f72e95897bb2cecbae676"}, + {file = "aiohttp-3.8.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c23b1ad869653bc818e972b7a3a79852d0e494e9ab7e1a701a3decc49c20d51"}, + {file = "aiohttp-3.8.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:15b09b06dae900777833fe7fc4b4aa426556ce95847a3e8d7548e2d19e34edb8"}, + {file = "aiohttp-3.8.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:477c3ea0ba410b2b56b7efb072c36fa91b1e6fc331761798fa3f28bb224830dd"}, + {file = "aiohttp-3.8.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:2f2f69dca064926e79997f45b2f34e202b320fd3782f17a91941f7eb85502ee2"}, + {file = "aiohttp-3.8.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ef9612483cb35171d51d9173647eed5d0069eaa2ee812793a75373447d487aa4"}, + {file = "aiohttp-3.8.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:6d69f36d445c45cda7b3b26afef2fc34ef5ac0cdc75584a87ef307ee3c8c6d00"}, + {file = "aiohttp-3.8.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:55c3d1072704d27401c92339144d199d9de7b52627f724a949fc7d5fc56d8b93"}, + {file = "aiohttp-3.8.1-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:b9d00268fcb9f66fbcc7cd9fe423741d90c75ee029a1d15c09b22d23253c0a44"}, + {file = "aiohttp-3.8.1-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:07b05cd3305e8a73112103c834e91cd27ce5b4bd07850c4b4dbd1877d3f45be7"}, + {file = "aiohttp-3.8.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:c34dc4958b232ef6188c4318cb7b2c2d80521c9a56c52449f8f93ab7bc2a8a1c"}, + {file = 
"aiohttp-3.8.1-cp37-cp37m-win32.whl", hash = "sha256:d2f9b69293c33aaa53d923032fe227feac867f81682f002ce33ffae978f0a9a9"}, + {file = "aiohttp-3.8.1-cp37-cp37m-win_amd64.whl", hash = "sha256:6ae828d3a003f03ae31915c31fa684b9890ea44c9c989056fea96e3d12a9fa17"}, + {file = "aiohttp-3.8.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:0c7ebbbde809ff4e970824b2b6cb7e4222be6b95a296e46c03cf050878fc1785"}, + {file = "aiohttp-3.8.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8b7ef7cbd4fec9a1e811a5de813311ed4f7ac7d93e0fda233c9b3e1428f7dd7b"}, + {file = "aiohttp-3.8.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c3d6a4d0619e09dcd61021debf7059955c2004fa29f48788a3dfaf9c9901a7cd"}, + {file = "aiohttp-3.8.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:718626a174e7e467f0558954f94af117b7d4695d48eb980146016afa4b580b2e"}, + {file = "aiohttp-3.8.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:589c72667a5febd36f1315aa6e5f56dd4aa4862df295cb51c769d16142ddd7cd"}, + {file = "aiohttp-3.8.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2ed076098b171573161eb146afcb9129b5ff63308960aeca4b676d9d3c35e700"}, + {file = "aiohttp-3.8.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:086f92daf51a032d062ec5f58af5ca6a44d082c35299c96376a41cbb33034675"}, + {file = "aiohttp-3.8.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:11691cf4dc5b94236ccc609b70fec991234e7ef8d4c02dd0c9668d1e486f5abf"}, + {file = "aiohttp-3.8.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:31d1e1c0dbf19ebccbfd62eff461518dcb1e307b195e93bba60c965a4dcf1ba0"}, + {file = "aiohttp-3.8.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:11a67c0d562e07067c4e86bffc1553f2cf5b664d6111c894671b2b8712f3aba5"}, + {file = "aiohttp-3.8.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:bb01ba6b0d3f6c68b89fce7305080145d4877ad3acaed424bae4d4ee75faa950"}, + {file = "aiohttp-3.8.1-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:44db35a9e15d6fe5c40d74952e803b1d96e964f683b5a78c3cc64eb177878155"}, + {file = "aiohttp-3.8.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:844a9b460871ee0a0b0b68a64890dae9c415e513db0f4a7e3cab41a0f2fedf33"}, + {file = "aiohttp-3.8.1-cp38-cp38-win32.whl", hash = "sha256:7d08744e9bae2ca9c382581f7dce1273fe3c9bae94ff572c3626e8da5b193c6a"}, + {file = "aiohttp-3.8.1-cp38-cp38-win_amd64.whl", hash = "sha256:04d48b8ce6ab3cf2097b1855e1505181bdd05586ca275f2505514a6e274e8e75"}, + {file = "aiohttp-3.8.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:f5315a2eb0239185af1bddb1abf472d877fede3cc8d143c6cddad37678293237"}, + {file = "aiohttp-3.8.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a996d01ca39b8dfe77440f3cd600825d05841088fd6bc0144cc6c2ec14cc5f74"}, + {file = "aiohttp-3.8.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:13487abd2f761d4be7c8ff9080de2671e53fff69711d46de703c310c4c9317ca"}, + {file = "aiohttp-3.8.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ea302f34477fda3f85560a06d9ebdc7fa41e82420e892fc50b577e35fc6a50b2"}, + {file = "aiohttp-3.8.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a2f635ce61a89c5732537a7896b6319a8fcfa23ba09bec36e1b1ac0ab31270d2"}, + {file = "aiohttp-3.8.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e999f2d0e12eea01caeecb17b653f3713d758f6dcc770417cf29ef08d3931421"}, + {file = 
"aiohttp-3.8.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:0770e2806a30e744b4e21c9d73b7bee18a1cfa3c47991ee2e5a65b887c49d5cf"}, + {file = "aiohttp-3.8.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:d15367ce87c8e9e09b0f989bfd72dc641bcd04ba091c68cd305312d00962addd"}, + {file = "aiohttp-3.8.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:6c7cefb4b0640703eb1069835c02486669312bf2f12b48a748e0a7756d0de33d"}, + {file = "aiohttp-3.8.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:71927042ed6365a09a98a6377501af5c9f0a4d38083652bcd2281a06a5976724"}, + {file = "aiohttp-3.8.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:28d490af82bc6b7ce53ff31337a18a10498303fe66f701ab65ef27e143c3b0ef"}, + {file = "aiohttp-3.8.1-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:b6613280ccedf24354406caf785db748bebbddcf31408b20c0b48cb86af76866"}, + {file = "aiohttp-3.8.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:81e3d8c34c623ca4e36c46524a3530e99c0bc95ed068fd6e9b55cb721d408fb2"}, + {file = "aiohttp-3.8.1-cp39-cp39-win32.whl", hash = "sha256:7187a76598bdb895af0adbd2fb7474d7f6025d170bc0a1130242da817ce9e7d1"}, + {file = "aiohttp-3.8.1-cp39-cp39-win_amd64.whl", hash = "sha256:1c182cb873bc91b411e184dab7a2b664d4fea2743df0e4d57402f7f3fa644bac"}, + {file = "aiohttp-3.8.1.tar.gz", hash = "sha256:fc5471e1a54de15ef71c1bc6ebe80d4dc681ea600e68bfd1cbce40427f0b7578"}, +] aioresponses = [ {file = "aioresponses-0.7.3-py2.py3-none-any.whl", hash = "sha256:7b1897169062c92fa87d6ecc503ac566ac87fbfacb2504f8ca81c8035a2eb068"}, {file = "aioresponses-0.7.3.tar.gz", hash = "sha256:2c64ed5710ee8cb4e958c569184dad12f4c9cd5939135cb38f88c6a8261cceb3"}, ] aiormq = [] -aiosignal = [] +aiosignal = [ + {file = "aiosignal-1.2.0-py3-none-any.whl", hash = "sha256:26e62109036cd181df6e6ad646f91f0dcfd05fe16d0cb924138ff2ab75d64e3a"}, + {file = "aiosignal-1.2.0.tar.gz", hash = "sha256:78ed67db6c7b7ced4f98e495e572106d5c432a93e1ddd1bf475e1dc05f5b7df2"}, +] analytics-python = [ {file = "analytics-python-1.4.0.tar.gz", hash = "sha256:a65141ab6e47db396f5bc5708b1db93ff9a99882d81fe808228afd5ebb6dfe5f"}, {file = "analytics_python-1.4.0-py2.py3-none-any.whl", hash = "sha256:3bff972beeb8a3f26607ccd9153484aa4f12eeeea4a693be685bf45aa66ddf99"}, @@ -3533,7 +3609,10 @@ anyio = [ {file = "anyio-3.6.1-py3-none-any.whl", hash = "sha256:cb29b9c70620506a9a8f87a309591713446953302d7d995344d0d7c6c0c9a7be"}, {file = "anyio-3.6.1.tar.gz", hash = "sha256:413adf95f93886e442aea925f3ee43baa5a765a64a0f52c6081894f9992fdd0b"}, ] -apscheduler = [] +apscheduler = [ + {file = "APScheduler-3.9.1-py2.py3-none-any.whl", hash = "sha256:ddc25a0ddd899de44d7f451f4375fb971887e65af51e41e5dcf681f59b8b2c9a"}, + {file = "APScheduler-3.9.1.tar.gz", hash = "sha256:65e6574b6395498d371d045f2a8a7e4f7d50c6ad21ef7313d15b1c7cf20df1e3"}, +] astunparse = [ {file = "astunparse-1.6.3-py2.py3-none-any.whl", hash = "sha256:c2652417f2c8b5bb325c885ae329bdf3f86424075c4fd1a128674bc6fba4b8e8"}, {file = "astunparse-1.6.3.tar.gz", hash = "sha256:5ad93a8456f0d084c3456d059fd9a92cce667963232cbf763eac3bc5b7940872"}, @@ -3542,7 +3621,10 @@ async-generator = [ {file = "async_generator-1.10-py3-none-any.whl", hash = "sha256:01c7bf666359b4967d2cda0000cc2e4af16a0ae098cbffcb8472fb9e8ad6585b"}, {file = "async_generator-1.10.tar.gz", hash = "sha256:6ebb3d106c12920aaae42ccb6f787ef5eefdcdd166ea3d628fa8476abe712144"}, ] -async-timeout = [] +async-timeout = [ + {file = 
"async-timeout-4.0.2.tar.gz", hash = "sha256:2163e1640ddb52b7a8c80d0a67a08587e5d245cc9c553a74a847056bc2976b15"}, + {file = "async_timeout-4.0.2-py3-none-any.whl", hash = "sha256:8ca1e4fcf50d07413d66d1a5e416e42cfdf5851c981d679a09851a6853383b3c"}, +] asynctest = [] atomicwrites = [] attrs = [] @@ -3550,10 +3632,7 @@ azure-core = [ {file = "azure-core-1.22.1.zip", hash = "sha256:4b6e405268a33b873107796495cec3f2f1b1ffe935624ce0fbddff36d38d3a4d"}, {file = "azure_core-1.22.1-py3-none-any.whl", hash = "sha256:407381c74e2ccc16adb1f29c4a1b381ebd39e8661bbf60422926d8252d5b757d"}, ] -azure-storage-blob = [ - {file = "azure-storage-blob-12.11.0.zip", hash = "sha256:49535b3190bb69d0d9ff7a383246b14da4d2b1bdff60cae5f9173920c67ca7ee"}, - {file = "azure_storage_blob-12.11.0-py3-none-any.whl", hash = "sha256:f3dfa605aefb453e7489328b76811a937a411761d7a1613a58c3975c556ec778"}, -] +azure-storage-blob = [] backoff = [ {file = "backoff-1.10.0-py2.py3-none-any.whl", hash = "sha256:5e73e2cbe780e1915a204799dba0a01896f45f4385e636bcca7a0614d879d0cd"}, {file = "backoff-1.10.0.tar.gz", hash = "sha256:b8fba021fac74055ac05eb7c7bfce4723aedde6cd0a504e5326bcb0bdd6d19a4"}, @@ -3592,37 +3671,14 @@ black = [ {file = "black-22.6.0-py3-none-any.whl", hash = "sha256:ac609cf8ef5e7115ddd07d85d988d074ed00e10fbc3445aee393e70164a2219c"}, {file = "black-22.6.0.tar.gz", hash = "sha256:6c6d39e28aed379aec40da1c65434c77d75e65bb59a1e1c283de545fb4e7c6c9"}, ] -blis = [ - {file = "blis-0.7.8-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ae5b06fe3b94645ac5d93cbc7c0129639cc3e0d50b4efb361a20a9e160277a92"}, - {file = "blis-0.7.8-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:294421b720c2de904908de841464c667e1a5c5e9f3db6931dfa29cf369d3653a"}, - {file = "blis-0.7.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2778fe0ba0e25c157839fdd19ed66b9a340c92d4e92e707b7fa9aa21c51cb254"}, - {file = "blis-0.7.8-cp310-cp310-win_amd64.whl", hash = "sha256:0f7bfdee74ac695c35360ace00f2630c1b47406dc0b99ba9211bfa8588bfbed9"}, - {file = "blis-0.7.8-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:159a1a9b32213d99d1415789ac66ed8d23442a696d9d376c66d7b791d3eae575"}, - {file = "blis-0.7.8-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f5fa330ab66d0e92a845b1db361ec8bf3dc4bc7e0dc0ded94f36b8e9f731650"}, - {file = "blis-0.7.8-cp36-cp36m-win_amd64.whl", hash = "sha256:90f17543e0aa3bc379d139867467df2c365ffaf5b61988de12dbba6dbbc9fab4"}, - {file = "blis-0.7.8-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:bfa56e7ef14ae607d8444eb344d22f252a2e0b0f9bfa4bdc9b0c48a9f96b5461"}, - {file = "blis-0.7.8-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17df5ac7d9a9dbbf0415f8f8392fbdf1790fa394f89d695bae5e2e7e361c852b"}, - {file = "blis-0.7.8-cp37-cp37m-win_amd64.whl", hash = "sha256:95d22d3007cb454d11a478331690629861f7d40b4668f9fccfd13b6507ed099b"}, - {file = "blis-0.7.8-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:96ff4c0c1ceab9f94c14b3281f3cef82f593c48c3b5f6169bd51cdcd315e0a6e"}, - {file = "blis-0.7.8-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2db369a4f95927be37e11790dd1ccbf99fd6201eaffbcf408546db847b7b5740"}, - {file = "blis-0.7.8-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63735128c9cae44dc6cbf7557327385df0c4ed2dc2c45a00dabfde1e4d00802d"}, - {file = "blis-0.7.8-cp38-cp38-win_amd64.whl", hash = "sha256:1e970ba1eb12ca38fb5d57f379472125bc3f5106c8214dc847fe79b027212135"}, - {file = "blis-0.7.8-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:f576ad64b772b6fd7df6ef94986235f321983dc870d0f76d78c931bafc41cfa4"}, - {file = "blis-0.7.8-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4e7b7b8bc8cf5e82958bbc393e0167318a930d394cbbf04c1ba18cfabaef5818"}, - {file = "blis-0.7.8-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:66b8ca1a2eb8f1e0563a592aae4b8682b66189ad560e3b8221d93eab0cb76582"}, - {file = "blis-0.7.8-cp39-cp39-win_amd64.whl", hash = "sha256:bf60f634481c3d0faf831ac4f2d1c75343e98f714dc88e3fb3c329758577e772"}, - {file = "blis-0.7.8.tar.gz", hash = "sha256:f7d541bb06323aa350163ba4a3ad00e8effb3b53d4c58ee6228224f3928b6c57"}, -] +blis = [] boto3 = [] botocore = [] cachecontrol = [ {file = "CacheControl-0.12.11-py2.py3-none-any.whl", hash = "sha256:2c75d6a8938cb1933c75c50184549ad42728a27e9f6b92fd677c3151aa72555b"}, {file = "CacheControl-0.12.11.tar.gz", hash = "sha256:a5b9fcc986b184db101aa280b42ecdcdfc524892596f606858e0b7a8b4d9e144"}, ] -cachetools = [ - {file = "cachetools-5.2.0-py3-none-any.whl", hash = "sha256:f9f17d2aec496a9aa6b76f53e3b614c965223c061982d434d160f930c698a9db"}, - {file = "cachetools-5.2.0.tar.gz", hash = "sha256:6a94c6402995a99c3970cc7e4884bb60b4a8639938157eeed436098bf9831757"}, -] +cachetools = [] catalogue = [] certifi = [ {file = "certifi-2022.6.15-py3-none-any.whl", hash = "sha256:fe86415d55e84719d75f8b69414f6438ac3547d2078ab91b67e779ef69378412"}, @@ -3698,14 +3754,14 @@ charset-normalizer = [ {file = "charset-normalizer-2.1.0.tar.gz", hash = "sha256:575e708016ff3a5e3681541cb9d79312c416835686d054a23accb873b254f413"}, {file = "charset_normalizer-2.1.0-py3-none-any.whl", hash = "sha256:5189b6f22b01957427f35b6a08d9a0bc45b46d3788ef5a92e978433c7a35f8a5"}, ] -click = [] +click = [ + {file = "click-8.0.4-py3-none-any.whl", hash = "sha256:6a7a62563bbfabfda3a38f3023a1db4a35978c0abd76f6c9605ecd6554d6d9b1"}, + {file = "click-8.0.4.tar.gz", hash = "sha256:8458d7b1287c5fb128c90e23381cf99dcde74beaf6c7ff6384ce84d6fe090adb"}, +] click-default-group = [ {file = "click-default-group-1.2.2.tar.gz", hash = "sha256:d9560e8e8dfa44b3562fbc9425042a0fd6d21956fcc2db0077f63f34253ab904"}, ] -cloudpickle = [ - {file = "cloudpickle-2.1.0-py3-none-any.whl", hash = "sha256:b5c434f75c34624eedad3a14f2be5ac3b5384774d5b0e3caf905c21479e6c4b1"}, - {file = "cloudpickle-2.1.0.tar.gz", hash = "sha256:bb233e876a58491d9590a676f93c7a5473a08f747d5ab9df7f9ce564b3e7938e"}, -] +cloudpickle = [] colorama = [ {file = "colorama-0.4.5-py2.py3-none-any.whl", hash = "sha256:854bf444933e37f5824ae7bfc1e98d5bce2ebe4160d46b5edf346a89358e99da"}, {file = "colorama-0.4.5.tar.gz", hash = "sha256:e6c6b4334fc50988a639d9b98aa429a0b57da6e17b9a44f0451f930b6967b7a4"}, @@ -3824,10 +3880,7 @@ flatbuffers = [ {file = "flatbuffers-2.0-py2.py3-none-any.whl", hash = "sha256:3751954f0604580d3219ae49a85fafec9d85eec599c0b96226e1bc0b48e57474"}, {file = "flatbuffers-2.0.tar.gz", hash = "sha256:12158ab0272375eab8db2d663ae97370c33f152b27801fa6024e1d6105fd4dd2"}, ] -freezegun = [ - {file = "freezegun-1.2.1-py3-none-any.whl", hash = "sha256:15103a67dfa868ad809a8f508146e396be2995172d25f927e48ce51c0bf5cb09"}, - {file = "freezegun-1.2.1.tar.gz", hash = "sha256:b4c64efb275e6bc68dc6e771b17ffe0ff0f90b81a2a5189043550b6519926ba4"}, -] +freezegun = [] frozenlist = [] fsspec = [] future = [ @@ -3849,10 +3902,7 @@ gitpython = [ {file = "GitPython-3.1.27-py3-none-any.whl", hash = "sha256:5b68b000463593e05ff2b261acff0ff0972df8ab1b70d3cdbd41b546c8b8fc3d"}, {file = "GitPython-3.1.27.tar.gz", hash = "sha256:1c885ce809e8ba2d88a29befeb385fcea06338d3640712b59ca623c220bb5704"}, 
] -google-api-core = [ - {file = "google-api-core-2.8.2.tar.gz", hash = "sha256:06f7244c640322b508b125903bb5701bebabce8832f85aba9335ec00b3d02edc"}, - {file = "google_api_core-2.8.2-py3-none-any.whl", hash = "sha256:93c6a91ccac79079ac6bbf8b74ee75db970cc899278b97d53bc012f35908cf50"}, -] +google-api-core = [] google-auth = [] google-auth-oauthlib = [ {file = "google-auth-oauthlib-0.4.6.tar.gz", hash = "sha256:a90a072f6993f2c327067bf65270046384cda5a8ecb20b94ea9a687f1f233a7a"}, @@ -3927,6 +3977,7 @@ greenlet = [ {file = "greenlet-1.1.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:97e5306482182170ade15c4b0d8386ded995a07d7cc2ca8f27958d34d6736497"}, {file = "greenlet-1.1.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e6a36bb9474218c7a5b27ae476035497a6990e21d04c279884eb10d9b290f1b1"}, {file = "greenlet-1.1.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:abb7a75ed8b968f3061327c433a0fbd17b729947b400747c334a9c29a9af6c58"}, + {file = "greenlet-1.1.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b336501a05e13b616ef81ce329c0e09ac5ed8c732d9ba7e3e983fcc1a9e86965"}, {file = "greenlet-1.1.2-cp310-cp310-win_amd64.whl", hash = "sha256:14d4f3cd4e8b524ae9b8aa567858beed70c392fdec26dbdb0a8a418392e71708"}, {file = "greenlet-1.1.2-cp35-cp35m-macosx_10_14_x86_64.whl", hash = "sha256:17ff94e7a83aa8671a25bf5b59326ec26da379ace2ebc4411d690d80a7fbcf23"}, {file = "greenlet-1.1.2-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:9f3cba480d3deb69f6ee2c1825060177a22c7826431458c697df88e6aeb3caee"}, @@ -3939,6 +3990,7 @@ greenlet = [ {file = "greenlet-1.1.2-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f9d29ca8a77117315101425ec7ec2a47a22ccf59f5593378fc4077ac5b754fce"}, {file = "greenlet-1.1.2-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:21915eb821a6b3d9d8eefdaf57d6c345b970ad722f856cd71739493ce003ad08"}, {file = "greenlet-1.1.2-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eff9d20417ff9dcb0d25e2defc2574d10b491bf2e693b4e491914738b7908168"}, + {file = "greenlet-1.1.2-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:b8c008de9d0daba7b6666aa5bbfdc23dcd78cafc33997c9b7741ff6353bafb7f"}, {file = "greenlet-1.1.2-cp36-cp36m-win32.whl", hash = "sha256:32ca72bbc673adbcfecb935bb3fb1b74e663d10a4b241aaa2f5a75fe1d1f90aa"}, {file = "greenlet-1.1.2-cp36-cp36m-win_amd64.whl", hash = "sha256:f0214eb2a23b85528310dad848ad2ac58e735612929c8072f6093f3585fd342d"}, {file = "greenlet-1.1.2-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:b92e29e58bef6d9cfd340c72b04d74c4b4e9f70c9fa7c78b674d1fec18896dc4"}, @@ -3947,6 +3999,7 @@ greenlet = [ {file = "greenlet-1.1.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1e12bdc622676ce47ae9abbf455c189e442afdde8818d9da983085df6312e7a1"}, {file = "greenlet-1.1.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8c790abda465726cfb8bb08bd4ca9a5d0a7bd77c7ac1ca1b839ad823b948ea28"}, {file = "greenlet-1.1.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f276df9830dba7a333544bd41070e8175762a7ac20350786b322b714b0e654f5"}, + {file = "greenlet-1.1.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c5d5b35f789a030ebb95bff352f1d27a93d81069f2adb3182d99882e095cefe"}, {file = "greenlet-1.1.2-cp37-cp37m-win32.whl", hash = "sha256:64e6175c2e53195278d7388c454e0b30997573f3f4bd63697f88d855f7a6a1fc"}, {file = "greenlet-1.1.2-cp37-cp37m-win_amd64.whl", 
hash = "sha256:b11548073a2213d950c3f671aa88e6f83cda6e2fb97a8b6317b1b5b33d850e06"}, {file = "greenlet-1.1.2-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:9633b3034d3d901f0a46b7939f8c4d64427dfba6bbc5a36b1a67364cf148a1b0"}, @@ -3955,6 +4008,7 @@ greenlet = [ {file = "greenlet-1.1.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e859fcb4cbe93504ea18008d1df98dee4f7766db66c435e4882ab35cf70cac43"}, {file = "greenlet-1.1.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:00e44c8afdbe5467e4f7b5851be223be68adb4272f44696ee71fe46b7036a711"}, {file = "greenlet-1.1.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec8c433b3ab0419100bd45b47c9c8551248a5aee30ca5e9d399a0b57ac04651b"}, + {file = "greenlet-1.1.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2bde6792f313f4e918caabc46532aa64aa27a0db05d75b20edfc5c6f46479de2"}, {file = "greenlet-1.1.2-cp38-cp38-win32.whl", hash = "sha256:288c6a76705dc54fba69fbcb59904ae4ad768b4c768839b8ca5fdadec6dd8cfd"}, {file = "greenlet-1.1.2-cp38-cp38-win_amd64.whl", hash = "sha256:8d2f1fb53a421b410751887eb4ff21386d119ef9cde3797bf5e7ed49fb51a3b3"}, {file = "greenlet-1.1.2-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:166eac03e48784a6a6e0e5f041cfebb1ab400b394db188c48b3a84737f505b67"}, @@ -3963,6 +4017,7 @@ greenlet = [ {file = "greenlet-1.1.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b1692f7d6bc45e3200844be0dba153612103db241691088626a33ff1f24a0d88"}, {file = "greenlet-1.1.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7227b47e73dedaa513cdebb98469705ef0d66eb5a1250144468e9c3097d6b59b"}, {file = "greenlet-1.1.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ff61ff178250f9bb3cd89752df0f1dd0e27316a8bd1465351652b1b4a4cdfd3"}, + {file = "greenlet-1.1.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0051c6f1f27cb756ffc0ffbac7d2cd48cb0362ac1736871399a739b2885134d3"}, {file = "greenlet-1.1.2-cp39-cp39-win32.whl", hash = "sha256:f70a9e237bb792c7cc7e44c531fd48f5897961701cdaa06cf22fc14965c496cf"}, {file = "greenlet-1.1.2-cp39-cp39-win_amd64.whl", hash = "sha256:013d61294b6cd8fe3242932c1c5e36e5d1db2c8afb58606c5a67efce62c1f5fd"}, {file = "greenlet-1.1.2.tar.gz", hash = "sha256:e30f5ea4ae2346e62cedde8794a56858a67b878dd79f7df76a0767e356b1744a"}, @@ -4038,10 +4093,7 @@ httpx = [ {file = "httpx-0.23.0-py3-none-any.whl", hash = "sha256:42974f577483e1e932c3cdc3cd2303e883cbfba17fe228b0f63589764d7b9c4b"}, {file = "httpx-0.23.0.tar.gz", hash = "sha256:f28eac771ec9eb4866d3fb4ab65abd42d38c424739e80c08d8d20570de60b0ef"}, ] -huggingface-hub = [ - {file = "huggingface_hub-0.8.1-py3-none-any.whl", hash = "sha256:a11fb8d696a26f927833d46b7633105fd864fd92a2beb1140cbf1b2f703dedb3"}, - {file = "huggingface_hub-0.8.1.tar.gz", hash = "sha256:75c70797da54b849f06c2cbf7ba2217250ee217230b9f65547d5db3c5bd84bb5"}, -] +huggingface-hub = [] humanfriendly = [ {file = "humanfriendly-10.0-py2.py3-none-any.whl", hash = "sha256:1697e1a8a8f550fd43c2865cd84542fc175a61dcb779b6fee18cf6b6ccba1477"}, {file = "humanfriendly-10.0.tar.gz", hash = "sha256:6b0b831ce8f15f7300721aa49829fc4e83921a9a301cc7f606be6686a2288ddc"}, @@ -4362,42 +4414,7 @@ networkx = [ {file = "networkx-2.6.3.tar.gz", hash = "sha256:c0946ed31d71f1b732b5aaa6da5a0388a345019af232ce2f49c766e2d6795c51"}, ] "nr.util" = [] -numpy = [ - {file = "numpy-1.19.5-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:cc6bd4fd593cb261332568485e20a0712883cf631f6f5e8e86a52caa8b2b50ff"}, - 
{file = "numpy-1.19.5-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:aeb9ed923be74e659984e321f609b9ba54a48354bfd168d21a2b072ed1e833ea"}, - {file = "numpy-1.19.5-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:8b5e972b43c8fc27d56550b4120fe6257fdc15f9301914380b27f74856299fea"}, - {file = "numpy-1.19.5-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:43d4c81d5ffdff6bae58d66a3cd7f54a7acd9a0e7b18d97abb255defc09e3140"}, - {file = "numpy-1.19.5-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:a4646724fba402aa7504cd48b4b50e783296b5e10a524c7a6da62e4a8ac9698d"}, - {file = "numpy-1.19.5-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:2e55195bc1c6b705bfd8ad6f288b38b11b1af32f3c8289d6c50d47f950c12e76"}, - {file = "numpy-1.19.5-cp36-cp36m-win32.whl", hash = "sha256:39b70c19ec771805081578cc936bbe95336798b7edf4732ed102e7a43ec5c07a"}, - {file = "numpy-1.19.5-cp36-cp36m-win_amd64.whl", hash = "sha256:dbd18bcf4889b720ba13a27ec2f2aac1981bd41203b3a3b27ba7a33f88ae4827"}, - {file = "numpy-1.19.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:603aa0706be710eea8884af807b1b3bc9fb2e49b9f4da439e76000f3b3c6ff0f"}, - {file = "numpy-1.19.5-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:cae865b1cae1ec2663d8ea56ef6ff185bad091a5e33ebbadd98de2cfa3fa668f"}, - {file = "numpy-1.19.5-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:36674959eed6957e61f11c912f71e78857a8d0604171dfd9ce9ad5cbf41c511c"}, - {file = "numpy-1.19.5-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:06fab248a088e439402141ea04f0fffb203723148f6ee791e9c75b3e9e82f080"}, - {file = "numpy-1.19.5-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:6149a185cece5ee78d1d196938b2a8f9d09f5a5ebfbba66969302a778d5ddd1d"}, - {file = "numpy-1.19.5-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:50a4a0ad0111cc1b71fa32dedd05fa239f7fb5a43a40663269bb5dc7877cfd28"}, - {file = "numpy-1.19.5-cp37-cp37m-win32.whl", hash = "sha256:d051ec1c64b85ecc69531e1137bb9751c6830772ee5c1c426dbcfe98ef5788d7"}, - {file = "numpy-1.19.5-cp37-cp37m-win_amd64.whl", hash = "sha256:a12ff4c8ddfee61f90a1633a4c4afd3f7bcb32b11c52026c92a12e1325922d0d"}, - {file = "numpy-1.19.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cf2402002d3d9f91c8b01e66fbb436a4ed01c6498fffed0e4c7566da1d40ee1e"}, - {file = "numpy-1.19.5-cp38-cp38-manylinux1_i686.whl", hash = "sha256:1ded4fce9cfaaf24e7a0ab51b7a87be9038ea1ace7f34b841fe3b6894c721d1c"}, - {file = "numpy-1.19.5-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:012426a41bc9ab63bb158635aecccc7610e3eff5d31d1eb43bc099debc979d94"}, - {file = "numpy-1.19.5-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:759e4095edc3c1b3ac031f34d9459fa781777a93ccc633a472a5468587a190ff"}, - {file = "numpy-1.19.5-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:a9d17f2be3b427fbb2bce61e596cf555d6f8a56c222bd2ca148baeeb5e5c783c"}, - {file = "numpy-1.19.5-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:99abf4f353c3d1a0c7a5f27699482c987cf663b1eac20db59b8c7b061eabd7fc"}, - {file = "numpy-1.19.5-cp38-cp38-win32.whl", hash = "sha256:384ec0463d1c2671170901994aeb6dce126de0a95ccc3976c43b0038a37329c2"}, - {file = "numpy-1.19.5-cp38-cp38-win_amd64.whl", hash = "sha256:811daee36a58dc79cf3d8bdd4a490e4277d0e4b7d103a001a4e73ddb48e7e6aa"}, - {file = "numpy-1.19.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c843b3f50d1ab7361ca4f0b3639bf691569493a56808a0b0c54a051d260b7dbd"}, - {file = "numpy-1.19.5-cp39-cp39-manylinux1_i686.whl", hash = "sha256:d6631f2e867676b13026e2846180e2c13c1e11289d67da08d71cacb2cd93d4aa"}, - {file = "numpy-1.19.5-cp39-cp39-manylinux1_x86_64.whl", 
hash = "sha256:7fb43004bce0ca31d8f13a6eb5e943fa73371381e53f7074ed21a4cb786c32f8"}, - {file = "numpy-1.19.5-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:2ea52bd92ab9f768cc64a4c3ef8f4b2580a17af0a5436f6126b08efbd1838371"}, - {file = "numpy-1.19.5-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:400580cbd3cff6ffa6293df2278c75aef2d58d8d93d3c5614cd67981dae68ceb"}, - {file = "numpy-1.19.5-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:df609c82f18c5b9f6cb97271f03315ff0dbe481a2a02e56aeb1b1a985ce38e60"}, - {file = "numpy-1.19.5-cp39-cp39-win32.whl", hash = "sha256:ab83f24d5c52d60dbc8cd0528759532736b56db58adaa7b5f1f76ad551416a1e"}, - {file = "numpy-1.19.5-cp39-cp39-win_amd64.whl", hash = "sha256:0eef32ca3132a48e43f6a0f5a82cb508f22ce5a3d6f67a8329c81c8e226d3f6e"}, - {file = "numpy-1.19.5-pp36-pypy36_pp73-manylinux2010_x86_64.whl", hash = "sha256:a0d53e51a6cb6f0d9082decb7a4cb6dfb33055308c4c44f53103c073f649af73"}, - {file = "numpy-1.19.5.zip", hash = "sha256:a76f502430dd98d7546e1ea2250a7360c065a5fdea52b2dffe8ae7180909b6f4"}, -] +numpy = [] oauthlib = [ {file = "oauthlib-3.2.0-py3-none-any.whl", hash = "sha256:6db33440354787f9b7f3a6dbd4febf5d0f93758354060e802f6c06cb493022fe"}, {file = "oauthlib-3.2.0.tar.gz", hash = "sha256:23a8208d75b902797ea29fd31fa80a15ed9dc2c6c16fe73f5d346f83f6fa27a2"}, @@ -4416,10 +4433,7 @@ pathspec = [ {file = "pathspec-0.9.0-py2.py3-none-any.whl", hash = "sha256:7d15c4ddb0b5c802d161efc417ec1a2558ea2653c2e8ad9c19098201dc1c993a"}, {file = "pathspec-0.9.0.tar.gz", hash = "sha256:e564499435a2673d586f6b2130bb5b95f04a3ba06f81b8f895b651a3c76aabb1"}, ] -pathy = [ - {file = "pathy-0.6.2-py3-none-any.whl", hash = "sha256:a7aa9794fade161bb4c28a33c5bc2c6bf41f61ec5eee51cfa8914f0a433447e1"}, - {file = "pathy-0.6.2.tar.gz", hash = "sha256:3178215bdadf3741107d987020be0fb5b59888f60f96de43cce5fe45d9d4b64a"}, -] +pathy = [] pbr = [] pep440-version-utils = [ {file = "pep440-version-utils-0.3.0.tar.gz", hash = "sha256:ceb8c8da63b54cc555946d91829f72fe323f8d635b22fa54ef0a9800c37f50df"}, @@ -4891,7 +4905,10 @@ python-dateutil = [ python-engineio = [] python-socketio = [] pytz = [] -pytz-deprecation-shim = [] +pytz-deprecation-shim = [ + {file = "pytz_deprecation_shim-0.1.0.post0-py2.py3-none-any.whl", hash = "sha256:8314c9692a636c8eb3bda879b9f119e350e93223ae83e70e80c31675a0fdc1a6"}, + {file = "pytz_deprecation_shim-0.1.0.post0.tar.gz", hash = "sha256:af097bae1b616dde5c5744441e2ddc69e74dfdcb0c263129610d85b87445a59d"}, +] pyyaml = [ {file = "PyYAML-5.4.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:3b2b1824fe7112845700f815ff6a489360226a5609b96ec2190a45e62a9fc922"}, {file = "PyYAML-5.4.1-cp27-cp27m-win32.whl", hash = "sha256:129def1b7c1bf22faffd67b8f3724645203b79d8f4cc81f674654d9902cb4393"}, @@ -4969,6 +4986,10 @@ rsa = [] {file = "ruamel.yaml-0.16.13.tar.gz", hash = "sha256:bb48c514222702878759a05af96f4b7ecdba9b33cd4efcf25c86b882cef3a942"}, ] "ruamel.yaml.clib" = [ + {file = "ruamel.yaml.clib-0.2.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:6e7be2c5bcb297f5b82fee9c665eb2eb7001d1050deaba8471842979293a80b0"}, + {file = "ruamel.yaml.clib-0.2.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:221eca6f35076c6ae472a531afa1c223b9c29377e62936f61bc8e6e8bdc5f9e7"}, + {file = "ruamel.yaml.clib-0.2.6-cp310-cp310-win32.whl", hash = "sha256:1070ba9dd7f9370d0513d649420c3b362ac2d687fe78c6e888f5b12bf8bc7bee"}, + {file = "ruamel.yaml.clib-0.2.6-cp310-cp310-win_amd64.whl", hash = 
"sha256:77df077d32921ad46f34816a9a16e6356d8100374579bc35e15bab5d4e9377de"}, {file = "ruamel.yaml.clib-0.2.6-cp35-cp35m-macosx_10_6_intel.whl", hash = "sha256:cfdb9389d888c5b74af297e51ce357b800dd844898af9d4a547ffc143fa56751"}, {file = "ruamel.yaml.clib-0.2.6-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:7b2927e92feb51d830f531de4ccb11b320255ee95e791022555971c466af4527"}, {file = "ruamel.yaml.clib-0.2.6-cp35-cp35m-win32.whl", hash = "sha256:ada3f400d9923a190ea8b59c8f60680c4ef8a4b0dfae134d2f2ff68429adfab5"}, @@ -5328,10 +5349,7 @@ typeguard = [ {file = "typeguard-2.13.3-py3-none-any.whl", hash = "sha256:5e3e3be01e887e7eafae5af63d1f36c849aaa94e3a0112097312aabfa16284f1"}, {file = "typeguard-2.13.3.tar.gz", hash = "sha256:00edaa8da3a133674796cf5ea87d9f4b4c367d77476e185e80251cc13dfbb8c4"}, ] -typer = [ - {file = "typer-0.4.2-py3-none-any.whl", hash = "sha256:023bae00d1baf358a6cc7cea45851639360bb716de687b42b0a4641cd99173f1"}, - {file = "typer-0.4.2.tar.gz", hash = "sha256:b8261c6c0152dd73478b5ba96ba677e5d6948c715c310f7c91079f311f62ec03"}, -] +typer = [] types-pkg-resources = [ {file = "types-pkg_resources-0.1.3.tar.gz", hash = "sha256:834a9b8d3dbea343562fd99d5d3359a726f6bf9d3733bccd2b4f3096fbab9dae"}, {file = "types_pkg_resources-0.1.3-py2.py3-none-any.whl", hash = "sha256:0cb9972cee992249f93fff1a491bf2dc3ce674e5a1926e27d4f0866f7d9b6d9c"}, @@ -5352,7 +5370,10 @@ typing-utils = [ {file = "typing_utils-0.1.0.tar.gz", hash = "sha256:8ff6b6705414b82575ad5ae0925ac414a9650fb8c5718289b1327dec61252f65"}, ] tzdata = [] -tzlocal = [] +tzlocal = [ + {file = "tzlocal-4.2-py3-none-any.whl", hash = "sha256:89885494684c929d9191c57aa27502afc87a579be5cdd3225c77c463ea043745"}, + {file = "tzlocal-4.2.tar.gz", hash = "sha256:ee5842fa3a795f023514ac2d801c4a81d1743bbe642e3940143326b3a00addd7"}, +] ujson = [ {file = "ujson-5.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:511aa641a5b91d19280183b134fb6c473039d4dd82e987ac810cffba783521ac"}, {file = "ujson-5.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b045ca5497a950cc3492840adb3bcb3b9e305ed6599ed14c6aeaa08011aa463f"}, diff --git a/pyproject.toml b/pyproject.toml index 0b1e738f3439..a23fc00c859f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -70,7 +70,7 @@ matplotlib = ">=3.1,<3.4" attrs = ">=19.3,<22.2" jsonpickle = ">=1.3,<2.3" redis = ">=3.4,<5.0" -numpy = ">=1.19.2,<1.20.0" +numpy = ">=1.19.2,<1.22.0" scipy = ">=1.4.1,<1.8.0" absl-py = ">=0.9,<1.3" apscheduler = ">=3.6,<3.10" diff --git a/rasa/core/evaluation/marker_stats.py b/rasa/core/evaluation/marker_stats.py index 737263fd524c..4afece5a3619 100644 --- a/rasa/core/evaluation/marker_stats.py +++ b/rasa/core/evaluation/marker_stats.py @@ -12,12 +12,15 @@ def compute_statistics( values: List[Union[float, int]] -) -> Dict[Text, Union[int, np.float]]: +) -> Dict[Text, Union[int, float]]: """Computes some statistics over the given numbers.""" return { "count": len(values) if values else 0, "mean": np.mean(values) if values else np.nan, - "median": np.median(values) if values else np.nan, + # [numpy-upgrade] type ignore can be removed after upgrading to numpy 1.23 + "median": ( + np.median(values) if values else np.nan # type: ignore[no-untyped-call] + ), "min": min(values) if values else np.nan, "max": max(values) if values else np.nan, } @@ -250,7 +253,7 @@ def _write_per_session_statistic( marker_name: Text, statistic_name: Text, session_identifiers: List[Tuple[Text, int]], - values: List[Union[np.float, int]], + values: List[Union[float, int]], ) -> None: for record_idx, (sender_id, 
session_idx) in enumerate(session_identifiers): MarkerStatistics._write_row( @@ -268,17 +271,18 @@ def _write_per_session_statistic( def _write_row( table_writer: WriteRow, sender_id: Text, - session_idx: Union[int, np.float], + session_idx: Union[int, float], marker_name: Text, statistic_name: Text, - statistic_value: Union[int, np.float], + statistic_value: Union[int, float], ) -> None: if isinstance(statistic_value, int): value_str = str(statistic_value) elif np.isnan(statistic_value): value_str = str(np.nan) else: - value_str = np.round(statistic_value, 3) + # [numpy-upgrade] type ignore can be removed after upgrading to numpy 1.23 + value_str = np.round(statistic_value, 3) # type: ignore[no-untyped-call] table_writer.writerow( [ str(item) diff --git a/rasa/core/featurizers/single_state_featurizer.py b/rasa/core/featurizers/single_state_featurizer.py index 7d8c504084c1..fce59c09870b 100644 --- a/rasa/core/featurizers/single_state_featurizer.py +++ b/rasa/core/featurizers/single_state_featurizer.py @@ -142,7 +142,8 @@ def _create_features( # its value if state_feature in self._default_feature_states[attribute]: features[self._default_feature_states[attribute][state_feature]] = value - features = np.expand_dims(features, 0) + # [numpy-upgrade] type ignore can be removed after upgrading to numpy 1.23 + features = np.expand_dims(features, 0) # type: ignore[no-untyped-call] if sparse: features = scipy.sparse.coo_matrix(features) diff --git a/rasa/core/policies/ted_policy.py b/rasa/core/policies/ted_policy.py index cb4e1140bc27..c9e756884233 100644 --- a/rasa/core/policies/ted_policy.py +++ b/rasa/core/policies/ted_policy.py @@ -455,10 +455,16 @@ def _assemble_label_data( SEQUENCE, ) label_ids = np.arange(domain.num_actions) + # [numpy-upgrade] type ignore can be removed after upgrading to numpy 1.23 label_data.add_features( LABEL_KEY, LABEL_SUB_KEY, - [FeatureArray(np.expand_dims(label_ids, -1), number_of_dimensions=2)], + [ + FeatureArray( + np.expand_dims(label_ids, -1), # type: ignore[no-untyped-call] + number_of_dimensions=2, + ) + ], ) return label_data @@ -521,8 +527,12 @@ def _create_model_data( model_data = RasaModelData(label_key=LABEL_KEY, label_sub_key=LABEL_SUB_KEY) if label_ids is not None and encoded_all_labels is not None: + # [numpy-upgrade] type ignore can be removed after upgrading to numpy 1.23 label_ids = np.array( - [np.expand_dims(seq_label_ids, -1) for seq_label_ids in label_ids] + [ + np.expand_dims(seq_label_ids, -1) # type: ignore[no-untyped-call] + for seq_label_ids in label_ids + ] ) model_data.add_features( LABEL_KEY, @@ -790,10 +800,15 @@ def _pick_confidence( logger.debug(f"User intent lead to '{non_e2e_action_name}'.") e2e_action_name = domain.action_names_or_texts[np.argmax(confidences[1])] logger.debug(f"User text lead to '{e2e_action_name}'.") + # [numpy-upgrade] type ignore can be removed after upgrading to numpy 1.23 + # [numpy-upgrade] type ignore can be removed after upgrading to numpy 1.23 + # [numpy-upgrade] type ignore can be removed after upgrading to numpy 1.23 if ( - np.max(confidences[1]) > self.config[E2E_CONFIDENCE_THRESHOLD] + np.max(confidences[1]) # type: ignore[no-untyped-call] + > self.config[E2E_CONFIDENCE_THRESHOLD] # TODO maybe compare confidences is better - and np.max(similarities[1]) > np.max(similarities[0]) + and np.max(similarities[1]) # type: ignore[no-untyped-call] + > np.max(similarities[0]) # type: ignore[no-untyped-call] ): logger.debug(f"TED predicted '{e2e_action_name}' based on user text.") return confidences[1], True @@ 
-827,11 +842,19 @@ def predict_action_probabilities( tracker, domain, precomputations, rule_only_data=rule_only_data ) model_data = self._create_model_data(tracker_state_features) - outputs: Dict[Text, np.ndarray] = self.model.run_inference(model_data) + outputs = self.model.run_inference(model_data) - # take the last prediction in the sequence - similarities = outputs["similarities"][:, -1, :] - confidences = outputs["scores"][:, -1, :] + if isinstance(outputs["similarities"], np.ndarray): + # take the last prediction in the sequence + similarities = outputs["similarities"][:, -1, :] + else: + raise TypeError( + "model output for `similarities` " "should be a numpy array" + ) + if isinstance(outputs["scores"], np.ndarray): + confidences = outputs["scores"][:, -1, :] + else: + raise TypeError("model output for `scores` should be a numpy array") # take correct prediction from batch confidence, is_e2e_prediction = self._pick_confidence( confidences, similarities, domain diff --git a/rasa/core/policies/unexpected_intent_policy.py b/rasa/core/policies/unexpected_intent_policy.py index f1910662bfcf..9858f826884b 100644 --- a/rasa/core/policies/unexpected_intent_policy.py +++ b/rasa/core/policies/unexpected_intent_policy.py @@ -1,7 +1,7 @@ import dataclasses import logging from pathlib import Path -from typing import Any, List, Optional, Text, Dict, Type +from typing import Any, List, Optional, Text, Dict, Type, Union import numpy as np import tensorflow as tf @@ -370,10 +370,16 @@ def _assemble_label_data( f"{LABEL}_{INTENT}", SEQUENCE_LENGTH, f"{LABEL}_{INTENT}", SEQUENCE ) label_ids = np.arange(len(domain.intents)) + # [numpy-upgrade] type ignore can be removed after upgrading to numpy 1.23 label_data.add_features( LABEL_KEY, LABEL_SUB_KEY, - [FeatureArray(np.expand_dims(label_ids, -1), number_of_dimensions=2)], + [ + FeatureArray( + np.expand_dims(label_ids, -1), # type: ignore[no-untyped-call] + number_of_dimensions=2, + ) + ], ) return label_data @@ -487,7 +493,7 @@ def run_training( self.compute_label_quantiles_post_training(model_data, label_ids) def _collect_action_metadata( - self, domain: Domain, similarities: np.array, query_intent: Text + self, domain: Domain, similarities: np.ndarray, query_intent: Text ) -> UnexpecTEDIntentPolicyMetadata: """Collects metadata to be attached to the predicted action. @@ -604,8 +610,12 @@ def predict_action_probabilities( output = self.model.run_inference(model_data) # take the last prediction in the sequence - all_similarities: np.ndarray = output["similarities"] - sequence_similarities = all_similarities[:, -1, :] + if isinstance(output["similarities"], np.ndarray): + sequence_similarities = output["similarities"][:, -1, :] + else: + raise TypeError( + "model output for `similarities` " "should be a numpy array" + ) # Check for unlikely intent last_user_uttered_event = tracker.get_last_event_for(UserUttered) @@ -697,7 +707,7 @@ def _should_check_for_intent(self, intent: Text, domain: Domain) -> bool: return True def _check_unlikely_intent( - self, domain: Domain, similarities: np.array, query_intent: Text + self, domain: Domain, similarities: np.ndarray, query_intent: Text ) -> bool: """Checks if the query intent is probable according to model's predictions. @@ -774,7 +784,10 @@ def _collect_label_id_grouped_scores( Returns: Both buckets of similarity scores grouped by each unique label id. 
""" - unique_label_ids = np.unique(label_ids).tolist() + # [numpy-upgrade] type ignore can be removed after upgrading to numpy 1.23 + unique_label_ids = np.unique( + label_ids + ).tolist() # type: ignore[no-untyped-call] if LABEL_PAD_ID in unique_label_ids: unique_label_ids.remove(LABEL_PAD_ID) @@ -826,8 +839,9 @@ def _compute_label_quantiles( prediction_scores[NEGATIVE_SCORES_KEY], ) minimum_positive_score = min(positive_scores) + # [numpy-upgrade] type ignore can be removed after upgrading to numpy 1.23 if negative_scores: - quantile_values = np.quantile( + quantile_values = np.quantile( # type: ignore[no-untyped-call] negative_scores, quantile_indices, interpolation="lower" ) label_quantiles[label_id] = [ @@ -981,7 +995,9 @@ def _get_labels_embed( return labels_embed - def run_bulk_inference(self, model_data: RasaModelData) -> Dict[Text, np.ndarray]: + def run_bulk_inference( + self, model_data: RasaModelData + ) -> Dict[Text, Union[np.ndarray, Dict[Text, Any]]]: """Computes model's predictions for input data. Args: diff --git a/rasa/nlu/classifiers/diet_classifier.py b/rasa/nlu/classifiers/diet_classifier.py index 7c6640291262..c1e0cefb8e66 100644 --- a/rasa/nlu/classifiers/diet_classifier.py +++ b/rasa/nlu/classifiers/diet_classifier.py @@ -605,10 +605,16 @@ def _compute_default_label_features( logger.debug("No label features found. Computing default label features.") eye_matrix = np.eye(len(labels_example), dtype=np.float32) + # [numpy-upgrade] type ignore can be removed after upgrading to numpy 1.23 # add sequence dimension to one-hot labels return [ FeatureArray( - np.array([np.expand_dims(a, 0) for a in eye_matrix]), + np.array( + [ + np.expand_dims(a, 0) # type: ignore[no-untyped-call] + for a in eye_matrix + ] + ), number_of_dimensions=3, ) ] @@ -658,12 +664,18 @@ def _create_label_data( ) label_ids = np.array([idx for (idx, _) in labels_idx_examples]) + # [numpy-upgrade] type ignore can be removed after upgrading to numpy 1.23 # explicitly add last dimension to label_ids # to track correctly dynamic sequences label_data.add_features( LABEL_KEY, LABEL_SUB_KEY, - [FeatureArray(np.expand_dims(label_ids, -1), number_of_dimensions=2)], + [ + FeatureArray( + np.expand_dims(label_ids, -1), # type: ignore[no-untyped-call] + number_of_dimensions=2, + ) + ], ) label_data.add_lengths(LABEL, SEQUENCE_LENGTH, LABEL, SEQUENCE) @@ -788,10 +800,16 @@ def _add_label_features( label_ids.append(label_id_dict[example.get(label_attribute)]) # explicitly add last dimension to label_ids # to track correctly dynamic sequences + # [numpy-upgrade] type ignore can be removed after upgrading to numpy 1.23 model_data.add_features( LABEL_KEY, LABEL_SUB_KEY, - [FeatureArray(np.expand_dims(label_ids, -1), number_of_dimensions=2)], + [ + FeatureArray( + np.expand_dims(label_ids, -1), # type: ignore[no-untyped-call] + number_of_dimensions=2, + ) + ], ) if ( @@ -853,7 +871,15 @@ def preprocess_train_data(self, training_data: TrainingData) -> RasaModelData: @staticmethod def _check_enough_labels(model_data: RasaModelData) -> bool: - return len(np.unique(model_data.get(LABEL_KEY, LABEL_SUB_KEY))) >= 2 + # [numpy-upgrade] type ignore can be removed after upgrading to numpy 1.23 + return ( + len( + np.unique( # type: ignore[no-untyped-call] + model_data.get(LABEL_KEY, LABEL_SUB_KEY) + ) + ) + >= 2 + ) def train(self, training_data: TrainingData) -> Resource: """Train the embedding intent classifier on a data set.""" diff --git a/rasa/nlu/classifiers/sklearn_intent_classifier.py 
index 4166ab37254a..43abd07933fc 100644
--- a/rasa/nlu/classifiers/sklearn_intent_classifier.py
+++ b/rasa/nlu/classifiers/sklearn_intent_classifier.py
@@ -163,7 +163,10 @@ def _get_sentence_features(message: Message) -> np.ndarray:
 
     def _num_cv_splits(self, y: np.ndarray) -> int:
         folds = self.component_config["max_cross_validation_folds"]
-        return max(2, min(folds, np.min(np.bincount(y)) // 5))
+        # [numpy-upgrade] type ignore can be removed after upgrading to numpy 1.23
+        return max(
+            2, min(folds, np.min(np.bincount(y)) // 5)  # type: ignore[no-untyped-call]
+        )
 
     def _create_classifier(
         self, num_threads: int, y: np.ndarray
@@ -260,7 +263,11 @@ def predict(self, X: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
         pred_result = self.predict_prob(X)
         # sort the probabilities retrieving the indices of
         # the elements in sorted order
-        sorted_indices = np.fliplr(np.argsort(pred_result, axis=1))
+
+        # [numpy-upgrade] type ignore can be removed after upgrading to numpy 1.23
+        sorted_indices = np.fliplr(  # type: ignore[no-untyped-call]
+            np.argsort(pred_result, axis=1)
+        )
         return sorted_indices, pred_result[:, sorted_indices]
 
     def persist(self) -> None:
diff --git a/rasa/nlu/featurizers/dense_featurizer/dense_featurizer.py b/rasa/nlu/featurizers/dense_featurizer/dense_featurizer.py
index d3bc6990be69..7c7c79c3a691 100644
--- a/rasa/nlu/featurizers/dense_featurizer/dense_featurizer.py
+++ b/rasa/nlu/featurizers/dense_featurizer/dense_featurizer.py
@@ -48,7 +48,10 @@ def aggregate_sequence_features(
         if pooling_operation == MEAN_POOLING:
             return np.mean(dense_sequence_features, axis=0, keepdims=True)
         elif pooling_operation == MAX_POOLING:
-            return np.max(dense_sequence_features, axis=0, keepdims=True)
+            # [numpy-upgrade] type ignore can be removed after upgrading to numpy 1.23
+            return np.max(
+                dense_sequence_features, axis=0, keepdims=True
+            )  # type: ignore[no-untyped-call]
         else:
             raise InvalidConfigException(
                 f"Invalid pooling operation specified. Available operations are "
diff --git a/rasa/nlu/featurizers/dense_featurizer/lm_featurizer.py b/rasa/nlu/featurizers/dense_featurizer/lm_featurizer.py
index 9c89c6ed87d5..f48ff4c8671a 100644
--- a/rasa/nlu/featurizers/dense_featurizer/lm_featurizer.py
+++ b/rasa/nlu/featurizers/dense_featurizer/lm_featurizer.py
@@ -358,8 +358,7 @@ def _compute_attention_mask(
             )
             attention_mask.append(padded_sequence)
 
-        attention_mask = np.array(attention_mask).astype(np.float32)
-        return attention_mask
+        return np.array(attention_mask).astype(np.float32)
 
     def _extract_sequence_lengths(
         self, batch_token_ids: List[List[int]]
@@ -542,8 +541,9 @@ def _add_extra_padding(
         reshaped_sequence_embeddings = []
         for index, embedding in enumerate(sequence_embeddings):
             embedding_size = embedding.shape[-1]
+            # [numpy-upgrade] type ignore can be removed after upgrading to numpy 1.23
             if actual_sequence_lengths[index] > self.max_model_sequence_length:
-                embedding = np.concatenate(
+                embedding = np.concatenate(  # type: ignore[no-untyped-call]
                     [
                         embedding,
                         np.zeros(
@@ -654,9 +654,8 @@ def _get_model_features_for_batch(
         sequence_final_embeddings = []
         for embeddings, tokens in zip(sequence_embeddings, batch_tokens):
             sequence_final_embeddings.append(embeddings[: len(tokens)])
-        sequence_final_embeddings = np.array(sequence_final_embeddings)
 
-        return sentence_embeddings, sequence_final_embeddings
+        return sentence_embeddings, np.array(sequence_final_embeddings)
 
     def _get_docs_for_batch(
         self,
diff --git a/rasa/nlu/featurizers/dense_featurizer/mitie_featurizer.py b/rasa/nlu/featurizers/dense_featurizer/mitie_featurizer.py
index b94b9364e36c..cbba1ea2b4a0 100644
--- a/rasa/nlu/featurizers/dense_featurizer/mitie_featurizer.py
+++ b/rasa/nlu/featurizers/dense_featurizer/mitie_featurizer.py
@@ -159,10 +159,9 @@ def features_for_tokens(
         feature_extractor: "mitie.total_word_feature_extractor",
     ) -> Tuple[np.ndarray, np.ndarray]:
         """Calculates features."""
-        sequence_features = []
-        for token in tokens:
-            sequence_features.append(feature_extractor.get_feature_vector(token.text))
-        sequence_features = np.array(sequence_features)
+        sequence_features = np.array(
+            [feature_extractor.get_feature_vector(token.text) for token in tokens]
+        )
 
         sentence_fetaures = self.aggregate_sequence_features(
             sequence_features, self.pooling_operation
diff --git a/rasa/nlu/featurizers/sparse_featurizer/lexical_syntactic_featurizer.py b/rasa/nlu/featurizers/sparse_featurizer/lexical_syntactic_featurizer.py
index 97e11b54089b..92312197755a 100644
--- a/rasa/nlu/featurizers/sparse_featurizer/lexical_syntactic_featurizer.py
+++ b/rasa/nlu/featurizers/sparse_featurizer/lexical_syntactic_featurizer.py
@@ -473,10 +473,10 @@ def _map_raw_features_to_indices(
                 if feature_idx > -1:
                     rows.append(token_idx)
                     cols.append(feature_idx)
-        rows = np.array(rows)
-        cols = np.array(cols)
         data = np.ones(len(rows))
-        return scipy.sparse.coo_matrix((data, (rows, cols)), shape=shape)
+        return scipy.sparse.coo_matrix(
+            (data, (np.array(rows), np.array(cols))), shape=shape
+        )
 
     @classmethod
     def create(
diff --git a/rasa/shared/nlu/training_data/features.py b/rasa/shared/nlu/training_data/features.py
index 273b3ad22631..3dd58f57fcb2 100644
--- a/rasa/shared/nlu/training_data/features.py
+++ b/rasa/shared/nlu/training_data/features.py
@@ -98,8 +98,8 @@ def _combine_dense_features(self, additional_features: Features) -> None:
                 f"Cannot combine dense features as sequence dimensions do not "
                 f"match: {self.features.ndim} != {additional_features.features.ndim}."
             )
-
-        self.features = np.concatenate(
+        # [numpy-upgrade] type ignore can be removed after upgrading to numpy 1.23
+        self.features = np.concatenate(  # type: ignore[no-untyped-call]
             (self.features, additional_features.features), axis=-1
         )
 
@@ -149,7 +149,7 @@ def __eq__(self, other: Any) -> bool:
     def fingerprint(self) -> Text:
         """Calculate a stable string fingerprint for the features."""
         if self.is_dense():
-            f_as_text = self.features.tostring()
+            f_as_text = self.features.tobytes()
         else:
             f_as_text = rasa.shared.nlu.training_data.util.sparse_matrix_to_string(
                 self.features
@@ -317,7 +317,10 @@ def combine(
         # Combine the features
         arbitrary_feature = features_list[0]
         if not arbitrary_feature.is_sparse():
-            features = np.concatenate([f.features for f in features_list], axis=-1)
+            # [numpy-upgrade] type ignore can be removed after upgrading to numpy 1.23
+            features = np.concatenate(  # type: ignore[no-untyped-call]
+                [f.features for f in features_list], axis=-1
+            )
         else:
             features = scipy.sparse.hstack([f.features for f in features_list])
         return Features(
diff --git a/rasa/utils/common.py b/rasa/utils/common.py
index dae1dd26abb1..48fde9b254d2 100644
--- a/rasa/utils/common.py
+++ b/rasa/utils/common.py
@@ -19,6 +19,7 @@
     Union,
     ContextManager,
     Set,
+    Tuple,
 )
 from socket import SOCK_DGRAM, SOCK_STREAM
 
@@ -38,14 +39,16 @@
 
 T = TypeVar("T")
 
-EXPECTED_PILLOW_DEPRECATION_WARNINGS = [
+
+EXPECTED_PILLOW_DEPRECATION_WARNINGS: List[Tuple[Type[Warning], str]] = [
     # Keras uses deprecated Pillow features
     # cf. https://github.com/keras-team/keras/issues/16639
     (DeprecationWarning, f"{method} is deprecated and will be removed in Pillow 10 .*")
     for method in ["BICUBIC", "NEAREST", "BILINEAR", "HAMMING", "BOX", "LANCZOS"]
 ]
 
-EXPECTED_WARNINGS = [
+
+EXPECTED_WARNINGS: List[Tuple[Type[Warning], str]] = [
     # TODO (issue #9932)
     (
         np.VisibleDeprecationWarning,
@@ -63,7 +66,9 @@
     # is not available on PyPi, so we cannot pin the newer version.
     # cf. https://github.com/google/flatbuffers/issues/6957
     (DeprecationWarning, "the imp module is deprecated in favour of importlib.*"),
-] + EXPECTED_PILLOW_DEPRECATION_WARNINGS
+]
+
+EXPECTED_WARNINGS.extend(EXPECTED_PILLOW_DEPRECATION_WARNINGS)
 
 
 class TempDirectoryPath(str, ContextManager):
diff --git a/rasa/utils/plotting.py b/rasa/utils/plotting.py
index 00e05fbbced3..c2ffbc0e8733 100644
--- a/rasa/utils/plotting.py
+++ b/rasa/utils/plotting.py
@@ -167,11 +167,21 @@ def _extract_paired_histogram_specification(
     Raises:
         ValueError: If histogram_data does not contain values.
""" - if not histogram_data or not np.concatenate(histogram_data).size: + # [numpy-upgrade] type ignore can be removed after upgrading to numpy 1.23 + if ( + not histogram_data + or not np.concatenate(histogram_data).size # type: ignore[no-untyped-call] + ): rasa.shared.utils.io.raise_warning("No data to plot paired histogram.") raise ValueError("No data to plot paired histogram.") - min_data_value = np.min(np.concatenate(histogram_data)) - max_data_value = np.max(np.concatenate(histogram_data)) + # [numpy-upgrade] type ignore can be removed after upgrading to numpy 1.23 + min_data_value = np.min( + np.concatenate(histogram_data) # type: ignore[no-untyped-call] + ) + # [numpy-upgrade] type ignore can be removed after upgrading to numpy 1.23 + max_data_value = np.max( + np.concatenate(histogram_data) # type: ignore[no-untyped-call] + ) bin_width = (max_data_value - min_data_value) / num_bins bins = [ min_data_value + i * bin_width @@ -181,7 +191,10 @@ def _extract_paired_histogram_specification( ] histograms = [ # A list of counts - how often a value in `data` falls into a particular bin - np.histogram(data, bins=bins, density=density)[0] + # [numpy-upgrade] type ignore can be removed after upgrading to numpy 1.23 + np.histogram(data, bins=bins, density=density)[ # type: ignore[no-untyped-call] + 0 + ] for data in histogram_data ] @@ -274,7 +287,8 @@ def plot_paired_histogram( axes[side].barh( bins[:-1], tallies[side], - height=np.diff(bins), + # [numpy-upgrade] type ignore can be removed after upgrading to numpy 1.23 + height=np.diff(bins), # type: ignore[no-untyped-call] align="center", color=colors[side], linewidth=1, diff --git a/rasa/utils/tensorflow/data_generator.py b/rasa/utils/tensorflow/data_generator.py index af026f63b35f..f79c4d34e4ad 100644 --- a/rasa/utils/tensorflow/data_generator.py +++ b/rasa/utils/tensorflow/data_generator.py @@ -270,7 +270,10 @@ def _4d_scipy_matrix_to_values(feature_array: FeatureArray) -> List[np.ndarray]: # transformation does not work (e.g. you cannot access x.row, x.col) if not isinstance(array_of_array_of_sparse[0][0], scipy.sparse.coo_matrix): array_of_array_of_sparse = [ - [x.tocoo() for x in array_of_sparse] + [ + x.tocoo() if isinstance(x, scipy.sparse.spmatrix) else x + for x in array_of_sparse + ] for array_of_sparse in array_of_array_of_sparse ] diff --git a/rasa/utils/tensorflow/model_data.py b/rasa/utils/tensorflow/model_data.py index b195506cfa42..d1e50d14538c 100644 --- a/rasa/utils/tensorflow/model_data.py +++ b/rasa/utils/tensorflow/model_data.py @@ -139,6 +139,8 @@ def __reduce__(self) -> Tuple[Any, Any, Any]: A tuple. 
""" pickled_state = super(FeatureArray, self).__reduce__() + if isinstance(pickled_state, str): + raise TypeError("np array __reduce__ returned string instead of tuple.") new_state = pickled_state[2] + ( self.number_of_dimensions, self.is_sparse, @@ -158,7 +160,10 @@ def __setstate__(self, state: Any, **kwargs: Any) -> None: self.number_of_dimensions = state[-3] self.is_sparse = state[-2] self.units = state[-1] - super(FeatureArray, self).__setstate__(state[0:-3], **kwargs) + # [numpy-upgrade] type ignore can be removed after upgrading to numpy 1.23 + super(FeatureArray, self).__setstate__( + state[0:-3], **kwargs + ) # type: ignore[no-untyped-call] # pytype: enable=attribute-error @@ -601,7 +606,16 @@ def split( label_ids = self._create_label_ids( self.data[self.label_key][self.label_sub_key][0] ) - label_counts = dict(zip(*np.unique(label_ids, return_counts=True, axis=0))) + # [numpy-upgrade] type ignore can be removed after upgrading to numpy 1.23 + label_counts: Dict[int, int] = dict( + zip( + *np.unique( + label_ids, # type: ignore[no-untyped-call] + return_counts=True, + axis=0, + ) + ) + ) self._check_train_test_sizes(number_of_test_examples, label_counts) @@ -699,7 +713,8 @@ def balanced_data(self, data: Data, batch_size: int, shuffle: bool) -> Data: label_ids = self._create_label_ids(data[self.label_key][self.label_sub_key][0]) - unique_label_ids, counts_label_ids = np.unique( + # [numpy-upgrade] type ignore can be removed after upgrading to numpy 1.23 + unique_label_ids, counts_label_ids = np.unique( # type: ignore[no-untyped-call] label_ids, return_counts=True, axis=0 ) num_label_ids = len(unique_label_ids) @@ -723,7 +738,7 @@ def balanced_data(self, data: Data, batch_size: int, shuffle: bool) -> Data: if shuffle: indices_of_labels = np.random.permutation(num_label_ids) else: - indices_of_labels = range(num_label_ids) + indices_of_labels = np.asarray(range(num_label_ids)) for index in indices_of_labels: if num_data_cycles[index] > 0 and not skipped[index]: @@ -754,12 +769,15 @@ def balanced_data(self, data: Data, batch_size: int, shuffle: bool) -> Data: break final_data: Data = defaultdict(lambda: defaultdict(list)) + # [numpy-upgrade] type ignore can be removed after upgrading to numpy 1.23 for key, attribute_data in new_data.items(): for sub_key, features in attribute_data.items(): for f in features: final_data[key][sub_key].append( FeatureArray( - np.concatenate(np.array(f)), + np.concatenate( # type: ignore[no-untyped-call] + np.array(f) + ), number_of_dimensions=f[0].number_of_dimensions, ) ) @@ -930,9 +948,10 @@ def _combine_features( return FeatureArray( scipy.sparse.vstack([feature_1, feature_2]), number_of_dimensions ) - + # [numpy-upgrade] type ignore can be removed after upgrading to numpy 1.23 return FeatureArray( - np.concatenate([feature_1, feature_2]), number_of_dimensions + np.concatenate([feature_1, feature_2]), # type: ignore[no-untyped-call] + number_of_dimensions, ) @staticmethod diff --git a/rasa/utils/tensorflow/model_data_utils.py b/rasa/utils/tensorflow/model_data_utils.py index c78febb8b7f2..843e3f68c561 100644 --- a/rasa/utils/tensorflow/model_data_utils.py +++ b/rasa/utils/tensorflow/model_data_utils.py @@ -428,11 +428,10 @@ def _extract_features( attribute: Text, ) -> Tuple[ List[np.ndarray], - Dict[Text, List[List["Features"]]], - Dict[Text, List[List["Features"]]], + Dict[Text, List[List[np.ndarray]]], + Dict[Text, List[List[scipy.sparse.spmatrix]]], ]: - """Create masks for all attributes of the given features and split the features - into sparse 
+    """Create masks for feature attributes and split into dense and sparse features.
 
     Args:
         features: all features
@@ -490,7 +489,10 @@ def _extract_features(
         # add additional dimension to attribute mask
         # to get a vector of shape (dialogue length x 1),
         # the batch dim will be added later
-        attribute_mask = np.expand_dims(attribute_mask, -1)
+        # [numpy-upgrade] type ignore can be removed after upgrading to numpy 1.23
+        attribute_mask = np.expand_dims(
+            attribute_mask, -1
+        )  # type: ignore[no-untyped-call]
         attribute_masks.append(attribute_mask)
 
     return attribute_masks, dense_features, sparse_features
diff --git a/rasa/utils/tensorflow/models.py b/rasa/utils/tensorflow/models.py
index ec1c7a98b38d..461ad3b9914b 100644
--- a/rasa/utils/tensorflow/models.py
+++ b/rasa/utils/tensorflow/models.py
@@ -45,7 +45,7 @@
 )
 from rasa.shared.nlu.constants import TEXT
 from rasa.shared.exceptions import RasaException
-
+from rasa.utils.tensorflow.types import BatchData, MaybeNestedBatchData
 
 if TYPE_CHECKING:
     from tensorflow.python.types.core import GenericFunction
@@ -234,7 +234,8 @@ def _dynamic_signature(
         element_spec = []
         for tensor in batch_in:
             if len(tensor.shape) > 1:
-                shape = [None] * (len(tensor.shape) - 1) + [tensor.shape[-1]]
+                shape: List[Union[None, int]] = [None] * (len(tensor.shape) - 1)
+                shape += [tensor.shape[-1]]
             else:
                 shape = [None]
             element_spec.append(tf.TensorSpec(shape, tensor.dtype))
@@ -330,9 +331,9 @@ def run_inference(
 
     @staticmethod
     def _merge_batch_outputs(
-        all_outputs: Dict[Text, Union[np.ndarray, Dict[Text, np.ndarray]]],
+        all_outputs: Dict[Text, Union[np.ndarray, Dict[Text, Any]]],
         batch_output: Dict[Text, Union[np.ndarray, Dict[Text, np.ndarray]]],
-    ) -> Dict[Text, Union[np.ndarray, Dict[Text, np.ndarray]]]:
+    ) -> Dict[Text, Union[np.ndarray, Dict[Text, Any]]]:
         """Merges a batch's output into the output for all batches.
 
         Function assumes that the schema of batch output remains the same,
@@ -350,8 +351,9 @@ def _merge_batch_outputs(
         if not all_outputs:
             return batch_output
         for key, val in batch_output.items():
+            # [numpy-upgrade] type ignore can be removed after upgrading to numpy 1.23
            if isinstance(val, np.ndarray):
-                all_outputs[key] = np.concatenate(
+                all_outputs[key] = np.concatenate(  # type: ignore[no-untyped-call]
                     [all_outputs[key], batch_output[key]], axis=0
                 )
 
@@ -367,7 +369,7 @@ def _empty_lists_to_none_in_dict(input_dict: Dict[Text, Any]) -> Dict[Text, Any]
 
         def _recurse(
             x: Union[Dict[Text, Any], List[Any], np.ndarray]
-        ) -> Optional[Union[Dict[Text, Any], List[np.ndarray]]]:
+        ) -> Optional[Union[Dict[Text, Any], List[Any], np.ndarray]]:
             if isinstance(x, dict):
                 return {k: _recurse(v) for k, v in x.items()}
             elif (isinstance(x, list) or isinstance(x, np.ndarray)) and np.size(x) == 0:
@@ -441,7 +443,7 @@ def load(
 
     @staticmethod
     def batch_to_model_data_format(
-        batch: Union[Tuple[tf.Tensor, ...], Tuple[np.ndarray, ...]],
+        batch: MaybeNestedBatchData,
         data_signature: Dict[Text, Dict[Text, List[FeatureSignature]]],
     ) -> Dict[Text, Dict[Text, List[tf.Tensor]]]:
         """Convert input batch tensors into batch data format.
@@ -454,8 +456,7 @@ def batch_to_model_data_format(
         # during training batch is a tuple of input and target data
         # as our target data is inside the input data, we are just interested in the
         # input data
-        if isinstance(batch[0], Tuple):
-            batch = batch[0]
+        unpacked_batch = batch[0] if isinstance(batch[0], Tuple) else batch
         batch_data: Dict[Text, Dict[Text, List[tf.Tensor]]] = defaultdict(
             lambda: defaultdict(list)
         )
@@ -471,11 +472,11 @@
                 )
                 if is_sparse:
                     tensor, idx = RasaModel._convert_sparse_features(
-                        batch, feature_dimension, idx, number_of_dimensions
+                        unpacked_batch, feature_dimension, idx, number_of_dimensions
                     )
                 else:
                     tensor, idx = RasaModel._convert_dense_features(
-                        batch, feature_dimension, idx, number_of_dimensions
+                        unpacked_batch, feature_dimension, idx, number_of_dimensions
                     )
                 batch_data[key][sub_key].append(tensor)
 
@@ -483,22 +484,23 @@
 
     @staticmethod
     def _convert_dense_features(
-        batch: Union[Tuple[tf.Tensor], Tuple[np.ndarray]],
+        batch: BatchData,
         feature_dimension: int,
         idx: int,
         number_of_dimensions: int,
     ) -> Tuple[tf.Tensor, int]:
-        if isinstance(batch[idx], tf.Tensor):
+        batch_at_idx = batch[idx]
+        if isinstance(batch_at_idx, tf.Tensor):
            # explicitly substitute last dimension in shape with known
            # static value
            if number_of_dimensions > 1 and (
-                batch[idx].shape is None or batch[idx].shape[-1] is None
+                batch_at_idx.shape is None or batch_at_idx.shape[-1] is None
            ):
                shape: List[Optional[int]] = [None] * (number_of_dimensions - 1)
                shape.append(feature_dimension)
-                batch[idx].set_shape(shape)
+                batch_at_idx.set_shape(shape)
 
-            return batch[idx], idx + 1
+            return batch_at_idx, idx + 1
 
         # convert to Tensor
         return (
@@ -508,7 +510,7 @@ def _convert_dense_features(
 
     @staticmethod
     def _convert_sparse_features(
-        batch: Union[Tuple[tf.Tensor, ...], Tuple[np.ndarray, ...]],
+        batch: BatchData,
         feature_dimension: int,
         idx: int,
         number_of_dimensions: int,
diff --git a/rasa/utils/tensorflow/types.py b/rasa/utils/tensorflow/types.py
new file mode 100644
index 000000000000..26cf5a0e8e81
--- /dev/null
+++ b/rasa/utils/tensorflow/types.py
@@ -0,0 +1,6 @@
+from typing import Tuple, Union
+import tensorflow as tf
+import numpy as np
+
+BatchData = Union[Tuple[tf.Tensor, ...], Tuple[np.ndarray, ...]]
+MaybeNestedBatchData = Union[Tuple[BatchData, ...], BatchData]
diff --git a/rasa/utils/train_utils.py b/rasa/utils/train_utils.py
index 291ce9286da8..5aa305618e3c 100644
--- a/rasa/utils/train_utils.py
+++ b/rasa/utils/train_utils.py
@@ -42,7 +42,7 @@ def rank_and_mask(
     confidences: np.ndarray,
     ranking_length: int = 0,
     renormalize: bool = False
-) -> Tuple[np.array, np.array]:
+) -> Tuple[np.ndarray, np.ndarray]:
     """Computes a ranking of the given confidences.
 
     First, it computes a list containing the indices that would sort all the given