From f5abd985d1261dcbbc8f079e3bb02a819b3d17ac Mon Sep 17 00:00:00 2001 From: Stefan Jansen Date: Thu, 26 Sep 2024 10:20:17 -0400 Subject: [PATCH] MAINT: Numpy 2.0 compatibility (#264) * version updates and test fixes * black version update * add build_wheels tag input * fix up/download_artifactsv4 errors * limit py39 > numpy1 * skip test_summaries_after_fillna for mac os * skip tests on arm64 * no cp39 arm64 wheels --- .github/workflows/build_wheels.yml | 70 +++--- .github/workflows/ci_tests_full.yml | 2 +- .github/workflows/ci_tests_quick.yml | 4 +- .pre-commit-config.yaml | 8 +- README.md | 20 +- pyproject.toml | 53 +++-- src/zipline/_protocol.pxd | 2 +- src/zipline/algorithm.py | 8 +- src/zipline/assets/_assets.pyx | 1 + src/zipline/assets/assets.py | 11 +- src/zipline/assets/continuous_futures.pyx | 7 +- src/zipline/country.py | 1 + src/zipline/data/_equities.pyx | 1 - src/zipline/data/_resample.pyx | 1 + src/zipline/data/bundles/core.py | 7 +- src/zipline/data/bundles/csvdir.py | 1 + src/zipline/data/bundles/quandl.py | 1 + src/zipline/data/data_portal.py | 22 +- src/zipline/data/fx/hdf5.py | 1 + src/zipline/data/fx/in_memory.py | 1 + src/zipline/data/hdf5_daily_bars.py | 2 +- src/zipline/data/history_loader.py | 22 +- src/zipline/data/resample.py | 10 +- src/zipline/dispatch.py | 1 + src/zipline/examples/__init__.py | 13 ++ src/zipline/examples/dual_ema_talib.py | 7 + src/zipline/examples/momentum_pipeline.py | 1 + src/zipline/examples/olmar.py | 1 + src/zipline/finance/ledger.py | 8 +- src/zipline/finance/metrics/metric.py | 12 +- src/zipline/finance/slippage.py | 7 +- src/zipline/lib/_factorize.pyx | 16 +- src/zipline/lib/adjustment.pyx | 2 +- src/zipline/lib/labelarray.py | 1 + src/zipline/lib/normalize.py | 2 +- src/zipline/lib/quantiles.py | 4 +- src/zipline/lib/rank.pyx | 3 +- src/zipline/pipeline/api_utils.py | 1 + .../pipeline/classifiers/classifier.py | 1 + src/zipline/pipeline/common.py | 1 + src/zipline/pipeline/data/equity_pricing.py | 1 + src/zipline/pipeline/data/testing.py | 1 + src/zipline/pipeline/domain.py | 1 + src/zipline/pipeline/downsample_helpers.py | 1 + src/zipline/pipeline/engine.py | 1 + src/zipline/pipeline/expression.py | 1 + src/zipline/pipeline/factors/basic.py | 18 +- src/zipline/pipeline/factors/events.py | 1 + src/zipline/pipeline/factors/factor.py | 1 + src/zipline/pipeline/factors/technical.py | 1 + src/zipline/pipeline/filters/filter.py | 1 + src/zipline/pipeline/filters/smoothing.py | 1 + src/zipline/pipeline/graph.py | 1 + src/zipline/pipeline/hooks/progress.py | 1 + src/zipline/pipeline/loaders/base.py | 1 + src/zipline/pipeline/loaders/events.py | 6 +- src/zipline/pipeline/loaders/frame.py | 1 + src/zipline/pipeline/loaders/testing.py | 1 + src/zipline/pipeline/mixins.py | 1 + src/zipline/pipeline/term.py | 1 + src/zipline/pipeline/visualize.py | 1 + src/zipline/test_algorithms.py | 1 + src/zipline/testing/core.py | 7 +- src/zipline/testing/fixtures.py | 12 +- src/zipline/utils/cache.py | 7 +- src/zipline/utils/deprecate.py | 1 + src/zipline/utils/memoize.py | 1 + src/zipline/utils/numpy_utils.py | 10 +- src/zipline/utils/pandas_utils.py | 1 + src/zipline/utils/paths.py | 1 + src/zipline/utils/preprocess.py | 1 + src/zipline/utils/run_algo.py | 24 +- src/zipline/utils/sentinel.py | 1 + src/zipline/utils/sharedoc.py | 1 + tests/finance/test_slippage.py | 2 +- tests/pipeline/base.py | 1 + tests/pipeline/test_adjusted_array.py | 1 + tests/pipeline/test_adjustment.py | 1 + tests/pipeline/test_column.py | 1 + tests/pipeline/test_computable_term.py 
| 1 + tests/pipeline/test_dataset.py | 1 + tests/pipeline/test_downsampling.py | 1 + tests/pipeline/test_engine.py | 1 + tests/pipeline/test_events.py | 1 + tests/pipeline/test_factor.py | 20 +- tests/pipeline/test_filter.py | 1 + tests/pipeline/test_frameload.py | 1 + tests/pipeline/test_pipeline_algo.py | 1 + tests/pipeline/test_quarters_estimates.py | 206 +++++++++--------- tests/pipeline/test_slice.py | 1 + tests/pipeline/test_statistical.py | 12 +- tests/pipeline/test_technical.py | 14 +- tests/pipeline/test_term.py | 1 + tests/resources/pipeline_inputs/generate.py | 1 + tests/test_fetcher.py | 2 +- tests/test_memoize.py | 1 + tests/test_testing.py | 1 + tests/utils/test_numpy_utils.py | 1 + tests/utils/test_pandas_utils.py | 1 + tests/utils/test_preprocess.py | 1 + 100 files changed, 440 insertions(+), 284 deletions(-) diff --git a/.github/workflows/build_wheels.yml b/.github/workflows/build_wheels.yml index 04010cb9ac..b83b79fc26 100644 --- a/.github/workflows/build_wheels.yml +++ b/.github/workflows/build_wheels.yml @@ -3,11 +3,17 @@ name: PyPI on: workflow_dispatch: inputs: - publish_to_pypi: - description: 'Publish to PyPI?' + target: + type: choice + description: 'Package Index' required: true - type: boolean - default: false + default: 'TESTPYPI' + options: [ 'TESTPYPI', 'PYPI' ] + version: + type: string + description: 'Version tag' + required: true + default: '3.1' jobs: build_wheels: @@ -17,7 +23,7 @@ jobs: fail-fast: false matrix: os: [ ubuntu-latest , windows-latest, macos-latest ] - python: [ "cp38", "cp39", "cp310", "cp311" ] + python: [ "cp39", "cp310", "cp311", "cp312" ] arch: [ auto64 ] steps: @@ -25,11 +31,8 @@ jobs: uses: actions/checkout@v4 with: fetch-depth: 0 - -# - name: Setup Python -# uses: actions/setup-python@v5 -# with: -# python-version: ${{ matrix.python }} + fetch-tags: true + ref: ${{ github.event.inputs.version }} - name: Set Xcode version uses: maxim-lobanov/setup-xcode@v1 @@ -41,8 +44,6 @@ jobs: if: runner.os != 'Windows' uses: pypa/cibuildwheel@v2.21.1 env: - CIBW_BEFORE_ALL_LINUX: ./tools/install_talib.sh - CIBW_BEFORE_ALL_MACOS: brew install ta-lib CIBW_ARCHS_LINUX: ${{ matrix.arch }} CIBW_ARCHS_MACOS: x86_64 arm64 CIBW_BUILD: "${{ matrix.python }}-*" @@ -59,16 +60,13 @@ jobs: uses: pypa/cibuildwheel@v2.21.1 env: CIBW_BUILD: "${{ matrix.python }}-win_amd64" - CIBW_BEFORE_TEST_WINDOWS: > - call "c:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" amd64 && - call ./tools/install_talib.bat - name: Store artifacts uses: actions/upload-artifact@v4 with: + name: my-artifact-${{ matrix.os }}-${{ matrix.python }}-${{ matrix.arch }}.whl path: ./wheelhouse/*.whl - build_sdist: name: Build source distribution runs-on: ubuntu-latest @@ -76,41 +74,49 @@ jobs: - uses: actions/checkout@v4 with: fetch-depth: 0 + fetch-tags: true + ref: ${{ github.event.inputs.version }} - uses: actions/setup-python@v5 name: Install Python with: python-version: '3.11' - - name: Build sdist run: | pip install -U pip setuptools build python -m build --sdist - - - uses: actions/upload-artifact@v4 + - name: upload sdist + uses: actions/upload-artifact@v4 with: - path: dist/*.tar.gz + name: my-artifact-sdist + path: ./dist/*.tar.gz upload_pypi: needs: [ build_wheels, build_sdist ] runs-on: ubuntu-latest steps: - - uses: actions/download-artifact@v4 + - name: Download All Artifacts + uses: actions/download-artifact@v4 with: - name: artifact - path: dist - - - name: publish to testpypi + pattern: my-artifact-* + path: artifacts/ + merge-multiple: 
true + - name: Display structure of downloaded files + run: ls -R artifacts + - name: Publish to PyPI + if: ${{ github.event.inputs.target == 'PYPI' }} uses: pypa/gh-action-pypi-publish@release/v1 - if: ${{ inputs.publish_to_pypi == false }} with: user: __token__ - password: ${{ secrets.TESTPYPI_TOKEN }} - repository_url: https://test.pypi.org/legacy/ - - - name: publish to pypi + password: ${{ secrets.PYPI_TOKEN }} + packages-dir: artifacts/ + - name: Publish to PyPI - Test + if: ${{ github.event.inputs.target == 'TESTPYPI' }} uses: pypa/gh-action-pypi-publish@release/v1 - if: ${{ inputs.publish_to_pypi == true }} with: user: __token__ - password: ${{ secrets.PYPI_TOKEN }} + password: ${{ secrets.TESTPYPI_TOKEN }} + repository-url: https://test.pypi.org/legacy/ + skip-existing: true + verbose: true + packages-dir: artifacts/ diff --git a/.github/workflows/ci_tests_full.yml b/.github/workflows/ci_tests_full.yml index 00069eac66..fef28e678e 100644 --- a/.github/workflows/ci_tests_full.yml +++ b/.github/workflows/ci_tests_full.yml @@ -18,7 +18,7 @@ jobs: with: options: "--check --diff" src: "./src ./tests" - version: "~=22.0" + version: '24.1' flake8-lint: name: Lint Check diff --git a/.github/workflows/ci_tests_quick.yml b/.github/workflows/ci_tests_quick.yml index e5aed83a26..ad71ea5b2c 100644 --- a/.github/workflows/ci_tests_quick.yml +++ b/.github/workflows/ci_tests_quick.yml @@ -3,6 +3,8 @@ name: CI Tests - Quick on: workflow_dispatch: push: + branches: + - main pull_request: branches: - main @@ -17,7 +19,7 @@ jobs: with: options: "--check --diff" src: "./src ./tests" - version: "~=22.0" + version: '24.1' flake8-lint: name: Lint Check diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 37f5d73add..41989ca3f6 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,5 +1,5 @@ default_language_version: - python: python3.11 + python: python3.12 repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: 'v4.4.0' @@ -8,12 +8,12 @@ repos: - id: check-merge-conflict - id: end-of-file-fixer - id: trailing-whitespace - - repo: https://github.com/ambv/black - rev: 23.7.0 + - repo: https://github.com/psf/black + rev: 24.1.0 hooks: - id: black additional_dependencies: ['click==8.0.4'] - language_version: python3.11 + language_version: python3.12 - repo: https://github.com/PyCQA/flake8 rev: '6.0.0' hooks: diff --git a/README.md b/README.md index 98919f2ce8..aa77d4310d 100644 --- a/README.md +++ b/README.md @@ -23,13 +23,15 @@ by [Stefan Jansen](https://www.linkedin.com/in/applied-ai/) who is trying to kee - **PyData Integration:** Input of historical data and output of performance statistics are based on Pandas DataFrames to integrate nicely into the existing PyData ecosystem. - **Statistics and Machine Learning Libraries:** You can use libraries like matplotlib, scipy, statsmodels, and scikit-learn to support development, analysis, and visualization of state-of-the-art trading systems. +> **Note:** Release 3.05 makes Zipline compatible with Numpy 2.0, which requires Pandas 2.2.2 or higher. If you are using an older version of Pandas, you will need to upgrade it. Other packages may also need more time to catch up with the latest Numpy release. + > **Note:** Release 3.0 updates Zipline to use [pandas](https://pandas.pydata.org/pandas-docs/stable/whatsnew/v2.0.0.html) >= 2.0 and [SQLAlchemy](https://docs.sqlalchemy.org/en/20/) > 2.0. These are major version updates that may break existing code; please review the linked docs.
> **Note:** Release 2.4 updates Zipline to use [exchange_calendars](https://github.com/gerrymanoim/exchange_calendars) >= 4.2. This is a major version update and may break existing code (which we have tried to avoid but cannot guarantee). Please review the changes [here](https://github.com/gerrymanoim/exchange_calendars/issues/61). ## Installation Zipline supports Python >= 3.9 and is compatible with current versions of the relevant [NumFOCUS](https://numfocus.org/sponsored-projects?_sft_project_category=python-interface) libraries, including [pandas](https://pandas.pydata.org/) and [scikit-learn](https://scikit-learn.org/stable/index.html). ### Using `pip` @@ -94,15 +96,25 @@ def handle_data(context, data): long_mavg=long_mavg) ``` -You can then run this algorithm using the Zipline CLI. But first, you need to download some market data with historical prices and trading volumes: +You can then run this algorithm using the Zipline CLI. But first, you need to download some market data with historical prices and trading volumes. + +This will download asset pricing data from [NASDAQ](https://data.nasdaq.com/databases/WIKIP) (formerly [Quandl](https://www.nasdaq.com/about/press-center/nasdaq-acquires-quandl-advance-use-alternative-data)). + +> This requires an API key, which you can get for free by signing up at [NASDAQ Data Link](https://data.nasdaq.com). ```bash +$ export QUANDL_API_KEY="your_key_here" $ zipline ingest -b quandl ``` + +The following will +- stream the data through the algorithm over the specified time range. +- save the resulting performance DataFrame as `dma.pickle`, which you can load and analyze from Python using, e.g., [pyfolio-reloaded](https://github.com/stefan-jansen/pyfolio-reloaded). + +```bash $ zipline run -f dual_moving_average.py --start 2014-1-1 --end 2018-1-1 -o dma.pickle --no-benchmark ``` -This will download asset pricing data sourced from [Quandl](https://www.quandl.com/databases/WIKIP/documentation?anchor=companies) (since [acquisition](https://www.nasdaq.com/about/press-center/nasdaq-acquires-quandl-advance-use-alternative-data) hosted by NASDAQ), and stream it through the algorithm over the specified time range. Then, the resulting performance DataFrame is saved as `dma.pickle`, which you can load and analyze from Python. - You can find other examples in the [zipline/examples](https://github.com/stefan-jansen/zipline-reloaded/tree/main/src/zipline/examples) directory. ## Questions, suggestions, bugs?
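The README quick-start above ends with the results saved to `dma.pickle`. A minimal sketch of the analysis step it alludes to, assuming the standard zipline output columns (`portfolio_value`, `returns`) and, optionally, a pyfolio-reloaded install; this snippet is illustrative and not part of the patch:

```python
# Load the backtest results written by `zipline run ... -o dma.pickle`.
import pandas as pd

perf = pd.read_pickle("dma.pickle")  # one row per trading session
print(perf[["portfolio_value", "returns"]].tail())

# Optional tear sheet via pyfolio-reloaded:
# import pyfolio as pf
# rets, positions, txns = pf.utils.extract_rets_pos_txn_from_zipline(perf)
# pf.create_full_tear_sheet(rets, positions=positions, transactions=txns)
```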
diff --git a/pyproject.toml b/pyproject.toml index 2b560f75ce..e74a920a8c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -31,8 +31,12 @@ classifiers = [ license = { file = "LICENSE" } -requires-python = '>=3.8' +requires-python = '>=3.9' dependencies = [ + "numpy>=1.23.5, <2; python_version<'3.10'", + "numpy>=1.23.5; python_version<'3.12'", + "numpy>=1.26.0; python_version>='3.12'", + "pandas >=1.3.0,<3.0", 'alembic >=0.7.7', 'bcolz-zipline >=1.2.6', 'bottleneck >=1.0.0', @@ -46,8 +50,6 @@ dependencies = [ 'multipledispatch >=0.6.0', 'networkx >=2.0', 'numexpr >=2.6.1', - 'numpy >=1.14.5,<2.0.0', - 'pandas >=1.3', 'patsy >=0.4.0', 'python-dateutil >=2.4.2', 'python-interface >=1.5.3', @@ -57,7 +59,8 @@ dependencies = [ 'six >=1.10.0', 'sqlalchemy >=2', 'statsmodels >=0.6.1', - 'ta-lib >=0.4.09', + # ta-lib is not available for Numpy 2.0 => optional + # 'ta-lib >=0.4.09', 'tables >=3.4.3', 'toolz >=0.8.2', 'exchange-calendars >=4.2.4' @@ -75,7 +78,9 @@ requires = [ 'wheel>=0.36.0', 'Cython>=0.29.21', # 'Cython>=3', - 'oldest-supported-numpy; python_version>="3.8"', + 'numpy>=2.0.0rc1 ; python_version>"3.9"', + 'numpy<2.0 ; python_version<="3.9"' + # 'oldest-supported-numpy; python_version>="3.9"', ] build-backend = 'setuptools.build_meta' @@ -92,20 +97,20 @@ test = [ 'matplotlib >=1.5.3', 'responses >=0.9.0', 'pandas-datareader >=0.2.1', - 'click <8.1.0', +# 'click <8.1.0', + 'click', 'coverage', 'pytest-rerunfailures', - # the following are required to run tests using PostgreSQL instead of SQLite - # 'psycopg2', - # 'pytest-postgresql ==3.1.3' ] + dev = [ 'flake8 >=3.9.1', - 'black', + 'black >=24.0b1', 'pre-commit >=2.12.1', - # 'Cython>=0.29.21,<3', 'Cython>=0.29.21', + 'ruff' ] + docs = [ 'Cython', 'Sphinx >=1.3.2', @@ -114,7 +119,6 @@ docs = [ 'pydata-sphinx-theme', 'sphinx_markdown_tables', 'm2r2' - ] [project.scripts] @@ -137,19 +141,21 @@ local_scheme = 'dirty-tag' "*" = ["*.pyi", "*.pyx", "*.pxi", "*.pxd"] [tool.pytest.ini_options] +pythonpath = ['.'] testpaths = 'tests' addopts = '-v' filterwarnings = 'ignore::DeprecationWarning:pandas_datareader.compat' [tool.cibuildwheel] test-extras = "test" -test-command = "pytest -x --reruns 5 {package}/tests" +test-command = "pytest -x --reruns 5 {project}/tests" build-verbosity = 3 environment = "GITHUB_ACTIONS=true" +skip = "cp39-macosx_arm64 cp39-macosx_universal2:arm64" [tool.cibuildwheel.macos] archs = ["x86_64", "arm64", "universal2"] -test-skip = ["*universal2:arm64"] +test-skip = "cp39* *-macosx_arm64 *-macosx_universal2:arm64" [tool.cibuildwheel.linux] archs = ["auto64"] @@ -161,7 +167,7 @@ test-skip = ["*"] [tool.black] line-length = 88 -target-version = ['py38', 'py39', 'py310', 'py311'] +target-version = ['py39', 'py310', 'py311', 'py312'] exclude = ''' ( asv_bench/env @@ -175,10 +181,22 @@ exclude = ''' ) ''' +[tool.ruff.lint] +select = ["NPY201"] + [tool.tox] legacy_tox_ini = """ [tox] -envlist = py{39,310}-pandas{13,14,15}, py{39,310,311,312}-pandas{20,21,22} +envlist = + py39-pandas{13,14,15}-numpy1 + py310-pandas{13,14,15,20,21,22}-numpy1 + py311-pandas{13,14,15,20,21,22}-numpy1 + py312-pandas{13,14,15,20,21,22}-numpy1 + py39-pandas222-numpy2 + py310-pandas222-numpy2 + py311-pandas222-numpy2 + py312-pandas222-numpy2 + isolated_build = True skip_missing_interpreters = True minversion = 3.23.0 @@ -204,6 +222,9 @@ deps = pandas20: pandas>=2.0,<2.1 pandas21: pandas>=2.1,<2.2 pandas22: pandas>=2.2,<2.3 + pandas222: pandas>=2.2.2,<2.3 + numpy1: numpy>=1.23.5,<2.0 + numpy2: numpy>=2.0,<2.1 commands = pytest -n 4 --reruns 5 
--cov={toxinidir}/src --cov-report term --cov-report=xml --cov-report=html:htmlcov {toxinidir}/tests diff --git a/src/zipline/_protocol.pxd b/src/zipline/_protocol.pxd index e93e3b9a94..20c57ad283 100644 --- a/src/zipline/_protocol.pxd +++ b/src/zipline/_protocol.pxd @@ -1,5 +1,5 @@ cimport numpy as np - +import numpy; numpy.import_array() from zipline.assets._assets cimport Asset diff --git a/src/zipline/algorithm.py b/src/zipline/algorithm.py index 14833db37f..b46aade048 100644 --- a/src/zipline/algorithm.py +++ b/src/zipline/algorithm.py @@ -431,9 +431,11 @@ def before_trading_start(self, data): self._in_before_trading_start = True - with handle_non_market_minutes( - data - ) if self.data_frequency == "minute" else ExitStack(): + with ( + handle_non_market_minutes(data) + if self.data_frequency == "minute" + else ExitStack() + ): self._before_trading_start(self, data) self._in_before_trading_start = False diff --git a/src/zipline/assets/_assets.pyx b/src/zipline/assets/_assets.pyx index 50a2e6bbea..3791e3d1d8 100644 --- a/src/zipline/assets/_assets.pyx +++ b/src/zipline/assets/_assets.pyx @@ -34,6 +34,7 @@ from numpy cimport int64_t import warnings cimport numpy as np + from zipline.utils.calendar_utils import get_calendar diff --git a/src/zipline/assets/assets.py b/src/zipline/assets/assets.py index ad5d235851..150e38c267 100644 --- a/src/zipline/assets/assets.py +++ b/src/zipline/assets/assets.py @@ -142,7 +142,10 @@ def merge_ownership_periods(mappings): def _build_ownership_map_from_rows(rows, key_from_row, value_from_row): mappings = {} for row in rows: - mappings.setdefault(key_from_row(row), [],).append( + mappings.setdefault( + key_from_row(row), + [], + ).append( OwnershipPeriod( # TODO FIX TZ MESS # pd.Timestamp(row.start_date, unit="ns", tz="utc"), @@ -1451,9 +1454,9 @@ def lifetimes(self, dates, include_start_date, country_codes): lifetimes = self._asset_lifetimes.get(country_codes) if lifetimes is None: - self._asset_lifetimes[ - country_codes - ] = lifetimes = self._compute_asset_lifetimes(country_codes=country_codes) + self._asset_lifetimes[country_codes] = lifetimes = ( + self._compute_asset_lifetimes(country_codes=country_codes) + ) raw_dates = as_column(dates.asi8) if include_start_date: diff --git a/src/zipline/assets/continuous_futures.pyx b/src/zipline/assets/continuous_futures.pyx index f673a3ce07..718bbd445e 100644 --- a/src/zipline/assets/continuous_futures.pyx +++ b/src/zipline/assets/continuous_futures.pyx @@ -26,15 +26,14 @@ from cpython.object cimport ( Py_GT, Py_LT, ) -from cpython cimport bool - +# from cpython cimport bool from functools import partial from numpy import array, empty, iinfo from numpy cimport int64_t, int64_t from pandas import Timestamp from zipline.utils.calendar_utils import get_calendar -import warnings +# import warnings def delivery_predicate(codes, contract): @@ -257,7 +256,7 @@ cdef class ContinuousFuture: def is_exchange_open(self, dt_minute): """ - + Parameters ---------- dt_minute: pd.Timestamp (UTC, tz-aware) diff --git a/src/zipline/country.py b/src/zipline/country.py index 16179d587f..bb23f4d13d 100644 --- a/src/zipline/country.py +++ b/src/zipline/country.py @@ -1,5 +1,6 @@ """Canonical definitions of country code constants. 
""" + from iso3166 import countries_by_name diff --git a/src/zipline/data/_equities.pyx b/src/zipline/data/_equities.pyx index a052804964..a480a24397 100644 --- a/src/zipline/data/_equities.pyx +++ b/src/zipline/data/_equities.pyx @@ -32,7 +32,6 @@ from numpy cimport ( uint8_t, ) from numpy.math cimport NAN - ctypedef object carray_t ctypedef object ctable_t ctypedef object Timestamp_t diff --git a/src/zipline/data/_resample.pyx b/src/zipline/data/_resample.pyx index 22bd9f8d32..0e336f6d60 100644 --- a/src/zipline/data/_resample.pyx +++ b/src/zipline/data/_resample.pyx @@ -15,6 +15,7 @@ from cython cimport boundscheck, wraparound from numpy import finfo, float64, nan, isnan from numpy cimport intp_t, float64_t, uint32_t + @boundscheck(False) @wraparound(False) cpdef void _minute_to_session_open(intp_t[:] close_locs, diff --git a/src/zipline/data/bundles/core.py b/src/zipline/data/bundles/core.py index a56379962d..cde6b0c86a 100644 --- a/src/zipline/data/bundles/core.py +++ b/src/zipline/data/bundles/core.py @@ -391,9 +391,10 @@ def ingest( cachepath = cache_path(name, environ=environ) pth.ensure_directory(pth.data_path([name, timestr], environ=environ)) pth.ensure_directory(cachepath) - with dataframe_cache( - cachepath, clean_on_failure=False - ) as cache, ExitStack() as stack: + with ( + dataframe_cache(cachepath, clean_on_failure=False) as cache, + ExitStack() as stack, + ): # we use `cleanup_on_failure=False` so that we don't purge the # cache directory if the load fails in the middle if bundle.create_writers: diff --git a/src/zipline/data/bundles/csvdir.py b/src/zipline/data/bundles/csvdir.py index 85a00a6cfe..7c6185762d 100644 --- a/src/zipline/data/bundles/csvdir.py +++ b/src/zipline/data/bundles/csvdir.py @@ -1,6 +1,7 @@ """ Module for building a complete dataset from local directory with csv files. """ + import os import sys diff --git a/src/zipline/data/bundles/quandl.py b/src/zipline/data/bundles/quandl.py index bfe0dcd5e8..ea88a13083 100644 --- a/src/zipline/data/bundles/quandl.py +++ b/src/zipline/data/bundles/quandl.py @@ -1,6 +1,7 @@ """ Module for building a complete daily dataset from Quandl's WIKI dataset. 
""" + from io import BytesIO import tarfile from zipfile import ZipFile diff --git a/src/zipline/data/data_portal.py b/src/zipline/data/data_portal.py index d637195b57..2e5326c807 100644 --- a/src/zipline/data/data_portal.py +++ b/src/zipline/data/data_portal.py @@ -219,11 +219,11 @@ def __init__( self.asset_finder, aligned_future_session_reader, ) - aligned_session_readers[ - ContinuousFuture - ] = ContinuousFutureSessionBarReader( - aligned_future_session_reader, - self._roll_finders, + aligned_session_readers[ContinuousFuture] = ( + ContinuousFutureSessionBarReader( + aligned_future_session_reader, + self._roll_finders, + ) ) _dispatch_minute_reader = AssetDispatchMinuteBarReader( @@ -408,7 +408,7 @@ def _get_fetcher_value(self, asset, field, dt): try: return self._augmented_sources_map[field][asset].loc[day, field] except KeyError: - return np.NaN + return np.nan def _get_single_asset_value(self, session_label, asset, field, dt, data_frequency): if self._is_extra_source(asset, field, self._augmented_sources_map): @@ -427,7 +427,7 @@ def _get_single_asset_value(self, session_label, asset, field, dt, data_frequenc elif field == "contract": return None elif field != "last_traded": - return np.NaN + return np.nan if data_frequency == "daily": if field == "contract": @@ -1033,7 +1033,7 @@ def _get_daily_window_data(self, assets, field, days_in_window, extra_slot=True) if field != "volume": # volumes default to 0, so we don't need to put NaNs in the array return_array = return_array.astype(float64) - return_array[:] = np.NAN + return_array[:] = np.nan if bar_count != 0: data = self._history_loader.history( @@ -1073,9 +1073,9 @@ def _get_adjustment_list(self, asset, adjustments_dict, table_name): try: adjustments = adjustments_dict[sid] except KeyError: - adjustments = adjustments_dict[ - sid - ] = self._adjustment_reader.get_adjustments_for_sid(table_name, sid) + adjustments = adjustments_dict[sid] = ( + self._adjustment_reader.get_adjustments_for_sid(table_name, sid) + ) return adjustments diff --git a/src/zipline/data/fx/hdf5.py b/src/zipline/data/fx/hdf5.py index e221a9c785..b426a1742d 100644 --- a/src/zipline/data/fx/hdf5.py +++ b/src/zipline/data/fx/hdf5.py @@ -92,6 +92,7 @@ Each column of the array contains exchange rates for a given date. The label for column i in a data node is the ith element of /index/dts. 
""" + from interface import implements import h5py import logging diff --git a/src/zipline/data/fx/in_memory.py b/src/zipline/data/fx/in_memory.py index 1000c7a7cf..ea20670572 100644 --- a/src/zipline/data/fx/in_memory.py +++ b/src/zipline/data/fx/in_memory.py @@ -1,4 +1,5 @@ """Interface and definitions for foreign exchange rate readers.""" + from interface import implements import numpy as np diff --git a/src/zipline/data/hdf5_daily_bars.py b/src/zipline/data/hdf5_daily_bars.py index f2037bbd71..6a5d3644c0 100644 --- a/src/zipline/data/hdf5_daily_bars.py +++ b/src/zipline/data/hdf5_daily_bars.py @@ -616,7 +616,7 @@ def _make_sid_selector(self, assets): """ assets = np.array(assets) sid_selector = self.sids.searchsorted(assets) - unknown = np.in1d(assets, self.sids, invert=True) + unknown = np.isin(assets, self.sids, invert=True) sid_selector[unknown] = -1 return sid_selector diff --git a/src/zipline/data/history_loader.py b/src/zipline/data/history_loader.py index bc2636d087..bdff15c8f8 100644 --- a/src/zipline/data/history_loader.py +++ b/src/zipline/data/history_loader.py @@ -289,18 +289,18 @@ def __init__( self._reader = reader self._adjustment_readers = {} if equity_adjustment_reader is not None: - self._adjustment_readers[ - Equity - ] = HistoryCompatibleUSEquityAdjustmentReader(equity_adjustment_reader) + self._adjustment_readers[Equity] = ( + HistoryCompatibleUSEquityAdjustmentReader(equity_adjustment_reader) + ) if roll_finders: - self._adjustment_readers[ - ContinuousFuture - ] = ContinuousFutureAdjustmentReader( - trading_calendar, - asset_finder, - reader, - roll_finders, - self._frequency, + self._adjustment_readers[ContinuousFuture] = ( + ContinuousFutureAdjustmentReader( + trading_calendar, + asset_finder, + reader, + roll_finders, + self._frequency, + ) ) self._window_blocks = { field: ExpiringCache(LRU(sid_cache_size)) for field in self.FIELDS diff --git a/src/zipline/data/resample.py b/src/zipline/data/resample.py index 881bf63aef..5b1d0a1241 100644 --- a/src/zipline/data/resample.py +++ b/src/zipline/data/resample.py @@ -181,7 +181,7 @@ def opens(self, assets, dt): for asset in assets: if not asset.is_alive_for_session(session_label): - opens.append(np.NaN) + opens.append(np.nan) continue if prev_dt is None: @@ -250,7 +250,7 @@ def highs(self, assets, dt): for asset in assets: if not asset.is_alive_for_session(session_label): - highs.append(np.NaN) + highs.append(np.nan) continue if prev_dt is None: @@ -318,7 +318,7 @@ def lows(self, assets, dt): for asset in assets: if not asset.is_alive_for_session(session_label): - lows.append(np.NaN) + lows.append(np.nan) continue if prev_dt is None: @@ -396,11 +396,11 @@ def _get_filled_close(asset): try: return window[~np.isnan(window)][-1] except IndexError: - return np.NaN + return np.nan for asset in assets: if not asset.is_alive_for_session(session_label): - closes.append(np.NaN) + closes.append(np.nan) continue if prev_dt is None: diff --git a/src/zipline/dispatch.py b/src/zipline/dispatch.py index 2258f25bd2..9995d51b39 100644 --- a/src/zipline/dispatch.py +++ b/src/zipline/dispatch.py @@ -2,6 +2,7 @@ Anything that has been dispatched will also be put into this module. 
""" + from functools import partial import sys diff --git a/src/zipline/examples/__init__.py b/src/zipline/examples/__init__.py index cb810ca304..255c30bfbf 100644 --- a/src/zipline/examples/__init__.py +++ b/src/zipline/examples/__init__.py @@ -5,12 +5,25 @@ from zipline.utils.calendar_utils import register_calendar, get_calendar from zipline import run_algorithm +from packaging.version import Version + +# talib is not yet compatible with numpy 2.0 +import numpy + +NUMPY2 = Version(numpy.__version__) >= Version("2.0.0") +if not NUMPY2: + try: + import talib + except ImportError: + talib = None # These are used by test_examples.py to discover the examples to run. def load_example_modules(): example_modules = {} for f in os.listdir(os.path.dirname(__file__)): + if (NUMPY2 or talib is None) and f == "dual_ema_talib.py": + continue if not f.endswith(".py") or f == "__init__.py" or f == "buyapple_ide.py": continue modname = f[: -len(".py")] diff --git a/src/zipline/examples/dual_ema_talib.py b/src/zipline/examples/dual_ema_talib.py index 8234017613..40cee6ef49 100644 --- a/src/zipline/examples/dual_ema_talib.py +++ b/src/zipline/examples/dual_ema_talib.py @@ -38,6 +38,13 @@ "the zipline installation documentation)." ) raise ImportError(msg) from exc +except ValueError as e: + if len(e.args) > 0 and e.args[0].startswith("numpy.dtype size changed"): + msg = ( + "TA-lib failed to import. This is likely due to lack of Numpy 2.0 compatibility. " + "Please check your versions and possibly downgrade NumPy." + ) + raise ImportError(msg) from e def initialize(context): diff --git a/src/zipline/examples/momentum_pipeline.py b/src/zipline/examples/momentum_pipeline.py index b6ab34a7b8..ed3b601b54 100644 --- a/src/zipline/examples/momentum_pipeline.py +++ b/src/zipline/examples/momentum_pipeline.py @@ -2,6 +2,7 @@ A simple Pipeline algorithm that longs the top 3 stocks by RSI and shorts the bottom 3 each day. 
""" + from zipline.api import ( attach_pipeline, date_rules, diff --git a/src/zipline/examples/olmar.py b/src/zipline/examples/olmar.py index 8c87fdddfb..68d48a0642 100644 --- a/src/zipline/examples/olmar.py +++ b/src/zipline/examples/olmar.py @@ -18,6 +18,7 @@ # On-Line Portfolio Moving Average Reversion + # More info can be found in the corresponding paper: # http://icml.cc/2012/papers/168.pdf def initialize(algo, eps=1, window_length=5): diff --git a/src/zipline/finance/ledger.py b/src/zipline/finance/ledger.py index e1cee66032..d7ff433672 100644 --- a/src/zipline/finance/ledger.py +++ b/src/zipline/finance/ledger.py @@ -417,7 +417,12 @@ def end_of_bar(self, session_ix): # make daily_returns hold the partial returns, this saves many # metrics from doing a concat and copying all of the previous # returns - self.daily_returns_array.iloc[session_ix] = self.todays_returns + if isinstance(self.daily_returns_array, np.ndarray): + self.daily_returns_array[session_ix] = self.todays_returns + elif isinstance(self.daily_returns_array, pd.Series): + self.daily_returns_array.iloc[session_ix] = self.todays_returns + else: + raise ValueError("Unknown daily returns array type") def end_of_session(self, session_ix): # save the daily returns time-series @@ -433,7 +438,6 @@ def sync_last_sale_prices(self, dt, data_portal, handle_non_market_minutes=False @staticmethod def _calculate_payout(multiplier, amount, old_price, price): - return (price - old_price) * multiplier * amount def _cash_flow(self, amount): diff --git a/src/zipline/finance/metrics/metric.py b/src/zipline/finance/metrics/metric.py index 1bd5538909..88897b82ca 100644 --- a/src/zipline/finance/metrics/metric.py +++ b/src/zipline/finance/metrics/metric.py @@ -77,15 +77,15 @@ def __init__(self, ledger_field, packet_field=None): def end_of_bar(self, packet, ledger, dt, session_ix, data_portal): field = self._packet_field - packet["cumulative_perf"][field] = packet["minute_perf"][ - field - ] = self._get_ledger_field(ledger) + packet["cumulative_perf"][field] = packet["minute_perf"][field] = ( + self._get_ledger_field(ledger) + ) def end_of_session(self, packet, ledger, session, session_ix, data_portal): field = self._packet_field - packet["cumulative_perf"][field] = packet["daily_perf"][ - field - ] = self._get_ledger_field(ledger) + packet["cumulative_perf"][field] = packet["daily_perf"][field] = ( + self._get_ledger_field(ledger) + ) class StartOfPeriodLedgerField: diff --git a/src/zipline/finance/slippage.py b/src/zipline/finance/slippage.py index 93aaf8c6a5..6bc2b9c0ed 100644 --- a/src/zipline/finance/slippage.py +++ b/src/zipline/finance/slippage.py @@ -269,7 +269,6 @@ def __init__( volume_limit=DEFAULT_EQUITY_VOLUME_SLIPPAGE_BAR_LIMIT, price_impact=0.1, ): - super(VolumeShareSlippage, self).__init__() self.volume_limit = volume_limit @@ -322,9 +321,7 @@ def process_order(self, data, order): # END simulated_impact = ( - volume_share**2 - * math.copysign(self.price_impact, order.direction) - * price + volume_share**2 * math.copysign(self.price_impact, order.direction) * price ) impacted_price = price + simulated_impact @@ -500,7 +497,7 @@ def _get_window_data(self, data, asset, window_length): except HistoryWindowStartsBeforeData: # If there is not enough data to do a full history call, return # values as if there was no data. - return 0, np.NaN + return 0, np.nan # Exclude the first value of the percent change array because it is # always just NaN. 
diff --git a/src/zipline/lib/_factorize.pyx b/src/zipline/lib/_factorize.pyx index 6774286740..4d448da56d 100644 --- a/src/zipline/lib/_factorize.pyx +++ b/src/zipline/lib/_factorize.pyx @@ -6,15 +6,12 @@ from libc.math cimport log cimport numpy as np import numpy as np -from zipline.utils.numpy_utils import unsigned_int_dtype_with_size_in_bytes - -np.import_array() +from zipline.utils.numpy_utils import unsigned_int_dtype_with_size_in_bytes cdef inline double log2(double d): return log(d) / log(2); - cpdef inline smallest_uint_that_can_hold(Py_ssize_t maxval): """Choose the smallest numpy unsigned int dtype that can hold ``maxval``. """ @@ -31,14 +28,12 @@ cpdef inline smallest_uint_that_can_hold(Py_ssize_t maxval): _int_sizes[int(np.ceil(log2(maxval) / 8))] ) - ctypedef fused unsigned_integral: np.uint8_t np.uint16_t np.uint32_t np.uint64_t - cdef class _NoneFirstSortKey: """Box to sort ``None`` to the front of the categories list. """ @@ -50,13 +45,12 @@ cdef class _NoneFirstSortKey: def __richcmp__(_NoneFirstSortKey self, _NoneFirstSortKey other, int op): if op == Py_LT: return ( - self.value is None or - (other.value is not None and self.value < other.value) + self.value is None or + (other.value is not None and self.value < other.value) ) return NotImplemented - cdef factorize_strings_known_impl(np.ndarray[object] values, Py_ssize_t nvalues, list categories, @@ -77,7 +71,6 @@ cdef factorize_strings_known_impl(np.ndarray[object] values, return codes, np.asarray(categories, dtype=object), reverse_categories - cpdef factorize_strings_known_categories(np.ndarray[object] values, list categories, object missing_value, @@ -132,7 +125,6 @@ cpdef factorize_strings_known_categories(np.ndarray[object] values, else: raise ValueError('ncategories larger than uint64') - cdef factorize_strings_impl(np.ndarray[object] values, object missing_value, bint sort, @@ -180,10 +172,8 @@ cdef factorize_strings_impl(np.ndarray[object] values, return codes, categories_array, reverse_categories - cdef list _int_sizes = [1, 1, 2, 4, 4, 8, 8, 8, 8] - cpdef factorize_strings(np.ndarray[object] values, object missing_value, int sort): diff --git a/src/zipline/lib/adjustment.pyx b/src/zipline/lib/adjustment.pyx index bd0e4794ea..be42ab646c 100644 --- a/src/zipline/lib/adjustment.pyx +++ b/src/zipline/lib/adjustment.pyx @@ -1,7 +1,7 @@ # cython: embedsignature=True from cpython cimport Py_EQ -cimport cython +# cimport cython from pandas import isnull, Timestamp cimport numpy as np from numpy cimport float64_t, uint8_t, int64_t diff --git a/src/zipline/lib/labelarray.py b/src/zipline/lib/labelarray.py index 7746935e7a..178c311407 100644 --- a/src/zipline/lib/labelarray.py +++ b/src/zipline/lib/labelarray.py @@ -1,6 +1,7 @@ """ An ndarray subclass for working with arrays of strings. 
""" + from functools import partial, total_ordering from operator import eq, ne import re diff --git a/src/zipline/lib/normalize.py b/src/zipline/lib/normalize.py index 14ad5ddea0..afe078d435 100644 --- a/src/zipline/lib/normalize.py +++ b/src/zipline/lib/normalize.py @@ -40,7 +40,7 @@ def naive_grouped_rowwise_apply(data, group_labels, func, func_args=(), out=None if out is None: out = np.empty_like(data) - for (row, label_row, out_row) in zip(data, group_labels, out): + for row, label_row, out_row in zip(data, group_labels, out): for label in np.unique(label_row): locs = label_row == label out_row[locs] = func(row[locs], *func_args) diff --git a/src/zipline/lib/quantiles.py b/src/zipline/lib/quantiles.py index 5177f67876..2cbf7cedc9 100644 --- a/src/zipline/lib/quantiles.py +++ b/src/zipline/lib/quantiles.py @@ -1,7 +1,9 @@ """ Algorithms for computing quantiles on numpy arrays. """ -from numpy.lib import apply_along_axis + +# from numpy.lib import apply_along_axis +from numpy import apply_along_axis from pandas import qcut diff --git a/src/zipline/lib/rank.pyx b/src/zipline/lib/rank.pyx index 1a45e54724..0128415c71 100644 --- a/src/zipline/lib/rank.pyx +++ b/src/zipline/lib/rank.pyx @@ -2,9 +2,8 @@ Functions for ranking and sorting. """ cimport cython -cimport numpy as np - import numpy as np +cimport numpy as np from cpython cimport bool from scipy.stats import rankdata from zipline.utils.numpy_utils import is_missing diff --git a/src/zipline/pipeline/api_utils.py b/src/zipline/pipeline/api_utils.py index ebb5f4c343..21ade16490 100644 --- a/src/zipline/pipeline/api_utils.py +++ b/src/zipline/pipeline/api_utils.py @@ -1,6 +1,7 @@ """ Utilities for creating public APIs (e.g. argument validation decorators). """ + from zipline.utils.input_validation import preprocess diff --git a/src/zipline/pipeline/classifiers/classifier.py b/src/zipline/pipeline/classifiers/classifier.py index 7266cbbb23..fa3a845998 100644 --- a/src/zipline/pipeline/classifiers/classifier.py +++ b/src/zipline/pipeline/classifiers/classifier.py @@ -1,6 +1,7 @@ """ classifier.py """ + from functools import partial from numbers import Number import operator diff --git a/src/zipline/pipeline/common.py b/src/zipline/pipeline/common.py index d74a2e91c1..9a859b3477 100644 --- a/src/zipline/pipeline/common.py +++ b/src/zipline/pipeline/common.py @@ -1,6 +1,7 @@ """ Common constants for Pipeline. """ + AD_FIELD_NAME = "asof_date" ANNOUNCEMENT_FIELD_NAME = "announcement_date" CASH_FIELD_NAME = "cash" diff --git a/src/zipline/pipeline/data/equity_pricing.py b/src/zipline/pipeline/data/equity_pricing.py index a024d239f8..1c18a63719 100644 --- a/src/zipline/pipeline/data/equity_pricing.py +++ b/src/zipline/pipeline/data/equity_pricing.py @@ -1,6 +1,7 @@ """ Dataset representing OHLCV data. """ + from zipline.utils.numpy_utils import float64_dtype, categorical_dtype from ..domain import US_EQUITIES diff --git a/src/zipline/pipeline/data/testing.py b/src/zipline/pipeline/data/testing.py index 71168e8169..ae7253f2af 100644 --- a/src/zipline/pipeline/data/testing.py +++ b/src/zipline/pipeline/data/testing.py @@ -4,6 +4,7 @@ Loaders for datasets in this file can be found in zipline.pipeline.data.testing. 
""" + from .dataset import Column, DataSet from zipline.utils.numpy_utils import ( bool_dtype, diff --git a/src/zipline/pipeline/domain.py b/src/zipline/pipeline/domain.py index 35fb6b4ac9..df810cf672 100644 --- a/src/zipline/pipeline/domain.py +++ b/src/zipline/pipeline/domain.py @@ -14,6 +14,7 @@ the future, we expect to expand this functionality to include more general concepts. """ + import datetime from textwrap import dedent diff --git a/src/zipline/pipeline/downsample_helpers.py b/src/zipline/pipeline/downsample_helpers.py index bb561fa51c..7dd8a552c0 100644 --- a/src/zipline/pipeline/downsample_helpers.py +++ b/src/zipline/pipeline/downsample_helpers.py @@ -1,6 +1,7 @@ """ Helpers for downsampling code. """ + from toolz import compose from operator import attrgetter, methodcaller diff --git a/src/zipline/pipeline/engine.py b/src/zipline/pipeline/engine.py index 1ff89ebe10..6063fa8ddc 100644 --- a/src/zipline/pipeline/engine.py +++ b/src/zipline/pipeline/engine.py @@ -55,6 +55,7 @@ into "narrow" format, with output labels dictated by the Pipeline's screen. This logic lives in SimplePipelineEngine._to_narrow. """ + from abc import ABC, abstractmethod from functools import partial diff --git a/src/zipline/pipeline/expression.py b/src/zipline/pipeline/expression.py index 8d2f2890d9..6b1d421716 100644 --- a/src/zipline/pipeline/expression.py +++ b/src/zipline/pipeline/expression.py @@ -1,6 +1,7 @@ """ NumericalExpression term. """ + import re from itertools import chain from numbers import Number diff --git a/src/zipline/pipeline/factors/basic.py b/src/zipline/pipeline/factors/basic.py index 0d5d28c3a8..59765e0d35 100644 --- a/src/zipline/pipeline/factors/basic.py +++ b/src/zipline/pipeline/factors/basic.py @@ -1,5 +1,6 @@ """Simple common factors. """ + from numbers import Number from numpy import ( arange, @@ -11,12 +12,13 @@ full, isnan, log, - NINF, + # NINF, sqrt, sum as np_sum, unique, errstate as np_errstate, ) +import numpy as np from zipline.pipeline.data import EquityPricing from zipline.utils.input_validation import expect_types @@ -29,7 +31,7 @@ ) from zipline.utils.numpy_utils import ( float64_dtype, - ignore_nanwarnings, + # ignore_nanwarnings, ) from .factor import CustomFactor @@ -113,7 +115,7 @@ class SimpleMovingAverage(SingleInputMixin, CustomFactor): # numpy's nan functions throw warnings when passed an array containing only # nans, but they still returns the desired value (nan), so we ignore the # warning. - ctx = ignore_nanwarnings() + # ctx = ignore_nanwarnings() def compute(self, today, assets, out, data): out[:] = nanmean(data, axis=0) @@ -153,11 +155,11 @@ class MaxDrawdown(SingleInputMixin, CustomFactor): **Default Window Length:** None """ - ctx = ignore_nanwarnings() + # ctx = ignore_nanwarnings() def compute(self, today, assets, out, data): drawdowns = fmax.accumulate(data, axis=0) - data - drawdowns[isnan(drawdowns)] = NINF + drawdowns[isnan(drawdowns)] = -np.inf drawdown_ends = nanargmax(drawdowns, axis=0) # TODO: Accelerate this loop in Cython or Numba. 
@@ -435,9 +437,7 @@ def compute(self, today, assets, out, data, decay_rate): variance = average((data - mean) ** 2, axis=0, weights=weights) squared_weight_sum = np_sum(weights) ** 2 - bias_correction = squared_weight_sum / ( - squared_weight_sum - np_sum(weights**2) - ) + bias_correction = squared_weight_sum / (squared_weight_sum - np_sum(weights**2)) out[:] = sqrt(variance * bias_correction) @@ -453,7 +453,7 @@ class LinearWeightedMovingAverage(SingleInputMixin, CustomFactor): # numpy's nan functions throw warnings when passed an array containing only # nans, but they still return the desired value (nan), so we ignore the # warning. - ctx = ignore_nanwarnings() + # ctx = ignore_nanwarnings() def compute(self, today, assets, out, data): ndays = data.shape[0] diff --git a/src/zipline/pipeline/factors/events.py b/src/zipline/pipeline/factors/events.py index acb8de43ac..71b73ac0f4 100644 --- a/src/zipline/pipeline/factors/events.py +++ b/src/zipline/pipeline/factors/events.py @@ -2,6 +2,7 @@ Factors describing information about event data (e.g. earnings announcements, acquisitions, dividends, etc.). """ + from numpy import newaxis from zipline.utils.numpy_utils import ( NaTD, diff --git a/src/zipline/pipeline/factors/factor.py b/src/zipline/pipeline/factors/factor.py index 776965efff..e430951856 100644 --- a/src/zipline/pipeline/factors/factor.py +++ b/src/zipline/pipeline/factors/factor.py @@ -1,6 +1,7 @@ """ factor.py """ + import numpy as np from operator import attrgetter from numbers import Number diff --git a/src/zipline/pipeline/factors/technical.py b/src/zipline/pipeline/factors/technical.py index f46d07ed9d..974bebaa00 100644 --- a/src/zipline/pipeline/factors/technical.py +++ b/src/zipline/pipeline/factors/technical.py @@ -2,6 +2,7 @@ Technical Analysis Factors -------------------------- """ + from numpy import ( abs, average, diff --git a/src/zipline/pipeline/filters/filter.py b/src/zipline/pipeline/filters/filter.py index d223be5be9..35f6d2f496 100644 --- a/src/zipline/pipeline/filters/filter.py +++ b/src/zipline/pipeline/filters/filter.py @@ -1,6 +1,7 @@ """ filter.py """ + from itertools import chain from operator import attrgetter diff --git a/src/zipline/pipeline/filters/smoothing.py b/src/zipline/pipeline/filters/smoothing.py index e6c8816ac9..086ff2281c 100644 --- a/src/zipline/pipeline/filters/smoothing.py +++ b/src/zipline/pipeline/filters/smoothing.py @@ -4,6 +4,7 @@ These are generally useful for controlling/minimizing turnover on existing Filters. """ + from .filter import CustomFilter diff --git a/src/zipline/pipeline/graph.py b/src/zipline/pipeline/graph.py index 009f59fe12..2cf1406e86 100644 --- a/src/zipline/pipeline/graph.py +++ b/src/zipline/pipeline/graph.py @@ -1,6 +1,7 @@ """ Dependency-Graph representation of Pipeline API terms. """ + import uuid import networkx as nx diff --git a/src/zipline/pipeline/hooks/progress.py b/src/zipline/pipeline/hooks/progress.py index f26f3a8208..74998e4b77 100644 --- a/src/zipline/pipeline/hooks/progress.py +++ b/src/zipline/pipeline/hooks/progress.py @@ -1,5 +1,6 @@ """Pipeline hooks for tracking and displaying progress. """ + from collections import namedtuple import time diff --git a/src/zipline/pipeline/loaders/base.py b/src/zipline/pipeline/loaders/base.py index 618a5dbc84..f5b837ff9e 100644 --- a/src/zipline/pipeline/loaders/base.py +++ b/src/zipline/pipeline/loaders/base.py @@ -1,6 +1,7 @@ """ Base class for Pipeline API data loaders.
""" + from interface import default, Interface diff --git a/src/zipline/pipeline/loaders/events.py b/src/zipline/pipeline/loaders/events.py index b9cf324778..b7cb8bedb9 100644 --- a/src/zipline/pipeline/loaders/events.py +++ b/src/zipline/pipeline/loaders/events.py @@ -23,7 +23,11 @@ def required_event_fields(next_value_columns, previous_value_columns): ``next_value_columns`` and ``previous_value_columns``. """ # These metadata columns are used to align event indexers. - return {TS_FIELD_NAME, SID_FIELD_NAME, EVENT_DATE_FIELD_NAME,}.union( + return { + TS_FIELD_NAME, + SID_FIELD_NAME, + EVENT_DATE_FIELD_NAME, + }.union( # We also expect any of the field names that our loadable columns # are mapped to. next_value_columns.values(), diff --git a/src/zipline/pipeline/loaders/frame.py b/src/zipline/pipeline/loaders/frame.py index d725b54ecf..7908ae3e4a 100644 --- a/src/zipline/pipeline/loaders/frame.py +++ b/src/zipline/pipeline/loaders/frame.py @@ -1,6 +1,7 @@ """ PipelineLoader accepting a DataFrame as input. """ + from functools import partial from interface import implements diff --git a/src/zipline/pipeline/loaders/testing.py b/src/zipline/pipeline/loaders/testing.py index 3380c09b17..61ca91d15a 100644 --- a/src/zipline/pipeline/loaders/testing.py +++ b/src/zipline/pipeline/loaders/testing.py @@ -1,6 +1,7 @@ """ Loaders for zipline.pipeline.data.testing datasets. """ + from .synthetic import EyeLoader, SeededRandomLoader from ..data.testing import TestingDataSet diff --git a/src/zipline/pipeline/mixins.py b/src/zipline/pipeline/mixins.py index 4ff3da6785..ff5281dfd1 100644 --- a/src/zipline/pipeline/mixins.py +++ b/src/zipline/pipeline/mixins.py @@ -4,6 +4,7 @@ The mixin classes inherit from Term to ensure they appear before Term in the MRO of any class using the mixin """ + from abc import abstractmethod from numpy import ( diff --git a/src/zipline/pipeline/term.py b/src/zipline/pipeline/term.py index 0236327da1..b0c2c39719 100644 --- a/src/zipline/pipeline/term.py +++ b/src/zipline/pipeline/term.py @@ -1,6 +1,7 @@ """ Base class for Filters, Factors and Classifiers """ + from abc import ABC, abstractmethod from bisect import insort from collections.abc import Mapping diff --git a/src/zipline/pipeline/visualize.py b/src/zipline/pipeline/visualize.py index eba1e45e51..16d64b527f 100644 --- a/src/zipline/pipeline/visualize.py +++ b/src/zipline/pipeline/visualize.py @@ -1,6 +1,7 @@ """ Tools for visualizing dependencies between Terms. 
""" + from contextlib import contextmanager import errno from functools import partial diff --git a/src/zipline/test_algorithms.py b/src/zipline/test_algorithms.py index f59bc510d5..523d750df7 100644 --- a/src/zipline/test_algorithms.py +++ b/src/zipline/test_algorithms.py @@ -308,6 +308,7 @@ def handle_data(self, data): ############################## # Quantopian style algorithms + # Noop algo def initialize_noop(context): pass diff --git a/src/zipline/testing/core.py b/src/zipline/testing/core.py index 9bff4cc751..0c4dce28a8 100644 --- a/src/zipline/testing/core.py +++ b/src/zipline/testing/core.py @@ -158,9 +158,10 @@ def security_list_copy(): shutil.copytree( os.path.join(old_dir, subdir), os.path.join(new_dir, subdir) ) - with mock.patch.object( - security_list, "SECURITY_LISTS_DIR", new_dir - ), mock.patch.object(security_list, "using_copy", True, create=True): + with ( + mock.patch.object(security_list, "SECURITY_LISTS_DIR", new_dir), + mock.patch.object(security_list, "using_copy", True, create=True), + ): yield finally: shutil.rmtree(new_dir, True) diff --git a/src/zipline/testing/fixtures.py b/src/zipline/testing/fixtures.py index fa957eeac3..3e91d46673 100644 --- a/src/zipline/testing/fixtures.py +++ b/src/zipline/testing/fixtures.py @@ -1452,9 +1452,9 @@ def make_bcolz_equity_minute_bar_rootdir_path(cls): @classmethod def init_class_fixtures(cls): super(WithBcolzEquityMinuteBarReader, cls).init_class_fixtures() - cls.bcolz_equity_minute_bar_path = ( - p - ) = cls.make_bcolz_equity_minute_bar_rootdir_path() + cls.bcolz_equity_minute_bar_path = p = ( + cls.make_bcolz_equity_minute_bar_rootdir_path() + ) days = cls.equity_minute_bar_days writer = BcolzMinuteBarWriter( @@ -1514,9 +1514,9 @@ def make_bcolz_future_minute_bar_rootdir_path(cls): def init_class_fixtures(cls): super(WithBcolzFutureMinuteBarReader, cls).init_class_fixtures() trading_calendar = get_calendar("us_futures") - cls.bcolz_future_minute_bar_path = ( - p - ) = cls.make_bcolz_future_minute_bar_rootdir_path() + cls.bcolz_future_minute_bar_path = p = ( + cls.make_bcolz_future_minute_bar_rootdir_path() + ) days = cls.future_minute_bar_days writer = BcolzMinuteBarWriter( diff --git a/src/zipline/utils/cache.py b/src/zipline/utils/cache.py index de89ad6aa8..9bed4fa1c9 100644 --- a/src/zipline/utils/cache.py +++ b/src/zipline/utils/cache.py @@ -1,10 +1,13 @@ """Caching utilities for zipline""" + from collections.abc import MutableMapping import errno from functools import partial import os import pickle -from distutils import dir_util + +# from distutils import dir_util +from shutil import copytree from shutil import rmtree, move from tempfile import mkdtemp, NamedTemporaryFile @@ -370,7 +373,7 @@ def getpath(self, *path_parts): def _commit(self): """Sync the temporary directory to the final path.""" - dir_util.copy_tree(self.path, self._final_path) + copytree(src=self.path, dst=self._final_path, dirs_exist_ok=True) def __enter__(self): return self diff --git a/src/zipline/utils/deprecate.py b/src/zipline/utils/deprecate.py index 67c7d1374d..19d5846a2f 100644 --- a/src/zipline/utils/deprecate.py +++ b/src/zipline/utils/deprecate.py @@ -1,4 +1,5 @@ """Utilities for marking deprecated functions.""" + # Copyright 2016 Quantopian, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/zipline/utils/memoize.py b/src/zipline/utils/memoize.py index bf09a01514..70135f3994 100644 --- a/src/zipline/utils/memoize.py +++ b/src/zipline/utils/memoize.py @@ -1,6 +1,7 @@ """ Tools for memoization of function results. """ + from collections.abc import Sequence from collections import OrderedDict, namedtuple from itertools import compress diff --git a/src/zipline/utils/numpy_utils.py b/src/zipline/utils/numpy_utils.py index 277244e5e7..d3ab9d9f30 100644 --- a/src/zipline/utils/numpy_utils.py +++ b/src/zipline/utils/numpy_utils.py @@ -1,4 +1,5 @@ """ Utilities for working with numpy arrays.""" + from collections import OrderedDict from datetime import datetime from warnings import catch_warnings, filterwarnings @@ -414,10 +415,17 @@ def ignore_nanwarnings(): Helper for building a WarningContext that ignores warnings from numpy's nanfunctions. """ + from packaging.version import Version + + NUMPY2 = Version(np.__version__) >= Version("2.0.0") + if NUMPY2: + module = "numpy.lib._nanfunctions_impl" + else: + module = "numpy.lib.nanfunctions" return WarningContext( ( ("ignore",), - {"category": RuntimeWarning, "module": "numpy.lib.nanfunctions"}, + {"category": RuntimeWarning, "module": module}, ) ) diff --git a/src/zipline/utils/pandas_utils.py b/src/zipline/utils/pandas_utils.py index 7b6af82f1b..5a0901601a 100644 --- a/src/zipline/utils/pandas_utils.py +++ b/src/zipline/utils/pandas_utils.py @@ -1,6 +1,7 @@ """ Utilities for working with pandas objects. """ + from contextlib import contextmanager from copy import deepcopy from itertools import product diff --git a/src/zipline/utils/paths.py b/src/zipline/utils/paths.py index 2163615e9c..81526f7e30 100644 --- a/src/zipline/utils/paths.py +++ b/src/zipline/utils/paths.py @@ -4,6 +4,7 @@ Paths are rooted at $ZIPLINE_ROOT if that environment variable is set. Otherwise default to expanduser(~/.zipline) """ + import os from pathlib import Path from typing import Any, Iterable, Mapping, Optional, List diff --git a/src/zipline/utils/preprocess.py b/src/zipline/utils/preprocess.py index 3ee8df35bc..d41c580bef 100644 --- a/src/zipline/utils/preprocess.py +++ b/src/zipline/utils/preprocess.py @@ -1,6 +1,7 @@ """ Utilities for validating inputs to user-facing API functions. """ + import sys from textwrap import dedent from types import CodeType diff --git a/src/zipline/utils/run_algo.py b/src/zipline/utils/run_algo.py index 8ef95fab38..ef8ceb7cd9 100644 --- a/src/zipline/utils/run_algo.py +++ b/src/zipline/utils/run_algo.py @@ -213,17 +213,19 @@ def choose_loader(column): blotter=blotter, benchmark_returns=benchmark_returns, benchmark_sid=benchmark_sid, - **{ - "initialize": initialize, - "handle_data": handle_data, - "before_trading_start": before_trading_start, - "analyze": analyze, - } - if algotext is None - else { - "algo_filename": getattr(algofile, "name", ""), - "script": algotext, - }, + **( + { + "initialize": initialize, + "handle_data": handle_data, + "before_trading_start": before_trading_start, + "analyze": analyze, + } + if algotext is None + else { + "algo_filename": getattr(algofile, "name", ""), + "script": algotext, + } + ), ).run() except NoBenchmark: raise _RunAlgoError( diff --git a/src/zipline/utils/sentinel.py b/src/zipline/utils/sentinel.py index 6c8b6cdc6c..95652e6a1b 100644 --- a/src/zipline/utils/sentinel.py +++ b/src/zipline/utils/sentinel.py @@ -3,6 +3,7 @@ Sentinel objects are used when you only care to check for object identity. 
""" + import sys from textwrap import dedent diff --git a/src/zipline/utils/sharedoc.py b/src/zipline/utils/sharedoc.py index ae0dc1022c..95f9309a54 100644 --- a/src/zipline/utils/sharedoc.py +++ b/src/zipline/utils/sharedoc.py @@ -2,6 +2,7 @@ Shared docstrings for parameters that should be documented identically across different functions. """ + import re from textwrap import dedent from toolz import curry diff --git a/tests/finance/test_slippage.py b/tests/finance/test_slippage.py index 8e54fd5a48..ddb9eb332b 100644 --- a/tests/finance/test_slippage.py +++ b/tests/finance/test_slippage.py @@ -840,7 +840,7 @@ def make_future_minute_bar_data(cls): # Make the first month's worth of data NaN to simulate cases where a # futures contract does not exist yet. asset_start_date = cls.ASSET_START_DATE.tz_localize(data[0][1].index.tzinfo) - data[0][1].loc[:asset_start_date] = np.NaN + data[0][1].loc[:asset_start_date] = np.nan return data def test_calculate_impact_buy(self): diff --git a/tests/pipeline/base.py b/tests/pipeline/base.py index 8f890d396c..5fb4e47b69 100644 --- a/tests/pipeline/base.py +++ b/tests/pipeline/base.py @@ -1,6 +1,7 @@ """ Base class for Pipeline API unit tests. """ + import numpy as np import pandas as pd diff --git a/tests/pipeline/test_adjusted_array.py b/tests/pipeline/test_adjusted_array.py index 7bcbf9680f..34c75bc2df 100644 --- a/tests/pipeline/test_adjusted_array.py +++ b/tests/pipeline/test_adjusted_array.py @@ -1,6 +1,7 @@ """ Tests for chunked adjustments. """ + from collections import namedtuple from itertools import chain, product, zip_longest from string import ascii_lowercase, ascii_uppercase diff --git a/tests/pipeline/test_adjustment.py b/tests/pipeline/test_adjustment.py index a03960eec0..a00d8f259a 100644 --- a/tests/pipeline/test_adjustment.py +++ b/tests/pipeline/test_adjustment.py @@ -1,6 +1,7 @@ """ Tests for zipline.lib.adjustment """ + from zipline.lib import adjustment as adj from zipline.utils.numpy_utils import make_datetime64ns import pytest diff --git a/tests/pipeline/test_column.py b/tests/pipeline/test_column.py index 4bd91396c2..dc864fcc4c 100644 --- a/tests/pipeline/test_column.py +++ b/tests/pipeline/test_column.py @@ -1,6 +1,7 @@ """ Tests BoundColumn attributes and methods. """ + import operator from unittest import skipIf diff --git a/tests/pipeline/test_computable_term.py b/tests/pipeline/test_computable_term.py index d0dc05b4da..fcaf1cac22 100644 --- a/tests/pipeline/test_computable_term.py +++ b/tests/pipeline/test_computable_term.py @@ -1,5 +1,6 @@ """Tests for common behaviors shared by all ComputableTerms. """ + import numpy as np from zipline.lib.labelarray import LabelArray diff --git a/tests/pipeline/test_dataset.py b/tests/pipeline/test_dataset.py index 3a33d79444..b78ca5a1d3 100644 --- a/tests/pipeline/test_dataset.py +++ b/tests/pipeline/test_dataset.py @@ -1,5 +1,6 @@ """Tests for the zipline.pipeline.data.DataSet and related functionality. 
""" + import string from textwrap import dedent diff --git a/tests/pipeline/test_downsampling.py b/tests/pipeline/test_downsampling.py index abee184102..250560bce4 100644 --- a/tests/pipeline/test_downsampling.py +++ b/tests/pipeline/test_downsampling.py @@ -1,6 +1,7 @@ """ Tests for Downsampled Filters/Factors/Classifiers """ + from functools import partial import pandas as pd diff --git a/tests/pipeline/test_engine.py b/tests/pipeline/test_engine.py index 5fcf73e0a5..265b4ce050 100644 --- a/tests/pipeline/test_engine.py +++ b/tests/pipeline/test_engine.py @@ -1,6 +1,7 @@ """ Tests for SimplePipelineEngine """ + from collections import OrderedDict from itertools import product from operator import add, sub diff --git a/tests/pipeline/test_events.py b/tests/pipeline/test_events.py index b408e35c54..0f84ac2034 100644 --- a/tests/pipeline/test_events.py +++ b/tests/pipeline/test_events.py @@ -1,6 +1,7 @@ """ Tests for setting up an EventsLoader. """ + import re from datetime import time from itertools import product diff --git a/tests/pipeline/test_factor.py b/tests/pipeline/test_factor.py index da019ab82a..17c33e46f6 100644 --- a/tests/pipeline/test_factor.py +++ b/tests/pipeline/test_factor.py @@ -1,7 +1,9 @@ """ Tests for Factor terms. """ + import re +import sys from functools import partial from itertools import product from unittest import skipIf @@ -13,7 +15,6 @@ from parameterized import parameterized from scipy.stats.mstats import winsorize as scipy_winsorize from toolz import compose -from packaging.version import Version from zipline.errors import BadPercentileBounds, UnknownRankMethod from zipline.lib.labelarray import LabelArray from zipline.lib.normalize import naive_grouped_rowwise_apply as grouped_apply @@ -39,6 +40,11 @@ from zipline.utils.pandas_utils import new_pandas, skip_pipeline_new_pandas from .base import BaseUSEquityPipelineTestCase +from packaging.version import Version + +from .test_statistical import ON_GITHUB_ACTIONS + +NUMPY2 = Version(np.__version__) >= Version("2.0.0") pandas_two_point_two = False if Version(pd.__version__) >= Version("2.2"): @@ -1731,7 +1737,9 @@ def test_daily_returns_is_special_case_of_returns(self): class SummaryTestCase(BaseUSEquityPipelineTestCase, ZiplineTestCase): - @pytest.mark.filterwarnings("ignore", module=np.lib.nanfunctions) + @pytest.mark.filterwarnings( + "ignore", module=np.lib._nanfunctions_impl if NUMPY2 else np.lib.nanfunctions + ) @parameter_space( seed=[1, 2, 3], mask=[ @@ -1814,7 +1822,9 @@ def test_built_in_vs_summary(self, seed, mask): assert_equal(result["demean"], result["alt_demean"]) assert_equal(result["zscore"], result["alt_zscore"]) - @pytest.mark.filterwarnings("ignore", module=np.lib.nanfunctions) + @pytest.mark.filterwarnings( + "ignore", module=np.lib._nanfunctions_impl if NUMPY2 else np.lib.nanfunctions + ) @parameter_space( seed=[100, 200, 300], mask=[ @@ -1849,7 +1859,9 @@ def test_complex_expression(self, seed, mask): mask=self.build_mask(np.ones(shape)), ) - @pytest.mark.filterwarnings("ignore", module=np.lib.nanfunctions) + @pytest.mark.filterwarnings( + "ignore", module=np.lib._nanfunctions_impl if NUMPY2 else np.lib.nanfunctions + ) @parameter_space( seed=[40, 41, 42], mask=[ diff --git a/tests/pipeline/test_filter.py b/tests/pipeline/test_filter.py index cef378f604..73fb01e93e 100644 --- a/tests/pipeline/test_filter.py +++ b/tests/pipeline/test_filter.py @@ -1,6 +1,7 @@ """ Tests for filter terms. 
""" + from functools import partial from itertools import product from operator import and_ diff --git a/tests/pipeline/test_frameload.py b/tests/pipeline/test_frameload.py index 2afc62dcca..9ec98becb0 100644 --- a/tests/pipeline/test_frameload.py +++ b/tests/pipeline/test_frameload.py @@ -1,6 +1,7 @@ """ Tests for zipline.pipeline.loaders.frame.DataFrameLoader. """ + from unittest import mock import numpy as np import pandas as pd diff --git a/tests/pipeline/test_pipeline_algo.py b/tests/pipeline/test_pipeline_algo.py index 88e1f5024f..08a741ed6d 100644 --- a/tests/pipeline/test_pipeline_algo.py +++ b/tests/pipeline/test_pipeline_algo.py @@ -1,6 +1,7 @@ """ Tests for Algorithms using the Pipeline API. """ + from pathlib import Path from parameterized import parameterized diff --git a/tests/pipeline/test_quarters_estimates.py b/tests/pipeline/test_quarters_estimates.py index 1a613d0185..f047c77ec7 100644 --- a/tests/pipeline/test_quarters_estimates.py +++ b/tests/pipeline/test_quarters_estimates.py @@ -933,10 +933,10 @@ def compute(self, today, assets, out, estimate): class PreviousVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase): def assert_compute(self, estimate, today): if today == pd.Timestamp("2015-01-13"): - assert_array_equal(estimate[:, 0], np.array([np.NaN, np.NaN, 12])) - assert_array_equal(estimate[:, 1], np.array([np.NaN, 12, 12])) + assert_array_equal(estimate[:, 0], np.array([np.nan, np.nan, 12])) + assert_array_equal(estimate[:, 1], np.array([np.nan, 12, 12])) else: - assert_array_equal(estimate[:, 0], np.array([np.NaN, 12, 12])) + assert_array_equal(estimate[:, 0], np.array([np.nan, 12, 12])) assert_array_equal(estimate[:, 1], np.array([12, 12, 12])) @classmethod @@ -948,10 +948,10 @@ class NextVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase): def assert_compute(self, estimate, today): if today == pd.Timestamp("2015-01-13"): assert_array_equal(estimate[:, 0], np.array([11, 12, 12])) - assert_array_equal(estimate[:, 1], np.array([np.NaN, np.NaN, 21])) + assert_array_equal(estimate[:, 1], np.array([np.nan, np.nan, 21])) else: - assert_array_equal(estimate[:, 0], np.array([np.NaN, 21, 21])) - assert_array_equal(estimate[:, 1], np.array([np.NaN, 21, 21])) + assert_array_equal(estimate[:, 0], np.array([np.nan, 21, 21])) + assert_array_equal(estimate[:, 1], np.array([np.nan, 21, 21])) @classmethod def make_loader(cls, events, columns): @@ -1154,9 +1154,9 @@ def make_expected_timelines(cls): [ cls.create_expected_df_for_factor_compute( [ - (0, np.NaN, cls.window_test_start_date), - (10, np.NaN, cls.window_test_start_date), - (20, np.NaN, cls.window_test_start_date), + (0, np.nan, cls.window_test_start_date), + (10, np.nan, cls.window_test_start_date), + (20, np.nan, cls.window_test_start_date), ], end_date, ) @@ -1166,7 +1166,7 @@ def make_expected_timelines(cls): cls.create_expected_df_for_factor_compute( [ (0, 101, pd.Timestamp("2015-01-20")), - (10, np.NaN, cls.window_test_start_date), + (10, np.nan, cls.window_test_start_date), (20, 121, pd.Timestamp("2015-01-20")), ], pd.Timestamp("2015-01-20"), @@ -1174,7 +1174,7 @@ def make_expected_timelines(cls): cls.create_expected_df_for_factor_compute( [ (0, 101, pd.Timestamp("2015-01-20")), - (10, np.NaN, cls.window_test_start_date), + (10, np.nan, cls.window_test_start_date), (20, 121, pd.Timestamp("2015-01-20")), ], pd.Timestamp("2015-01-21"), @@ -1220,9 +1220,9 @@ def make_expected_timelines(cls): [ cls.create_expected_df_for_factor_compute( [ - (0, np.NaN, cls.window_test_start_date), - (10, np.NaN, 
cls.window_test_start_date), - (20, np.NaN, cls.window_test_start_date), + (0, np.nan, cls.window_test_start_date), + (10, np.nan, cls.window_test_start_date), + (20, np.nan, cls.window_test_start_date), ], end_date, ) @@ -1235,7 +1235,7 @@ def make_expected_timelines(cls): cls.create_expected_df_for_factor_compute( [ (0, 101, pd.Timestamp("2015-02-10")), - (10, np.NaN, pd.Timestamp("2015-02-05")), + (10, np.nan, pd.Timestamp("2015-02-05")), (20, 121, pd.Timestamp("2015-02-10")), ], pd.Timestamp("2015-02-10"), @@ -1324,7 +1324,7 @@ def make_expected_timelines(cls): cls.create_expected_df_for_factor_compute( [ (0, 200, pd.Timestamp("2015-01-12")), - (10, np.NaN, cls.window_test_start_date), + (10, np.nan, cls.window_test_start_date), (20, 220, cls.window_test_start_date), (20, 221, pd.Timestamp("2015-01-17")), ], @@ -1337,7 +1337,7 @@ def make_expected_timelines(cls): [ (0, 200, pd.Timestamp("2015-01-12")), (0, 201, pd.Timestamp("2015-02-10")), - (10, np.NaN, cls.window_test_start_date), + (10, np.nan, cls.window_test_start_date), (20, 220, cls.window_test_start_date), (20, 221, pd.Timestamp("2015-01-17")), ], @@ -1350,8 +1350,8 @@ def make_expected_timelines(cls): [ cls.create_expected_df_for_factor_compute( [ - (0, np.NaN, cls.window_test_start_date), - (10, np.NaN, cls.window_test_start_date), + (0, np.nan, cls.window_test_start_date), + (10, np.nan, cls.window_test_start_date), (20, 220, cls.window_test_start_date), ], end_date, @@ -1362,7 +1362,7 @@ def make_expected_timelines(cls): cls.create_expected_df_for_factor_compute( [ (0, 200, pd.Timestamp("2015-01-12")), - (10, np.NaN, cls.window_test_start_date), + (10, np.nan, cls.window_test_start_date), (20, 220, cls.window_test_start_date), ], end_date, @@ -1373,7 +1373,7 @@ def make_expected_timelines(cls): cls.create_expected_df_for_factor_compute( [ (0, 200, pd.Timestamp("2015-01-12")), - (10, np.NaN, cls.window_test_start_date), + (10, np.nan, cls.window_test_start_date), (20, 220, cls.window_test_start_date), (20, 221, pd.Timestamp("2015-01-17")), ], @@ -1383,9 +1383,9 @@ def make_expected_timelines(cls): + [ cls.create_expected_df_for_factor_compute( [ - (0, np.NaN, cls.window_test_start_date), - (10, np.NaN, cls.window_test_start_date), - (20, np.NaN, cls.window_test_start_date), + (0, np.nan, cls.window_test_start_date), + (10, np.nan, cls.window_test_start_date), + (20, np.nan, cls.window_test_start_date), ], end_date, ) @@ -1633,9 +1633,9 @@ def make_expected_timelines(cls): [ cls.create_expected_df_for_factor_compute( [ - (0, np.NaN, cls.window_test_start_date), - (10, np.NaN, cls.window_test_start_date), - (20, np.NaN, cls.window_test_start_date), + (0, np.nan, cls.window_test_start_date), + (10, np.nan, cls.window_test_start_date), + (20, np.nan, cls.window_test_start_date), # Undo all adjustments that haven't happened yet. 
(30, 131 * 1 / 10, pd.Timestamp("2015-01-09")), (40, 140.0, pd.Timestamp("2015-01-09")), @@ -1648,9 +1648,9 @@ def make_expected_timelines(cls): ), cls.create_expected_df_for_factor_compute( [ - (0, np.NaN, cls.window_test_start_date), - (10, np.NaN, cls.window_test_start_date), - (20, np.NaN, cls.window_test_start_date), + (0, np.nan, cls.window_test_start_date), + (10, np.nan, cls.window_test_start_date), + (20, np.nan, cls.window_test_start_date), (30, 131, pd.Timestamp("2015-01-09")), (40, 140.0, pd.Timestamp("2015-01-09")), (50, 150.0 * 1 / 16, pd.Timestamp("2015-01-09")), @@ -1659,9 +1659,9 @@ def make_expected_timelines(cls): ), cls.create_expected_df_for_factor_compute( [ - (0, np.NaN, cls.window_test_start_date), - (10, np.NaN, cls.window_test_start_date), - (20, np.NaN, cls.window_test_start_date), + (0, np.nan, cls.window_test_start_date), + (10, np.nan, cls.window_test_start_date), + (20, np.nan, cls.window_test_start_date), (30, 131, pd.Timestamp("2015-01-09")), (40, 140.0, pd.Timestamp("2015-01-09")), (50, 150.0, pd.Timestamp("2015-01-09")), @@ -1672,9 +1672,9 @@ def make_expected_timelines(cls): [ cls.create_expected_df_for_factor_compute( [ - (0, np.NaN, cls.window_test_start_date), - (10, np.NaN, cls.window_test_start_date), - (20, np.NaN, cls.window_test_start_date), + (0, np.nan, cls.window_test_start_date), + (10, np.nan, cls.window_test_start_date), + (20, np.nan, cls.window_test_start_date), (30, 131 * 11, pd.Timestamp("2015-01-09")), (40, 140.0, pd.Timestamp("2015-01-09")), (50, 150.0, pd.Timestamp("2015-01-09")), @@ -1689,7 +1689,7 @@ def make_expected_timelines(cls): cls.create_expected_df_for_factor_compute( [ (0, 101, pd.Timestamp("2015-01-20")), - (10, np.NaN, cls.window_test_start_date), + (10, np.nan, cls.window_test_start_date), (20, 121 * 0.7 * 0.8, pd.Timestamp("2015-01-20")), (30, 231, pd.Timestamp("2015-01-20")), (40, 140.0 * 13, pd.Timestamp("2015-01-09")), @@ -1766,10 +1766,10 @@ def make_expected_timelines(cls): [ cls.create_expected_df_for_factor_compute( [ - (0, np.NaN, cls.window_test_start_date), - (10, np.NaN, cls.window_test_start_date), - (20, np.NaN, cls.window_test_start_date), - (30, np.NaN, cls.window_test_start_date), + (0, np.nan, cls.window_test_start_date), + (10, np.nan, cls.window_test_start_date), + (20, np.nan, cls.window_test_start_date), + (30, np.nan, cls.window_test_start_date), ], end_date, ) @@ -1778,9 +1778,9 @@ def make_expected_timelines(cls): + [ cls.create_expected_df_for_factor_compute( [ - (0, np.NaN, cls.window_test_start_date), - (10, np.NaN, cls.window_test_start_date), - (20, np.NaN, cls.window_test_start_date), + (0, np.nan, cls.window_test_start_date), + (10, np.nan, cls.window_test_start_date), + (20, np.nan, cls.window_test_start_date), (30, 131 * 11 * 12, pd.Timestamp("2015-01-20")), ], end_date, @@ -1794,7 +1794,7 @@ def make_expected_timelines(cls): cls.create_expected_df_for_factor_compute( [ (0, 101 * 7, pd.Timestamp("2015-02-10")), - (10, np.NaN, pd.Timestamp("2015-02-05")), + (10, np.nan, pd.Timestamp("2015-02-05")), (20, 121 * 0.7 * 0.8 * 0.9, pd.Timestamp("2015-02-10")), (30, 131 * 11 * 12, pd.Timestamp("2015-01-20")), (40, 140.0 * 13 * 14, pd.Timestamp("2015-02-10")), @@ -1843,7 +1843,7 @@ def make_expected_timelines(cls): (20, 120 * 5 / 3, cls.window_test_start_date), (20, 121 * 5 / 3, pd.Timestamp("2015-01-07")), (30, 230 * 1 / 10, cls.window_test_start_date), - (40, np.NaN, pd.Timestamp("2015-01-10")), + (40, np.nan, pd.Timestamp("2015-01-10")), (50, 250.0 * 1 / 15 * 1 / 16, 
pd.Timestamp("2015-01-12")), ], pd.Timestamp("2015-01-12"), @@ -1856,7 +1856,7 @@ def make_expected_timelines(cls): (20, 120, cls.window_test_start_date), (20, 121, pd.Timestamp("2015-01-07")), (30, 230, cls.window_test_start_date), - (40, np.NaN, pd.Timestamp("2015-01-10")), + (40, np.nan, pd.Timestamp("2015-01-10")), (50, 250.0 * 1 / 16, pd.Timestamp("2015-01-12")), ], pd.Timestamp("2015-01-13"), @@ -1869,7 +1869,7 @@ def make_expected_timelines(cls): (20, 120, cls.window_test_start_date), (20, 121, pd.Timestamp("2015-01-07")), (30, 230, cls.window_test_start_date), - (40, np.NaN, pd.Timestamp("2015-01-10")), + (40, np.nan, pd.Timestamp("2015-01-10")), (50, 250.0, pd.Timestamp("2015-01-12")), ], pd.Timestamp("2015-01-14"), @@ -1970,7 +1970,7 @@ def make_expected_timelines(cls): cls.create_expected_df_for_factor_compute( [ (0, 200 * 5 * 6 * 7, pd.Timestamp("2015-01-12")), - (10, np.NaN, cls.window_test_start_date), + (10, np.nan, cls.window_test_start_date), (20, 220 * 0.7 * 0.8 * 0.9, cls.window_test_start_date), (20, 221 * 0.8 * 0.9, pd.Timestamp("2015-01-17")), (40, 240 * 13 * 14, pd.Timestamp("2015-01-15")), @@ -1985,7 +1985,7 @@ def make_expected_timelines(cls): [ (0, 200 * 5 * 6 * 7, pd.Timestamp("2015-01-12")), (0, 201, pd.Timestamp("2015-02-10")), - (10, np.NaN, cls.window_test_start_date), + (10, np.nan, cls.window_test_start_date), (20, 220 * 0.7 * 0.8 * 0.9, cls.window_test_start_date), (20, 221 * 0.8 * 0.9, pd.Timestamp("2015-01-17")), (40, 240 * 13 * 14, pd.Timestamp("2015-01-15")), @@ -2000,12 +2000,12 @@ def make_expected_timelines(cls): [ cls.create_expected_df_for_factor_compute( [ - (0, np.NaN, cls.window_test_start_date), - (10, np.NaN, cls.window_test_start_date), + (0, np.nan, cls.window_test_start_date), + (10, np.nan, cls.window_test_start_date), (20, 220 * 5 / 3, cls.window_test_start_date), (30, 230 * 1 / 10, cls.window_test_start_date), - (40, np.NaN, cls.window_test_start_date), - (50, np.NaN, cls.window_test_start_date), + (40, np.nan, cls.window_test_start_date), + (50, np.nan, cls.window_test_start_date), ], pd.Timestamp("2015-01-09"), ) @@ -2014,10 +2014,10 @@ def make_expected_timelines(cls): cls.create_expected_df_for_factor_compute( [ (0, 200 * 1 / 4, pd.Timestamp("2015-01-12")), - (10, np.NaN, cls.window_test_start_date), + (10, np.nan, cls.window_test_start_date), (20, 220 * 5 / 3, cls.window_test_start_date), - (30, np.NaN, cls.window_test_start_date), - (40, np.NaN, cls.window_test_start_date), + (30, np.nan, cls.window_test_start_date), + (40, np.nan, cls.window_test_start_date), ], pd.Timestamp("2015-01-12"), ) @@ -2026,10 +2026,10 @@ def make_expected_timelines(cls): cls.create_expected_df_for_factor_compute( [ (0, 200, pd.Timestamp("2015-01-12")), - (10, np.NaN, cls.window_test_start_date), + (10, np.nan, cls.window_test_start_date), (20, 220, cls.window_test_start_date), - (30, np.NaN, cls.window_test_start_date), - (40, np.NaN, cls.window_test_start_date), + (30, np.nan, cls.window_test_start_date), + (40, np.nan, cls.window_test_start_date), ], end_date, ) @@ -2039,10 +2039,10 @@ def make_expected_timelines(cls): cls.create_expected_df_for_factor_compute( [ (0, 200 * 5, pd.Timestamp("2015-01-12")), - (10, np.NaN, cls.window_test_start_date), + (10, np.nan, cls.window_test_start_date), (20, 220 * 0.7, cls.window_test_start_date), - (30, np.NaN, cls.window_test_start_date), - (40, np.NaN, cls.window_test_start_date), + (30, np.nan, cls.window_test_start_date), + (40, np.nan, cls.window_test_start_date), ], end_date, ) @@ -2052,11 +2052,11 @@ def 
make_expected_timelines(cls): cls.create_expected_df_for_factor_compute( [ (0, 200 * 5 * 6, pd.Timestamp("2015-01-12")), - (10, np.NaN, cls.window_test_start_date), + (10, np.nan, cls.window_test_start_date), (20, 220 * 0.7 * 0.8, cls.window_test_start_date), (20, 221 * 0.8, pd.Timestamp("2015-01-17")), - (30, np.NaN, cls.window_test_start_date), - (40, np.NaN, cls.window_test_start_date), + (30, np.nan, cls.window_test_start_date), + (40, np.nan, cls.window_test_start_date), ], pd.Timestamp("2015-01-20"), ) @@ -2064,11 +2064,11 @@ def make_expected_timelines(cls): + [ cls.create_expected_df_for_factor_compute( [ - (0, np.NaN, cls.window_test_start_date), - (10, np.NaN, cls.window_test_start_date), - (20, np.NaN, cls.window_test_start_date), - (30, np.NaN, cls.window_test_start_date), - (40, np.NaN, cls.window_test_start_date), + (0, np.nan, cls.window_test_start_date), + (10, np.nan, cls.window_test_start_date), + (20, np.nan, cls.window_test_start_date), + (30, np.nan, cls.window_test_start_date), + (40, np.nan, cls.window_test_start_date), ], end_date, ) @@ -2281,35 +2281,35 @@ def make_loader(cls, events, columns): def make_expected_timelines_1q_out(cls): return { pd.Timestamp("2015-01-06"): { - "estimate1": np.array([[np.NaN, np.NaN]] * 3), - "estimate2": np.array([[np.NaN, np.NaN]] * 3), + "estimate1": np.array([[np.nan, np.nan]] * 3), + "estimate2": np.array([[np.nan, np.nan]] * 3), }, pd.Timestamp("2015-01-07"): { - "estimate1": np.array([[np.NaN, np.NaN]] * 3), - "estimate2": np.array([[np.NaN, np.NaN]] * 3), + "estimate1": np.array([[np.nan, np.nan]] * 3), + "estimate2": np.array([[np.nan, np.nan]] * 3), }, pd.Timestamp("2015-01-08"): { - "estimate1": np.array([[np.NaN, np.NaN]] * 2 + [[np.NaN, 1110.0]]), - "estimate2": np.array([[np.NaN, np.NaN]] * 2 + [[np.NaN, 2110.0]]), + "estimate1": np.array([[np.nan, np.nan]] * 2 + [[np.nan, 1110.0]]), + "estimate2": np.array([[np.nan, np.nan]] * 2 + [[np.nan, 2110.0]]), }, pd.Timestamp("2015-01-09"): { "estimate1": np.array( - [[np.NaN, np.NaN]] - + [[np.NaN, 1110.0 * 4]] + [[np.nan, np.nan]] + + [[np.nan, 1110.0 * 4]] + [[1100 * 3.0, 1110.0 * 4]] ), "estimate2": np.array( - [[np.NaN, np.NaN]] - + [[np.NaN, 2110.0 * 4]] + [[np.nan, np.nan]] + + [[np.nan, 2110.0 * 4]] + [[2100 * 3.0, 2110.0 * 4]] ), }, pd.Timestamp("2015-01-12"): { "estimate1": np.array( - [[np.NaN, np.NaN]] * 2 + [[1200 * 3.0, 1210.0 * 4]] + [[np.nan, np.nan]] * 2 + [[1200 * 3.0, 1210.0 * 4]] ), "estimate2": np.array( - [[np.NaN, np.NaN]] * 2 + [[2200 * 3.0, 2210.0 * 4]] + [[np.nan, np.nan]] * 2 + [[2200 * 3.0, 2210.0 * 4]] ), }, } @@ -2317,13 +2317,13 @@ def make_expected_timelines_1q_out(cls): @classmethod def make_expected_timelines_2q_out(cls): return { - pd.Timestamp("2015-01-06"): {"estimate2": np.array([[np.NaN, np.NaN]] * 3)}, - pd.Timestamp("2015-01-07"): {"estimate2": np.array([[np.NaN, np.NaN]] * 3)}, - pd.Timestamp("2015-01-08"): {"estimate2": np.array([[np.NaN, np.NaN]] * 3)}, - pd.Timestamp("2015-01-09"): {"estimate2": np.array([[np.NaN, np.NaN]] * 3)}, + pd.Timestamp("2015-01-06"): {"estimate2": np.array([[np.nan, np.nan]] * 3)}, + pd.Timestamp("2015-01-07"): {"estimate2": np.array([[np.nan, np.nan]] * 3)}, + pd.Timestamp("2015-01-08"): {"estimate2": np.array([[np.nan, np.nan]] * 3)}, + pd.Timestamp("2015-01-09"): {"estimate2": np.array([[np.nan, np.nan]] * 3)}, pd.Timestamp("2015-01-12"): { "estimate2": np.array( - [[np.NaN, np.NaN]] * 2 + [[2100 * 3.0, 2110.0 * 4]] + [[np.nan, np.nan]] * 2 + [[2100 * 3.0, 2110.0 * 4]] ) }, } @@ -2347,10 +2347,10 @@ def 
make_expected_timelines_1q_out(cls): return { pd.Timestamp("2015-01-06"): { "estimate1": np.array( - [[np.NaN, np.NaN]] + [[1100.0 * 1 / 0.3, 1110.0 * 1 / 0.4]] * 2 + [[np.nan, np.nan]] + [[1100.0 * 1 / 0.3, 1110.0 * 1 / 0.4]] * 2 ), "estimate2": np.array( - [[np.NaN, np.NaN]] + [[2100.0 * 1 / 0.3, 2110.0 * 1 / 0.4]] * 2 + [[np.nan, np.nan]] + [[2100.0 * 1 / 0.3, 2110.0 * 1 / 0.4]] * 2 ), }, pd.Timestamp("2015-01-07"): { @@ -2366,8 +2366,8 @@ def make_expected_timelines_1q_out(cls): "estimate2": np.array([[2100 * 3.0, 2210.0 * 4]] * 3), }, pd.Timestamp("2015-01-12"): { - "estimate1": np.array([[1200 * 3.0, np.NaN]] * 3), - "estimate2": np.array([[2200 * 3.0, np.NaN]] * 3), + "estimate1": np.array([[1200 * 3.0, np.nan]] * 3), + "estimate2": np.array([[2200 * 3.0, np.nan]] * 3), }, } @@ -2376,15 +2376,15 @@ def make_expected_timelines_2q_out(cls): return { pd.Timestamp("2015-01-06"): { "estimate2": np.array( - [[np.NaN, np.NaN]] + [[2200 * 1 / 0.3, 2210.0 * 1 / 0.4]] * 2 + [[np.nan, np.nan]] + [[2200 * 1 / 0.3, 2210.0 * 1 / 0.4]] * 2 ) }, pd.Timestamp("2015-01-07"): {"estimate2": np.array([[2200.0, 2210.0]] * 3)}, pd.Timestamp("2015-01-08"): {"estimate2": np.array([[2200, 2210.0]] * 3)}, pd.Timestamp("2015-01-09"): { - "estimate2": np.array([[2200 * 3.0, np.NaN]] * 3) + "estimate2": np.array([[2200 * 3.0, np.nan]] * 3) }, - pd.Timestamp("2015-01-12"): {"estimate2": np.array([[np.NaN, np.NaN]] * 3)}, + pd.Timestamp("2015-01-12"): {"estimate2": np.array([[np.nan, np.nan]] * 3)}, } @@ -2608,7 +2608,7 @@ def make_expected_out(cls): pd.DataFrame( { SID_FIELD_NAME: cls.s0, - "estimate": np.NaN, + "estimate": np.nan, }, index=pd.date_range( cls.test_start_date, @@ -2633,11 +2633,11 @@ def make_expected_out(cls): index=pd.date_range(cls.test_start_date, cls.test_end_date), ), pd.DataFrame( - {SID_FIELD_NAME: cls.s2, "estimate": np.NaN}, + {SID_FIELD_NAME: cls.s2, "estimate": np.nan}, index=pd.date_range(cls.test_start_date, cls.test_end_date), ), pd.DataFrame( - {SID_FIELD_NAME: cls.s3, "estimate": np.NaN}, + {SID_FIELD_NAME: cls.s3, "estimate": np.nan}, index=pd.date_range( cls.test_start_date, cls.test_end_date - timedelta(1), @@ -2648,7 +2648,7 @@ def make_expected_out(cls): index=pd.date_range(cls.test_end_date, cls.test_end_date), ), pd.DataFrame( - {SID_FIELD_NAME: cls.s4, "estimate": np.NaN}, + {SID_FIELD_NAME: cls.s4, "estimate": np.nan}, index=pd.date_range( cls.test_start_date, cls.test_end_date - timedelta(2), @@ -2675,7 +2675,7 @@ def make_expected_out(cls): pd.DataFrame( { SID_FIELD_NAME: cls.s0, - "estimate": np.NaN, + "estimate": np.nan, }, index=pd.date_range( cls.test_start_date, @@ -2700,11 +2700,11 @@ def make_expected_out(cls): index=pd.date_range(cls.test_start_date, cls.test_end_date), ), pd.DataFrame( - {SID_FIELD_NAME: cls.s2, "estimate": np.NaN}, + {SID_FIELD_NAME: cls.s2, "estimate": np.nan}, index=pd.date_range(cls.test_start_date, cls.test_end_date), ), pd.DataFrame( - {SID_FIELD_NAME: cls.s3, "estimate": np.NaN}, + {SID_FIELD_NAME: cls.s3, "estimate": np.nan}, index=pd.date_range( cls.test_start_date, cls.test_end_date - timedelta(1), @@ -2715,7 +2715,7 @@ def make_expected_out(cls): index=pd.date_range(cls.test_end_date, cls.test_end_date), ), pd.DataFrame( - {SID_FIELD_NAME: cls.s4, "estimate": np.NaN}, + {SID_FIELD_NAME: cls.s4, "estimate": np.nan}, index=pd.date_range( cls.test_start_date, cls.test_end_date - timedelta(2), diff --git a/tests/pipeline/test_slice.py b/tests/pipeline/test_slice.py index bf748b3088..0d38c2ad5b 100644 --- a/tests/pipeline/test_slice.py 
+++ b/tests/pipeline/test_slice.py @@ -1,6 +1,7 @@ """ Tests for slicing pipeline terms. """ + from numpy import where import pandas as pd from pandas.testing import assert_frame_equal diff --git a/tests/pipeline/test_statistical.py b/tests/pipeline/test_statistical.py index 3c92470271..2f72302e64 100644 --- a/tests/pipeline/test_statistical.py +++ b/tests/pipeline/test_statistical.py @@ -334,9 +334,9 @@ def test_regression_of_returns_factor(self, returns_length, regression_length): x=my_asset_returns, ) for i, output in enumerate(outputs): - expected_output_results[output][ - day, asset_column - ] = expected_regression_results[i] + expected_output_results[output][day, asset_column] = ( + expected_regression_results[i] + ) for output in outputs: output_result = output_results[output] @@ -947,9 +947,9 @@ def test_factor_regression_method_two_factors(self, regression_length): x=asset_returns_10, ) for i, output in enumerate(outputs): - expected_output_results[output][ - day, asset_column - ] = expected_regression_results[i] + expected_output_results[output][day, asset_column] = ( + expected_regression_results[i] + ) for output in outputs: output_result = output_results[output] diff --git a/tests/pipeline/test_technical.py b/tests/pipeline/test_technical.py index f9a706d2eb..b1a9d37525 100644 --- a/tests/pipeline/test_technical.py +++ b/tests/pipeline/test_technical.py @@ -1,8 +1,7 @@ import numpy as np import pandas as pd -import talib from numpy.random import RandomState - +from packaging.version import Version from zipline.lib.adjusted_array import AdjustedArray from zipline.pipeline.data import USEquityPricing from zipline.pipeline.factors import ( @@ -23,6 +22,14 @@ import pytest import re +# talib is not yet compatible with numpy 2.0 and is now an optional dependency. +NUMPY2 = Version(np.__version__) >= Version("2.0.0") +if not NUMPY2: + try: + import talib + except ImportError: + talib = None + class BollingerBandsTestCase(BaseUSEquityPipelineTestCase): def closes(self, mask_last_sid): @@ -75,6 +82,7 @@ def expected_bbands(self, window_length, k, closes): mask_last_sid={True, False}, __fail_fast=True, ) + @pytest.mark.skipif(NUMPY2 or talib is None, reason="requires numpy<2 and talib") def test_bollinger_bands(self, window_length, k, mask_last_sid): closes = self.closes(mask_last_sid=mask_last_sid) mask = ~np.isnan(closes) @@ -194,6 +202,7 @@ def test_fso_expected_basic(self): range(5), ], ) + @pytest.mark.skipif(NUMPY2 or talib is None, reason="requires numpy<2 and talib") def test_fso_expected_with_talib(self, seed): """ Test the output that is returned from the fast stochastic oscillator @@ -560,7 +569,6 @@ class TestRSI: ], ) def test_rsi(self, seed_value, expected): - rsi = RSI() today = np.datetime64(1, "ns") diff --git a/tests/pipeline/test_term.py b/tests/pipeline/test_term.py index 97c09563bf..3b64742091 100644 --- a/tests/pipeline/test_term.py +++ b/tests/pipeline/test_term.py @@ -1,6 +1,7 @@ """ Tests for Term. """ + from collections import Counter from itertools import product diff --git a/tests/resources/pipeline_inputs/generate.py b/tests/resources/pipeline_inputs/generate.py index 986ecd0f3e..5984200977 100644 --- a/tests/resources/pipeline_inputs/generate.py +++ b/tests/resources/pipeline_inputs/generate.py @@ -1,6 +1,7 @@ """ Quick and dirty script to generate test case inputs.
""" + from pathlib import Path from pandas_datareader.data import DataReader diff --git a/tests/test_fetcher.py b/tests/test_fetcher.py index 946c015c6b..63e24fc685 100644 --- a/tests/test_fetcher.py +++ b/tests/test_fetcher.py @@ -184,7 +184,7 @@ def handle_data(context, data): # 390 bars of signal 4 on 1/9 # 390 bars of signal 4 on 1/9 (forward filled) - np.testing.assert_array_equal([np.NaN] * 390, signal[0:390]) + np.testing.assert_array_equal([np.nan] * 390, signal[0:390]) np.testing.assert_array_equal([2] * 390, signal[390:780]) np.testing.assert_array_equal([3] * 780, signal[780:1560]) np.testing.assert_array_equal([4] * 780, signal[1560:]) diff --git a/tests/test_memoize.py b/tests/test_memoize.py index 1d5be05cfb..6930cd0b37 100644 --- a/tests/test_memoize.py +++ b/tests/test_memoize.py @@ -1,4 +1,5 @@ """Tests for zipline.utils.memoize.""" + from collections import defaultdict import gc diff --git a/tests/test_testing.py b/tests/test_testing.py index da6165f7ae..16e1dbcd90 100644 --- a/tests/test_testing.py +++ b/tests/test_testing.py @@ -1,6 +1,7 @@ """ Tests for our testing utilities. """ + from itertools import product from unittest import TestCase diff --git a/tests/utils/test_numpy_utils.py b/tests/utils/test_numpy_utils.py index 0465f69823..cf8b250021 100644 --- a/tests/utils/test_numpy_utils.py +++ b/tests/utils/test_numpy_utils.py @@ -1,6 +1,7 @@ """ Tests for zipline.utils.numpy_utils. """ + from datetime import datetime import pytest diff --git a/tests/utils/test_pandas_utils.py b/tests/utils/test_pandas_utils.py index 7c2a0c8983..4fc111a672 100644 --- a/tests/utils/test_pandas_utils.py +++ b/tests/utils/test_pandas_utils.py @@ -1,6 +1,7 @@ """ Tests for zipline/utils/pandas_utils.py """ + import pandas as pd from packaging.version import Version from zipline.testing.predicates import assert_equal diff --git a/tests/utils/test_preprocess.py b/tests/utils/test_preprocess.py index 08e86e9588..07e87671b8 100644 --- a/tests/utils/test_preprocess.py +++ b/tests/utils/test_preprocess.py @@ -1,6 +1,7 @@ """ Tests for zipline.utils.validate. """ + from operator import attrgetter from types import FunctionType