From 8d805e492cc961f2ad44fc0c443f5bbe1ea8ddde Mon Sep 17 00:00:00 2001 From: Andrew Nelson Date: Thu, 1 Nov 2018 07:50:59 +1100 Subject: [PATCH 01/70] ENH: parallelised brute --- scipy/optimize/optimize.py | 58 +++++++++++++++++++++----- scipy/optimize/tests/test_optimize.py | 59 +++++++++++++++++---------- 2 files changed, 84 insertions(+), 33 deletions(-) diff --git a/scipy/optimize/optimize.py b/scipy/optimize/optimize.py index 13c24f9aa41f..eb82e61f8c28 100644 --- a/scipy/optimize/optimize.py +++ b/scipy/optimize/optimize.py @@ -32,12 +32,13 @@ import numpy from scipy._lib.six import callable, xrange from numpy import (atleast_1d, eye, mgrid, argmin, zeros, shape, squeeze, - vectorize, asarray, sqrt, Inf, asfarray, isinf) + asarray, sqrt, Inf, asfarray, isinf) import numpy as np from .linesearch import (line_search_wolfe1, line_search_wolfe2, line_search_wolfe2 as line_search, LineSearchWarning) from scipy._lib._util import getargspec_no_self as _getargspec +from scipy._lib._util import MapWrapper # standard status messages of optimizers @@ -2654,7 +2655,7 @@ def _endprint(x, flag, fval, maxfun, xtol, disp): def brute(func, ranges, args=(), Ns=20, full_output=0, finish=fmin, - disp=False): + disp=False, workers=1): """Minimize a function over a given range by brute force. Uses the "brute force" method, i.e. computes the function's value @@ -2704,7 +2705,17 @@ def brute(func, ranges, args=(), Ns=20, full_output=0, finish=fmin, and/or `disp` as keyword arguments. Use None if no "polishing" function is to be used. See Notes for more details. disp : bool, optional - Set to True to print convergence messages. + Set to True to print convergence messages from the `finish` callable. + workers : int or map-like callable, optional + If `workers` is an int the grid is subdivided into `workers` + sections and evaluated in parallel (uses `multiprocessing.Pool`). + Supply `-1` to use all cores available to the Process. + Alternatively supply a map-like callable, such as + `multiprocessing.Pool.map` for evaluating the grid in parallel. + This evaluation is carried out as ``workers(func, iterable)``. + Requires that `func` be pickleable. + + .. 
versionadded:: 1.3.0 Returns ------- @@ -2827,16 +2838,27 @@ def brute(func, ranges, args=(), Ns=20, full_output=0, finish=fmin, if (N == 1): lrange = lrange[0] - def _scalarfunc(*params): - params = asarray(params).flatten() - return func(params, *args) + grid = np.mgrid[lrange] + + # obtain an array of parameters that is iterable by a map-like callable + inpt_shape = grid.shape + if (N > 1): + grid = np.reshape(grid, (inpt_shape[0], np.prod(inpt_shape[1:]))).T + + wrapped_func = _Brute_Wrapper(func, args) + + # iterate over input arrays, possibly in parallel + with MapWrapper(pool=workers) as mapper: + Jout = np.array(list(mapper(wrapped_func, grid))) + if (N == 1): + grid = (grid,) + Jout = np.squeeze(Jout) + elif (N > 1): + Jout = np.reshape(Jout, inpt_shape[1:]) + grid = np.reshape(grid.T, inpt_shape) - vecfunc = vectorize(_scalarfunc) - grid = mgrid[lrange] - if (N == 1): - grid = (grid,) - Jout = vecfunc(*grid) Nshape = shape(Jout) + indx = argmin(Jout.ravel(), axis=-1) Nindx = zeros(N, int) xmin = zeros(N, float) @@ -2851,6 +2873,7 @@ def _scalarfunc(*params): if (N == 1): grid = grid[0] xmin = xmin[0] + if callable(finish): # set up kwargs for `finish` function finish_args = _getargspec(finish).args @@ -2887,6 +2910,19 @@ def _scalarfunc(*params): return xmin +class _Brute_Wrapper(object): + """ + Object to wrap user cost function for optimize.brute, allowing picklability + """ + def __init__(self, f, args): + self.f = f + self.args = [] if args is None else args + + def __call__(self, x): + # flatten needed for one dimensional case. + return self.f(np.asarray(x).flatten(), *self.args) + + def show_options(solver=None, method=None, disp=True): """ Show documentation for additional options of optimization solvers. diff --git a/scipy/optimize/tests/test_optimize.py b/scipy/optimize/tests/test_optimize.py index e50c153a1feb..b96faec95d6c 100644 --- a/scipy/optimize/tests/test_optimize.py +++ b/scipy/optimize/tests/test_optimize.py @@ -1233,6 +1233,28 @@ def test_attributes_present(self): assert_(attribute in dir(res)) +def f1(z, *params): + x, y = z + a, b, c, d, e, f, g, h, i, j, k, l, scale = params + return (a * x**2 + b * x * y + c * y**2 + d*x + e*y + f) + + +def f2(z, *params): + x, y = z + a, b, c, d, e, f, g, h, i, j, k, l, scale = params + return (-g*np.exp(-((x-h)**2 + (y-i)**2) / scale)) + + +def f3(z, *params): + x, y = z + a, b, c, d, e, f, g, h, i, j, k, l, scale = params + return (-j*np.exp(-((x-k)**2 + (y-l)**2) / scale)) + + +def brute_func(z, *params): + return f1(z, *params) + f2(z, *params) + f3(z, *params) + + class TestBrute: # Test the "brute force" method def setup_method(self): @@ -1240,38 +1262,20 @@ def setup_method(self): self.rranges = (slice(-4, 4, 0.25), slice(-4, 4, 0.25)) self.solution = np.array([-1.05665192, 1.80834843]) - def f1(self, z, *params): - x, y = z - a, b, c, d, e, f, g, h, i, j, k, l, scale = params - return (a * x**2 + b * x * y + c * y**2 + d*x + e*y + f) - - def f2(self, z, *params): - x, y = z - a, b, c, d, e, f, g, h, i, j, k, l, scale = params - return (-g*np.exp(-((x-h)**2 + (y-i)**2) / scale)) - - def f3(self, z, *params): - x, y = z - a, b, c, d, e, f, g, h, i, j, k, l, scale = params - return (-j*np.exp(-((x-k)**2 + (y-l)**2) / scale)) - - def func(self, z, *params): - return self.f1(z, *params) + self.f2(z, *params) + self.f3(z, *params) - def test_brute(self): # test fmin - resbrute = optimize.brute(self.func, self.rranges, args=self.params, + resbrute = optimize.brute(brute_func, self.rranges, args=self.params, 
full_output=True, finish=optimize.fmin) assert_allclose(resbrute[0], self.solution, atol=1e-3) - assert_allclose(resbrute[1], self.func(self.solution, *self.params), + assert_allclose(resbrute[1], brute_func(self.solution, *self.params), atol=1e-3) # test minimize - resbrute = optimize.brute(self.func, self.rranges, args=self.params, + resbrute = optimize.brute(brute_func, self.rranges, args=self.params, full_output=True, finish=optimize.minimize) assert_allclose(resbrute[0], self.solution, atol=1e-3) - assert_allclose(resbrute[1], self.func(self.solution, *self.params), + assert_allclose(resbrute[1], brute_func(self.solution, *self.params), atol=1e-3) def test_1D(self): @@ -1284,6 +1288,17 @@ def f(x): optimize.brute(f, [(-1, 1)], Ns=3, finish=None) + def test_workers(self): + # check that parallel evaluation works + resbrute = optimize.brute(brute_func, self.rranges, args=self.params, + full_output=True, finish=None) + + resbrute1 = optimize.brute(brute_func, self.rranges, args=self.params, + full_output=True, finish=None, workers=2) + + assert_equal(resbrute1[-1], resbrute[-1]) + assert_equal(resbrute1[0], resbrute[0]) + class TestIterationLimits(object): # Tests that optimisation does not give up before trying requested From 6c708c036f27d3172f866112ccb42bd8d64eed7a Mon Sep 17 00:00:00 2001 From: Tyler Reddy Date: Thu, 8 Nov 2018 14:22:07 -0800 Subject: [PATCH 02/70] DOC: update 1.2.0 release notes [ci skip] --- .mailmap | 1 + doc/release/1.2.0-notes.rst | 677 +++++++++++++++++++++++++++++++++++- 2 files changed, 661 insertions(+), 17 deletions(-) diff --git a/.mailmap b/.mailmap index 4a697525d41a..edeea7b62ab0 100644 --- a/.mailmap +++ b/.mailmap @@ -68,6 +68,7 @@ Behzad Nouri behzad nouri Benjamin Root <> weathergod <> Benny Malengier Benny Bhavika Tekwani bhavikat +Blair Azzopardi bsdz Brett R. Murphy brettrmurphy Brian Hawthorne brian.hawthorne Brian Newsom Brian Newsom diff --git a/doc/release/1.2.0-notes.rst b/doc/release/1.2.0-notes.rst index 3913186d4d99..6c0b387fb40a 100644 --- a/doc/release/1.2.0-notes.rst +++ b/doc/release/1.2.0-notes.rst @@ -6,28 +6,209 @@ SciPy 1.2.0 Release Notes .. contents:: -SciPy 1.2.0 is the culmination of X months of hard work. It contains -many new features, numerous bug-fixes, improved test coverage and -better documentation. There have been a number of deprecations and -API changes in this release, which are documented below. All users -are encouraged to upgrade to this release, as there are a large number -of bug-fixes and optimizations. Moreover, our development attention -will now shift to bug-fix releases on the 1.0.x branch, and on adding -new features on the master branch. +SciPy 1.2.0 is the culmination of 6 months of hard work. It contains +many new features, numerous bug-fixes, improved test coverage and better +documentation. There have been a number of deprecations and API changes +in this release, which are documented below. All users are encouraged to +upgrade to this release, as there are a large number of bug-fixes and +optimizations. Before upgrading, we recommend that users check that +their own code does not use deprecated SciPy functionality (to do so, +run your code with ``python -Wd`` and check for ``DeprecationWarning`` s). +Our development attention will now shift to bug-fix releases on the +1.2.x branch, and on adding new features on the master branch. -This release requires Python 2.7 or 3.4+ and NumPy 1.8.1 or greater. +This release requires Python 2.7 or 3.4+ and NumPy 1.8.2 or greater. 
+For running on PyPy, PyPy3 6.0+ and NumPy 1.15.0 are required.
+
+Highlights of this release
+--------------------------
+
+- 1-D root finding improvements with a new solver, ``toms748``, and a new
+  unified interface, ``root_scalar``
+- New ``dual_annealing`` optimization method that combines stochastic and
+  local deterministic searching
+- A new optimization algorithm, ``shgo`` (simplicial homology
+  global optimization) for derivative free optimization problems
+- A new category of quaternion-based transformations is available in
+  `scipy.spatial.transform`

 New features
 ============

+`scipy.ndimage` improvements
+--------------------------------
+
+Proper spline coefficient calculations have been added for the ``mirror``,
+``wrap``, and ``reflect`` modes of `scipy.ndimage.rotate`.
+
+`scipy.fftpack` improvements
+--------------------------------
+
+DCT-IV, DST-IV, DCT-I, and DST-I orthonormalization are now supported in
+`scipy.fftpack`.
+
+`scipy.interpolate` improvements
+--------------------------------
+
+`scipy.interpolate.pade` now accepts a new argument for the order of the
+numerator.
+
+`scipy.cluster` improvements
+----------------------------
+
+`scipy.cluster.vq.kmeans2` gained a new initialization method, kmeans++.
+
 `scipy.special` improvements
 ----------------------------

-The function `softmax` was added to `scipy.special`.
+The function ``softmax`` was added to `scipy.special`.
+
+`scipy.optimize` improvements
+-----------------------------
+
+The one-dimensional nonlinear solvers have been given a unified interface,
+`scipy.optimize.root_scalar`, similar to the `scipy.optimize.root` interface
+for multi-dimensional solvers. ``scipy.optimize.root_scalar(f, bracket=[a, b],
+method="brenth")`` is equivalent to ``scipy.optimize.brenth(f, a, b)``. If no
+``method`` is specified, an appropriate one will be selected based upon the
+bracket and the number of derivatives available.
+
+Algorithm 748 of Alefeld, Potra and Shi for root-finding within an enclosing
+interval has been added as `scipy.optimize.toms748`. This provides guaranteed
+convergence to a root with a convergence rate per function evaluation of
+approximately 1.65 (for sufficiently well-behaved functions).
+
+``differential_evolution`` now has the ``updating`` and ``workers`` keywords.
+The first chooses between continuous updating of the best solution vector (the
+default) and updating once per generation. Continuous updating can lead to
+faster convergence. The ``workers`` keyword accepts an ``int`` or a map-like
+callable, and parallelises the solver (with the side effect of updating once
+per generation). Supplying an ``int`` evaluates the trial solutions in N
+parallel parts. Supplying a map-like callable allows other parallelisation
+approaches (such as ``mpi4py`` or ``joblib``) to be used.
+
+``dual_annealing`` (and ``shgo`` below) is a powerful new general-purpose
+global optimization (GO) algorithm. ``dual_annealing`` uses two annealing
+processes to accelerate the convergence towards the global minimum of an
+objective function. The first annealing process controls the stochastic
+Markov chain searching and the second annealing process controls the
+deterministic minimization. Dual annealing is thus a hybrid method that takes
+advantage of stochastic and local deterministic searching in an efficient way.
+
+``shgo`` (simplicial homology global optimization) is a similar algorithm
+appropriate for solving black box and derivative free optimization (DFO)
+problems. The algorithm generally converges to the global solution in finite
+time. The convergence holds for non-linear inequality and equality
+constraints. In addition to returning a global minimum, the algorithm also
+returns any other global and local minima found after every iteration. This
+makes it useful for exploring the solutions in a domain.
+
+`scipy.optimize.newton` can now accept a scalar or an array.
+
+``MINPACK`` usage is now thread-safe, so ``MINPACK`` and its callbacks may
+be used on multiple threads.
+
+`scipy.signal` improvements
+---------------------------
+
+Digital filter design functions now include a parameter to specify the sampling
+rate. Previously, digital filters could only be specified using normalized
+frequency, but different functions used different scales (e.g. 0 to 1 for
+``butter`` vs 0 to π for ``freqz``), leading to errors and confusion. With
+the ``fs`` parameter, ordinary frequencies can now be entered directly into
+functions, with the normalization handled internally.
+
+``find_peaks`` and related functions no longer raise an exception if the
+properties of a peak have unexpected values (e.g. a prominence of 0). A
+``PeakPropertyWarning`` is given instead.
+
+The new keyword argument ``plateau_size`` was added to ``find_peaks``.
+``plateau_size`` may be used to select peaks based on the length of the
+flat top of a peak.
+
+The ``welch()`` and ``csd()`` methods in `scipy.signal` now support calculation
+of a median average PSD, using the ``average='median'`` keyword.
+
+`scipy.sparse` improvements
+---------------------------
+
+The `scipy.sparse.bsr_matrix.tocsr` method is now implemented directly instead
+of converting via COO format, and the `scipy.sparse.bsr_matrix.tocsc` method
+is now also routed via CSR conversion instead of COO. The efficiency of both
+conversions is now improved.
+
+The issue where SuperLU or UMFPACK solvers crashed on matrices with
+non-canonical format in `scipy.sparse.linalg` was fixed. The solver wrapper
+canonicalizes the matrix if necessary before calling the SuperLU or UMFPACK
+solver.
+
+The ``largest`` option of `scipy.sparse.linalg.lobpcg()` was fixed to have
+the correct (and expected) behavior. The order of the eigenvalues was made
+consistent with the ARPACK solver (``eigs()``), i.e. ascending for the
+smallest eigenvalues, and descending for the largest eigenvalues.
+
+The `scipy.sparse.random` function is now faster and also supports integer and
+complex values by passing the appropriate value to the ``dtype`` argument.
+
+`scipy.spatial` improvements
+----------------------------
+
+The function `scipy.spatial.distance.jaccard` was modified to return 0 instead
+of ``np.nan`` when two all-zero vectors are compared.
+
+Support for the Jensen-Shannon distance, the square root of the divergence, has
+been added under `scipy.spatial.distance.jensenshannon`.
+
+An optional keyword was added to the function
+`scipy.spatial.cKDTree.query_ball_point()` to sort or not sort the returned
+indices. Not sorting the indices can speed up calls.
+
+A new category of quaternion-based transformations is available in
+`scipy.spatial.transform`, including spherical linear interpolation of
+rotations (``Slerp``), conversions to and from quaternions, Euler angles,
+and general rotation and inversion capabilities
+(`spatial.transform.Rotation`), and uniform random sampling of 3D
+rotations (`spatial.transform.Rotation.random`).
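+
+For example, a rotation can be constructed from Euler angles, converted to a
+quaternion, and applied to vectors (a minimal sketch of the interface
+described above)::
+
+    from scipy.spatial.transform import Rotation
+
+    r = Rotation.from_euler('z', 90, degrees=True)  # 90 degrees about z
+    q = r.as_quat()               # the same rotation as a unit quaternion
+    v = r.inv().apply([1, 0, 0])  # rotate a vector by the inverse rotation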
+
+`scipy.stats` improvements
+--------------------------
+
+The Yeo-Johnson power transformation is now supported (``yeojohnson``,
+``yeojohnson_llf``, ``yeojohnson_normmax``, ``yeojohnson_normplot``). Unlike
+the Box-Cox transformation, the Yeo-Johnson transformation can accept negative
+values.
+
+Added a general method to sample random variates based on the density only, in
+the new function ``rvs_ratio_uniforms``.
+
+The Yule-Simon distribution (``yulesimon``) was added -- this is a new
+discrete probability distribution.
+
+``stats`` and ``mstats`` now have access to a new regression method,
+``siegelslopes``, a robust linear regression algorithm.
+
+`scipy.stats.gaussian_kde` now has the ability to deal with weighted samples,
+and should have a modest improvement in performance.
+
+Levy stable parameter estimation, PDF, and CDF calculations are now supported
+for `scipy.stats.levy_stable`.
+
+The Brunner-Munzel test is now available as ``brunnermunzel`` in ``stats``
+and ``mstats``.
+
+`scipy.linalg` improvements
+---------------------------
+
+`scipy.linalg.lapack` now exposes the LAPACK routines using the Rectangular
+Full Packed storage (RFP) for upper triangular, lower triangular, symmetric,
+or Hermitian matrices; the upper trapezoidal fat matrix RZ decomposition
+routines are now available as well.

 Deprecated features
 ===================

+The functions ``hyp2f0``, ``hyp1f2`` and ``hyp3f0`` in ``scipy.special`` have
+been deprecated.


 Backwards incompatible changes
 ==============================

 LAPACK version 3.4.0 or later is now required. Building with Apple
 Accelerate is no longer supported.

-The function scipy.linalg.subspace_angles(A, B) now gives right
-results for all angles. Before this function only returned
-right values for those angles which were greater than pi/4.
+The function ``scipy.linalg.subspace_angles(A, B)`` now gives correct
+results for all angles. Before this, the function only returned
+correct values for those angles which were greater than pi/4.
+
+Support for the Bento build system has been removed. Bento has not been
+maintained for several years, and did not have good Python 3 or wheel support,
+hence it was time to remove it.
+
+The required signature of the `scipy.optimize.linprog` ``method='simplex'``
+callback function has changed. Before iteration begins, the simplex solver
+first converts the problem into a standard form that does not, in general,
+have the same variables or constraints as the problem defined by the user.
+Previously, the simplex solver would pass a user-specified callback function
+several separate arguments, such as the current solution vector ``xk``,
+corresponding to this standard form problem. Unfortunately, the relationship
+between the standard form problem and the user-defined problem was not
+documented, limiting the utility of the information passed to the callback
+function.
+
+In addition to numerous bug fix changes, the simplex solver now passes a
+user-specified callback function a single ``OptimizeResult`` object containing
+information that corresponds directly to the user-defined problem. In future
+releases, this ``OptimizeResult`` object may be expanded to include additional
+information, such as variables corresponding to the standard-form problem and
+information concerning the relationship between the standard-form and
+user-defined problems.
+ +The implementation of `scipy.sparse.random` has changed, and this affects the +numerical values returned for both ``sparse.random`` and ``sparse.rand`` for +some matrix shapes and a given seed. + +`scipy.optimize.newton` will no longer use Halley's method in cases where it +negatively impacts convergence Other changes ============= @@ -47,10 +258,442 @@ Other changes Authors ======= -Issues closed -------------- +* @endolith +* @luzpaz +* Hameer Abbasi + +* akahard2dj + +* Anton Akhmerov +* Joseph Albert +* alexthomas93 + +* ashish + +* atpage + +* Blair Azzopardi + +* Yoshiki Vázquez Baeza +* Bence Bagi + +* Christoph Baumgarten +* Lucas Bellomo + +* BH4 + +* Aditya Bharti +* Max Bolingbroke +* François Boulogne +* Ward Bradt + +* Matthew Brett +* Evgeni Burovski +* Rafał Byczek + +* Alfredo Canziani + +* CJ Carey +* Lucía Cheung + +* Poom Chiarawongse + +* Jeanne Choo + +* Robert Cimrman +* Graham Clenaghan + +* cynthia-rempel + +* Johannes Damp + +* Jaime Fernandez del Rio +* Dowon + +* emmi474 + +* Stefan Endres + +* Thomas Etherington + +* Alex Fikl + +* fo40225 + +* Joseph Fox-Rabinovitz +* Lars G +* Abhinav Gautam + +* Stiaan Gerber + +* C.A.M. Gerlach + +* Ralf Gommers +* Todd Goodall +* Lars Grueter + +* Sylvain Gubian + +* Matt Haberland +* David Hagen +* Will Handley + +* Charles Harris +* Ian Henriksen +* Thomas Hisch + +* Theodore Hu +* Michael Hudson-Doyle + +* Nicolas Hug + +* jakirkham + +* Jakob Jakobson + +* James + +* Jan Schlüter +* jeanpauphilet + +* josephmernst + +* Kai + +* Kai-Striega + +* kalash04 + +* Toshiki Kataoka + +* Konrad0 + +* Tom Krauss + +* Johannes Kulick +* Lars Grüter + +* Eric Larson +* Denis Laxalde +* Will Lee + +* Katrin Leinweber + +* Yin Li + +* P. L. Lim + +* Jesse Livezey + +* Duncan Macleod + +* MatthewFlamm + +* Nikolay Mayorov +* Mike McClurg + +* Christian Meyer + +* Mark Mikofski +* Naoto Mizuno + +* mohmmadd + +* Nathan Musoke +* Anju Geetha Nair + +* Andrew Nelson +* Ayappan P + +* Nick Papior +* Haesun Park + +* Ronny Pfannschmidt + +* pijyoi + +* Ilhan Polat +* Anthony Polloreno + +* Ted Pudlik +* puenka +* Eric Quintero +* Pradeep Reddy Raamana + +* Vyas Ramasubramani + +* Ramon Viñas + +* Tyler Reddy +* Joscha Reimer +* Antonio H Ribeiro +* richardjgowers + +* Rob + +* robbystk + +* Lucas Roberts + +* rohan + +* Joaquin Derrac Rus + +* Josua Sassen + +* Bruce Sharpe + +* Max Shinn + +* Scott Sievert +* Sourav Singh +* Strahinja Lukić + +* Kai Striega + +* Shinya SUZUKI + +* Mike Toews + +* Piotr Uchwat +* Miguel de Val-Borro + +* Nicky van Foreest +* Paul van Mulbregt +* Gael Varoquaux +* Pauli Virtanen +* Stefan van der Walt +* Warren Weckesser +* Joshua Wharton + +* Bernhard M. Wiedemann + +* Eric Wieser +* Josh Wilson +* Tony Xiang + +* Roman Yurchak + +* Roy Zywina + + +A total of 137 people contributed to this release. +People with a "+" by their names contributed a patch for the first time. +This list of names is automatically generated, and may not be fully complete. + +Issues closed for 1.2.0 +----------------------- + +* `#1240 `__: Allowing multithreaded use of minpack through scipy.optimize... +* `#1432 `__: scipy.stats.mode extremely slow (Trac #905) +* `#3372 `__: Please add Sphinx search field to online scipy html docs +* `#3678 `__: _clough_tocher_2d_single direction between centroids +* `#4174 `__: lobpcg "largest" option invalid? +* `#5493 `__: anderson_ksamp p-values>1 +* `#5743 `__: slsqp fails to detect infeasible problem +* `#6139 `__: scipy.optimize.linprog failed to find a feasible starting point... 
+* `#6358 `__: stats: docstring for `vonmises_line` points to `vonmises_line`... +* `#6498 `__: runtests.py is missing in pypi distfile +* `#7426 `__: scipy.stats.ksone(n).pdf(x) returns nan for positive values of... +* `#7455 `__: scipy.stats.ksone.pdf(2,x) return incorrect values for x near... +* `#7456 `__: scipy.special.smirnov and scipy.special.smirnovi have accuracy... +* `#7492 `__: scipy.special.kolmogorov(x)/kolmogi(p) inefficient, inaccurate... +* `#7914 `__: TravisCI not failing when it should for -OO run +* `#8064 `__: linalg.solve test crashes on Windows +* `#8212 `__: LAPACK Rectangular Full Packed routines +* `#8256 `__: differential_evolution bug converges to wrong results in complex... +* `#8443 `__: Deprecate `hyp2f0`, `hyp1f2`, and `hyp3f0`? +* `#8452 `__: DOC: ARPACK tutorial has two conflicting equations +* `#8680 `__: scipy fails compilation when building from source +* `#8686 `__: Division by zero in _trustregion.py when x0 is exactly equal... +* `#8700 `__: _MINPACK_LOCK not held when calling into minpack from least_squares +* `#8786 `__: erroneous moment values for t-distribution +* `#8791 `__: Checking COLA condition in istft should be optional (or omitted) +* `#8843 `__: imresize cannot be deprecated just yet +* `#8844 `__: Inverse Wishart Log PDF Incorrect for Non-diagonal Scale Matrix? +* `#8878 `__: vonmises and vonmises_line in stats: vonmises wrong and superfluous? +* `#8895 `__: v1.1.0 `ndi.rotate` documentation – reused parameters not filled... +* `#8900 `__: Missing complex conjugation in scipy.sparse.linalg.LinearOperator +* `#8904 `__: BUG: if zero derivative at root, then Newton fails with RuntimeWarning +* `#8911 `__: make_interp_spline bc_type incorrect input interpretation +* `#8942 `__: MAINT: Refactor `_linprog.py` and `_linprog_ip.py` to remove... +* `#8947 `__: np.int64 in scipy.fftpack.next_fast_len +* `#9020 `__: BUG: linalg.subspace_angles gives wrong results +* `#9033 `__: scipy.stats.normaltest sometimes gives incorrect returns b/c... +* `#9036 `__: Bizarre times for `scipy.sparse.rand` function with 'low' density... +* `#9044 `__: optimize.minimize(method=`trust-constr`) result dict does not... +* `#9071 `__: doc/linalg: add cho_solve_banded to see also of cholesky_banded +* `#9082 `__: eigenvalue sorting in scipy.sparse.linalg.eigsh +* `#9086 `__: signaltools.py:491: FutureWarning: Using a non-tuple sequence... +* `#9091 `__: test_spline_filter failure on 32-bit +* `#9122 `__: Typo on scipy minimization tutorial +* `#9135 `__: doc error at https://docs.scipy.org/doc/scipy/reference/tutorial/stats/discrete_poisson.html +* `#9167 `__: DOC: BUG: typo in ndimage LowLevelCallable tutorial example +* `#9169 `__: truncnorm does not work if b < a in scipy.stats +* `#9250 `__: scipy.special.tests.test_mpmath::TestSystematic::test_pcfw fails... +* `#9259 `__: rv.expect() == rv.mean() is false for rv.mean() == nan (and inf) +* `#9286 `__: DOC: Rosenbrock expression in optimize.minimize tutorial +* `#9316 `__: SLSQP fails in nested optimization +* `#9337 `__: scipy.signal.find_peaks key typo in documentation +* `#9345 `__: Example from documentation of scipy.sparse.linalg.eigs raises... 
+* `#9383 `__: Default value for "mode" in "ndimage.shift" +* `#9419 `__: dual_annealing off by one in the number of iterations +* `#9442 `__: Error in Defintion of Rosenbrock Function +* `#9453 `__: TST: test_eigs_consistency() doesn't have consistent results -Pull requests -------------- +Pull requests for 1.2.0 +----------------------- +* `#7352 `__: ENH: add Brunner Munzel test to scipy.stats. +* `#7373 `__: BUG: Jaccard distance for all-zero arrays would return np.nan +* `#7374 `__: ENH: Add PDF, CDF and parameter estimation for Stable Distributions +* `#8098 `__: ENH: Add shgo for global optimization of NLPs. +* `#8203 `__: ENH: adding simulated dual annealing to optimize +* `#8259 `__: Option to follow original Storn and Price algorithm and its parallelisation +* `#8293 `__: ENH add ratio-of-uniforms method for rv generation to scipy.stats +* `#8294 `__: BUG: Fix slowness in stats.mode +* `#8295 `__: ENH: add Jensen Shannon distance to `scipy.spatial.distance` +* `#8357 `__: ENH: vectorize scalar zero-search-functions +* `#8397 `__: Add `fs=` parameter to filter design functions +* `#8537 `__: ENH: Implement mode parameter for spline filtering. +* `#8558 `__: ENH: small speedup for stats.gaussian_kde +* `#8560 `__: BUG: fix p-value calc of anderson_ksamp in scipy.stats +* `#8614 `__: ENH: correct p-values for stats.kendalltau and stats.mstats.kendalltau +* `#8670 `__: ENH: Require Lapack 3.4.0 +* `#8683 `__: Correcting kmeans documentation +* `#8725 `__: MAINT: Cleanup scipy.optimize.leastsq +* `#8726 `__: BUG: Fix _get_output in scipy.ndimage to support string +* `#8733 `__: MAINT: stats: A bit of clean up. +* `#8737 `__: BUG: Improve numerical precision/convergence failures of smirnov/kolmogorov +* `#8738 `__: MAINT: stats: A bit of clean up in test_distributions.py. +* `#8740 `__: BF/ENH: make minpack thread safe +* `#8742 `__: BUG: Fix division by zero in trust-region optimization methods +* `#8746 `__: MAINT: signal: Fix a docstring of a private function, and fix... +* `#8750 `__: DOC clarified description of norminvgauss in scipy.stats +* `#8753 `__: DOC: signal: Fix a plot title in the chirp docstring. +* `#8755 `__: DOC: MAINT: Fix link to the wheel documentation in developer... +* `#8760 `__: BUG: stats: boltzmann wasn't setting the upper bound. +* `#8763 `__: [DOC] Improved scipy.cluster.hierarchy documentation +* `#8765 `__: DOC: added example for scipy.stat.mstats.tmin +* `#8788 `__: DOC: fix definition of optional `disp` parameter +* `#8802 `__: MAINT: Suppress dd_real unused function compiler warnings. +* `#8803 `__: ENH: Add full_output support to optimize.newton() +* `#8804 `__: MAINT: stats cleanup +* `#8808 `__: DOC: add note about isinstance for frozen rvs +* `#8812 `__: Updated numpydoc submodule +* `#8813 `__: MAINT: stats: Fix multinomial docstrings, and do some clean up. +* `#8816 `__: BUG: fixed _stats of t-distribution in scipy.stats +* `#8817 `__: BUG: ndimage: Fix validation of the origin argument in correlate... +* `#8822 `__: BUG: integrate: Fix crash with repeated t values in odeint. +* `#8832 `__: Hyperlink DOIs against preferred resolver +* `#8837 `__: BUG: sparse: Ensure correct dtype for sparse comparison operations. +* `#8839 `__: DOC: stats: A few tweaks to the linregress docstring. +* `#8846 `__: BUG: stats: Fix logpdf method of invwishart. +* `#8849 `__: DOC: signal: Fixed mistake in the firwin docstring. 
+* `#8854 `__: DOC: fix type descriptors in ltisys documentation +* `#8865 `__: Fix tiny typo in docs for chi2 pdf +* `#8870 `__: Fixes related to invertibility of STFT +* `#8872 `__: ENH: special: Add the softmax function +* `#8874 `__: DOC correct gamma function in docstrings in scipy.stats +* `#8876 `__: ENH: Added TOMS Algorithm 748 as 1-d root finder; 17 test function... +* `#8882 `__: ENH: Only use Halley's adjustment to Newton if close enough. +* `#8883 `__: FIX: optimize: make jac and hess truly optional for 'trust-constr' +* `#8885 `__: TST: Do not error on warnings raised about non-tuple indexing. +* `#8887 `__: MAINT: filter out np.matrix PendingDeprecationWarning's in numpy... +* `#8889 `__: DOC: optimize: separate legacy interfaces from new ones +* `#8890 `__: ENH: Add optimize.root_scalar() as a universal dispatcher for... +* `#8899 `__: DCT-IV, DST-IV and DCT-I, DST-I orthonormalization support in... +* `#8901 `__: MAINT: Reorganize flapack.pyf.src file +* `#8907 `__: BUG: ENH: Check if guess for newton is already zero before checking... +* `#8908 `__: ENH: Make sorting optional for cKDTree.query_ball_point() +* `#8910 `__: DOC: sparse.csgraph simple examples. +* `#8914 `__: DOC: interpolate: fix equivalences of string aliases +* `#8918 `__: add float_control(precise, on) to _fpumode.c +* `#8919 `__: MAINT: interpolate: improve error messages for common `bc_type`... +* `#8920 `__: DOC: update Contributing to SciPy to say "prefer no PEP8 only... +* `#8924 `__: MAINT: special: deprecate `hyp2f0`, `hyp1f2`, and `hyp3f0` +* `#8927 `__: MAINT: special: remove `errprint` +* `#8932 `__: Fix broadcasting scale arg of entropy +* `#8936 `__: Fix (some) non-tuple index warnings +* `#8937 `__: ENH: implement sparse matrix BSR to CSR conversion directly. +* `#8938 `__: DOC: add @_ni_docstrings.docfiller in ndimage.rotate +* `#8940 `__: Update _discrete_distns.py +* `#8943 `__: DOC: Finish dangling sentence in `convolve` docstring +* `#8944 `__: MAINT: Address tuple indexing and warnings +* `#8945 `__: ENH: spatial.transform.Rotation [GSOC2018] +* `#8950 `__: csgraph Dijkstra function description rewording +* `#8953 `__: DOC, MAINT: HTTP -> HTTPS, and other linkrot fixes +* `#8955 `__: BUG: np.int64 in scipy.fftpack.next_fast_len +* `#8958 `__: MAINT: Add more descriptive error message for phase one simplex. +* `#8962 `__: BUG: sparse.linalg: add missing conjugate to _ScaledLinearOperator.adjoint +* `#8963 `__: BUG: sparse.linalg: downgrade LinearOperator TypeError to warning +* `#8965 `__: ENH: Wrapped RFP format and RZ decomposition routines +* `#8969 `__: MAINT: doc and code fixes for optimize.newton +* `#8970 `__: Added 'average' keyword for welch/csd to enable median averaging +* `#8971 `__: Better imresize deprecation warning +* `#8972 `__: MAINT: Switch np.where(c) for np.nonzero(c) +* `#8975 `__: MAINT: Fix warning-based failures +* `#8979 `__: DOC: fix description of count_sort keyword of dendrogram +* `#8982 `__: MAINT: optimize: Fixed minor mistakes in test_linprog.py (#8978) +* `#8984 `__: BUG: sparse.linalg: ensure expm casts integer inputs to float +* `#8986 `__: BUG: optimize/slsqp: do not exit with convergence on steps where... 
+* `#8989 `__: MAINT: use collections.abc in basinhopping +* `#8990 `__: ENH extend p-values of anderson_ksamp in scipy.stats +* `#8991 `__: ENH: Weighted kde +* `#8993 `__: ENH: spatial.transform.Rotation.random [GSOC 2018] +* `#8994 `__: ENH: spatial.transform.Slerp [GSOC 2018] +* `#8995 `__: TST: time.time in test +* `#9007 `__: Fix typo in fftpack.rst +* `#9013 `__: Added correct plotting code for two sided output from spectrogram +* `#9014 `__: BUG: differential_evolution with inf objective functions +* `#9017 `__: BUG: fixed #8446 corner case for asformat(array|dense) +* `#9018 `__: MAINT: _lib/ccallback: remove unused code +* `#9021 `__: BUG: Issue with subspace_angles +* `#9022 `__: DOC: Added "See Also" section to lombscargle docstring +* `#9034 `__: BUG: Fix tolerance printing behavior, remove meaningless tol... +* `#9035 `__: TST: improve signal.bsplines test coverage +* `#9037 `__: ENH: add a new init method for k-means +* `#9039 `__: DOC: Add examples to fftpack.irfft docstrings +* `#9048 `__: ENH: scipy.sparse.random +* `#9050 `__: BUG: scipy.io.hb_write: fails for matrices not in csc format +* `#9051 `__: MAINT: Fix slow sparse.rand for k < mn/3 (#9036). +* `#9054 `__: MAINT: spatial: Explicitly initialize LAPACK output parameters. +* `#9055 `__: DOC: Add examples to scipy.special docstrings +* `#9056 `__: ENH: Use one thread in OpenBLAS +* `#9059 `__: DOC: Update README with link to Code of Conduct +* `#9060 `__: BLD: remove support for the Bento build system. +* `#9062 `__: DOC add sections to overview in scipy.stats +* `#9066 `__: BUG: Correct "remez" error message +* `#9069 `__: DOC: update linalg section of roadmap for LAPACK versions. +* `#9079 `__: MAINT: add spatial.transform to refguide check; complete some... +* `#9081 `__: MAINT: Add warnings if pivot value is close to tolerance in linprog(method='simplex') +* `#9084 `__: BUG fix incorrect p-values of kurtosistest in scipy.stats +* `#9095 `__: DOC: add sections to mstats overview in scipy.stats +* `#9096 `__: BUG: Add test for Stackoverflow example from issue 8174. +* `#9101 `__: ENH: add Siegel slopes (robust regression) to scipy.stats +* `#9105 `__: allow resample_poly() to output float32 for float32 inputs. +* `#9112 `__: MAINT: optimize: make trust-constr accept constraint dict (#9043) +* `#9118 `__: Add doc entry to cholesky_banded +* `#9120 `__: eigsh documentation parameters +* `#9125 `__: interpolative: correctly reconstruct full rank matrices +* `#9126 `__: MAINT: Use warnings for unexpected peak properties +* `#9129 `__: BUG: Do not catch and silence KeyboardInterrupt +* `#9131 `__: DOC: Correct the typo in scipy.optimize tutorial page +* `#9133 `__: FIX: Avoid use of bare except +* `#9134 `__: DOC: Update of 'return_eigenvectors' description +* `#9137 `__: DOC: typo fixes for discrete Poisson tutorial +* `#9139 `__: FIX: Doctest failure in optimize tutorial +* `#9143 `__: DOC: missing sigma in Pearson r formula +* `#9145 `__: MAINT: Refactor linear programming solvers +* `#9149 `__: FIX: Make scipy.odr.ODR ifixx equal to its data.fix if given +* `#9156 `__: DOC: special: Mention the sigmoid function in the expit docstring. 
+* `#9160 `__: Fixed a latex delimiter error in levy() +* `#9170 `__: DOC: correction / update of docstrings of distributions in scipy.stats +* `#9171 `__: better description of the hierarchical clustering parameter +* `#9174 `__: domain check for a < b in stats.truncnorm +* `#9175 `__: DOC: Minor grammar fix +* `#9176 `__: BUG: CloughTocher2DInterpolator: fix miscalculation at neighborless... +* `#9177 `__: BUILD: Document the "clean" target in the doc/Makefile. +* `#9178 `__: MAINT: make refguide-check more robust for printed numpy arrays +* `#9186 `__: MAINT: Remove np.ediff1d occurence +* `#9188 `__: DOC: correct typo in extending ndimage with C +* `#9190 `__: ENH: Support specifying axes for fftconvolve +* `#9192 `__: MAINT: optimize: fixed @pv style suggestions from #9112 +* `#9200 `__: Fix make_interp_spline(..., k=0 or 1, axis<0) +* `#9201 `__: BUG: sparse.linalg/gmres: use machine eps in breakdown check +* `#9204 `__: MAINT: fix up stats.spearmanr and match mstats.spearmanr with... +* `#9206 `__: MAINT: include benchmarks and dev files in sdist. +* `#9208 `__: TST: signal: bump bsplines test tolerance for complex data +* `#9210 `__: TST: mark tests as slow, fix missing random seed +* `#9211 `__: ENH: add capability to specify orders in pade func +* `#9217 `__: MAINT: Include ``success`` and ``nit`` in OptimizeResult returned... +* `#9222 `__: ENH: interpolate: Use scipy.spatial.distance to speed-up Rbf +* `#9229 `__: MNT: Fix Fourier filter double case +* `#9233 `__: BUG: spatial/distance: fix pdist/cdist performance regression... +* `#9234 `__: FIX: Proper suppression +* `#9235 `__: BENCH: rationalize slow benchmarks + miscellaneous fixes +* `#9238 `__: BENCH: limit number of parameter combinations in spatial.*KDTree... +* `#9239 `__: DOC: stats: Fix LaTeX markup of a couple distribution PDFs. +* `#9241 `__: ENH: Evaluate plateau size during peak finding +* `#9242 `__: ENH: stats: Implement _ppf and _logpdf for crystalball, and do... +* `#9246 `__: DOC: Properly render versionadded directive in HTML documentation +* `#9255 `__: DOC: mention RootResults in optimization reference guide +* `#9260 `__: TST: relax some tolerances so tests pass with x87 math +* `#9264 `__: TST Use assert_raises "match" parameter instead of the "message"... +* `#9267 `__: DOC: clarify expect() return val when moment is inf/nan +* `#9272 `__: DOC: Add description of default bounds to linprog +* `#9277 `__: MAINT: sparse/linalg: make test deterministic +* `#9278 `__: MAINT: interpolate: pep8 cleanup in test_polyint +* `#9279 `__: Fixed docstring for resample +* `#9280 `__: removed first check for float in get_sum_dtype +* `#9281 `__: BUG: only accept 1d input for bartlett / levene in scipy.stats +* `#9282 `__: MAINT: dense_output and t_eval are mutually exclusive inputs +* `#9283 `__: MAINT: add docs and do some cleanups in interpolate.Rbf +* `#9288 `__: Run distance_transform_edt tests on all types +* `#9294 `__: DOC: fix the formula typo +* `#9298 `__: MAINT: optimize/trust-constr: restore .niter attribute for backward-compat +* `#9299 `__: DOC: clarification of default rvs method in scipy.stats +* `#9301 `__: MAINT: removed unused import sys +* `#9302 `__: MAINT: removed unused imports +* `#9303 `__: DOC: signal: Refer to fs instead of nyq in the firwin docstring. +* `#9305 `__: ENH: Added Yeo-Johnson power transformation +* `#9306 `__: ENH - add dual annealing +* `#9309 `__: ENH add the yulesimon distribution to scipy.stats +* `#9317 `__: Nested SLSQP bug fix. 
+* `#9320 `__: MAINT: stats: avoid underflow in stats.geom.ppf +* `#9326 `__: Add example for Rosenbrock function +* `#9332 `__: Sort file lists +* `#9340 `__: Fix typo in find_peaks documentation +* `#9343 `__: MAINT Use np.full when possible +* `#9344 `__: DOC: added examples to docstring of dirichlet class +* `#9346 `__: DOC: Fix import of scipy.sparse.linalg in example (#9345) +* `#9350 `__: Fix interpolate read only +* `#9351 `__: MAINT: special.erf: use the x->-x symmetry +* `#9356 `__: Fix documentation typo +* `#9358 `__: DOC: improve doc for ksone and kstwobign in scipy.stats +* `#9362 `__: DOC: Change datatypes of A matrices in linprog +* `#9364 `__: MAINT: Adds implicit none to fftpack fortran sources +* `#9369 `__: DOC: minor tweak to CoC (updated NumFOCUS contact address). +* `#9373 `__: Fix exception if python is called with -OO option +* `#9374 `__: FIX: AIX compilation issue with NAN and INFINITY +* `#9376 `__: COBLYA -> COBYLA in docs +* `#9377 `__: DOC: Add examples integrate: fixed_quad and quadrature +* `#9379 `__: MAINT: TST: Make tests NumPy 1.8 compatible +* `#9385 `__: CI: On Travis matrix "OPTIMIZE=-OO" flag ignored +* `#9387 `__: Fix defaut value for 'mode' in 'ndimage.shift' in the doc +* `#9392 `__: BUG: rank has to be integer in rank_filter: fixed issue 9388 +* `#9399 `__: DOC: Misc. typos +* `#9400 `__: TST: stats: Fix the expected r-value of a linregress test. +* `#9405 `__: BUG: np.hstack does not accept generator expressions +* `#9408 `__: ENH: linalg: Shorter ill-conditioned warning message +* `#9418 `__: DOC: Fix ndimage docstrings and reduce doc build warnings +* `#9421 `__: DOC: Add missing docstring examples in scipy.spatial +* `#9422 `__: DOC: Add an example to integrate.newton_cotes +* `#9427 `__: BUG: Fixed defect with maxiter #9419 in dual annealing +* `#9431 `__: BENCH: Add dual annealing to scipy benchmark (see #9415) +* `#9435 `__: DOC: Add docstring examples for stats.binom_test +* `#9443 `__: DOC: Fix the order of indices in optimize tutorial +* `#9444 `__: MAINT: interpolate: use operator.index for checking/coercing... +* `#9445 `__: DOC: Added missing example to stats.mstats.kruskal +* `#9446 `__: DOC: Add note about version changed for jaccard distance +* `#9447 `__: BLD: version-script handling in setup.py +* `#9448 `__: TST: skip a problematic linalg test +* `#9449 `__: TST: fix missing seed in lobpcg test. +* `#9456 `__: TST: test_eigs_consistency() now sorts output From 24617a223878d033adcb132d75df0da507a3d56a Mon Sep 17 00:00:00 2001 From: Tyler Reddy Date: Fri, 9 Nov 2018 08:49:10 -0800 Subject: [PATCH 03/70] REL: set version to 1.3.0.dev0 and add 1.3.0 release notes --- doc/release/1.3.0-notes.rst | 53 ++++++++++++++++++++++++++++++++++++ doc/source/release.1.3.0.rst | 1 + doc/source/release.rst | 1 + pavement.py | 4 +-- setup.py | 2 +- 5 files changed, 58 insertions(+), 3 deletions(-) create mode 100644 doc/release/1.3.0-notes.rst create mode 100644 doc/source/release.1.3.0.rst diff --git a/doc/release/1.3.0-notes.rst b/doc/release/1.3.0-notes.rst new file mode 100644 index 000000000000..76aa585ac677 --- /dev/null +++ b/doc/release/1.3.0-notes.rst @@ -0,0 +1,53 @@ +========================== +SciPy 1.3.0 Release Notes +========================== + +.. note:: Scipy 1.3.0 is not released yet! + +.. contents:: + +SciPy 1.3.0 is the culmination of X months of hard work. It contains +many new features, numerous bug-fixes, improved test coverage and better +documentation. 
There have been a number of deprecations and API changes +in this release, which are documented below. All users are encouraged to +upgrade to this release, as there are a large number of bug-fixes and +optimizations. Before upgrading, we recommend that users check that +their own code does not use deprecated SciPy functionality (to do so, +run your code with ``python -Wd`` and check for ``DeprecationWarning`` s). +Our development attention will now shift to bug-fix releases on the +1.3.x branch, and on adding new features on the master branch. + +This release requires Python 2.7 or 3.4+ and NumPy 1.8.2 or greater. + +For running on PyPy, PyPy3 6.0+ and NumPy 1.15.0 are required. + +Highlights of this release +-------------------------- + +- + +New features +============ + + +Deprecated features +=================== + + +Backwards incompatible changes +============================== + + +Other changes +============= + + +Authors +======= + + +Issues closed +------------- + +Pull requests +------------- diff --git a/doc/source/release.1.3.0.rst b/doc/source/release.1.3.0.rst new file mode 100644 index 000000000000..7156aac05f0f --- /dev/null +++ b/doc/source/release.1.3.0.rst @@ -0,0 +1 @@ +.. include:: ../release/1.3.0-notes.rst diff --git a/doc/source/release.rst b/doc/source/release.rst index 0cd533e7fc50..4851fc6b81a1 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -5,6 +5,7 @@ Release Notes .. toctree:: :maxdepth: 1 + release.1.3.0 release.1.2.0 release.1.1.0 release.1.0.1 diff --git a/pavement.py b/pavement.py index e39ed62bf711..964c689d8b93 100644 --- a/pavement.py +++ b/pavement.py @@ -113,10 +113,10 @@ #----------------------------------- # Source of the release notes -RELEASE = 'doc/release/1.2.0-notes.rst' +RELEASE = 'doc/release/1.3.0-notes.rst' # Start/end of the log (from git) -LOG_START = 'v1.1.0' +LOG_START = 'v1.2.0' LOG_END = 'master' diff --git a/setup.py b/setup.py index a4a9c1c31eb6..67661baad8b0 100755 --- a/setup.py +++ b/setup.py @@ -57,7 +57,7 @@ """ MAJOR = 1 -MINOR = 2 +MINOR = 3 MICRO = 0 ISRELEASED = False VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO) From d20be51471902746ffee3cfef9d45970de22ab00 Mon Sep 17 00:00:00 2001 From: Mana Borwornpadungkitti Date: Wed, 14 Nov 2018 17:12:57 -0500 Subject: [PATCH 04/70] DOC: linalg: fix lstsq docstring on residues shape ```python >>> import scipy.linalg >>> scipy.linalg.lstsq([[1]], [1])[1].shape (0,) ``` See also: https://github.com/scipy/scipy/blob/0a9e93e23453cce2f5e861bf6dc4168d788fdb51/scipy/linalg/basic.py#L1246 --- scipy/linalg/basic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scipy/linalg/basic.py b/scipy/linalg/basic.py index 95829890569f..467cf57fc9db 100644 --- a/scipy/linalg/basic.py +++ b/scipy/linalg/basic.py @@ -1084,7 +1084,7 @@ def lstsq(a, b, cond=None, overwrite_a=False, overwrite_b=False, Least-squares solution. Return shape matches shape of `b`. residues : (0,) or () or (K,) ndarray Sums of residues, squared 2-norm for each column in ``b - a x``. - If rank of matrix a is ``< N`` or ``N > M``, or ``'gelsy'`` is used, + If rank of matrix a is ``< N`` or ``N >= M``, or ``'gelsy'`` is used, this is a length zero array. If b was 1-D, this is a () shape array (numpy scalar), otherwise the shape is (K,). 
rank : int From 444046fb9cc9cad42944bd07dd964120b71f2912 Mon Sep 17 00:00:00 2001 From: Ilhan Polat Date: Thu, 15 Nov 2018 07:37:44 +0100 Subject: [PATCH 05/70] MAINT: linalg: lstsq clean up --- scipy/linalg/basic.py | 61 +++++--------- scipy/linalg/tests/test_basic.py | 138 ++++++------------------------- 2 files changed, 43 insertions(+), 156 deletions(-) diff --git a/scipy/linalg/basic.py b/scipy/linalg/basic.py index 467cf57fc9db..1fefd78f7da1 100644 --- a/scipy/linalg/basic.py +++ b/scipy/linalg/basic.py @@ -1037,13 +1037,8 @@ def det(a, overwrite_a=False, check_finite=True): 'det.getrf' % -info) return a_det -# Linear Least Squares - - -class LstsqLapackError(LinAlgError): - pass - +# Linear Least Squares def lstsq(a, b, cond=None, overwrite_a=False, overwrite_b=False, check_finite=True, lapack_driver=None): """ @@ -1054,9 +1049,9 @@ def lstsq(a, b, cond=None, overwrite_a=False, overwrite_b=False, Parameters ---------- a : (M, N) array_like - Left hand side matrix (2-D array). + Left hand side array b : (M,) or (M, K) array_like - Right hand side matrix or vector (1-D or 2-D array). + Right hand side array cond : float, optional Cutoff for 'small' singular values; used to determine effective rank of a. Singular values smaller than @@ -1082,16 +1077,15 @@ def lstsq(a, b, cond=None, overwrite_a=False, overwrite_b=False, ------- x : (N,) or (N, K) ndarray Least-squares solution. Return shape matches shape of `b`. - residues : (0,) or () or (K,) ndarray - Sums of residues, squared 2-norm for each column in ``b - a x``. - If rank of matrix a is ``< N`` or ``N >= M``, or ``'gelsy'`` is used, - this is a length zero array. If b was 1-D, this is a () shape array - (numpy scalar), otherwise the shape is (K,). + residues : (K,) ndarray or float + Square of the 2-norm for each column in ``b - a x``, if ``M > N`` and + ``rank(A) == n`` (returns a scalar if b is 1-D). Otherwise a + (0,)-shaped array is returned. rank : int - Effective rank of matrix `a`. - s : (min(M,N),) ndarray or None + Effective rank of `a`. + s : (min(M, N),) ndarray or None Singular values of `a`. The condition number of a is - ``abs(s[0] / s[-1])``. None is returned when ``'gelsy'`` is used. + ``abs(s[0] / s[-1])``. Raises ------ @@ -1099,12 +1093,17 @@ def lstsq(a, b, cond=None, overwrite_a=False, overwrite_b=False, If computation does not converge. ValueError - When parameters are wrong. + When parameters are not compatible. See Also -------- optimize.nnls : linear least squares with non-negativity constraint + Notes + ----- + When ``'gelsy'`` is used as a driver, `residues` is set to a (0,)-shaped + array and `s` is always ``None``. 
+ Examples -------- >>> from scipy.linalg import lstsq @@ -1153,14 +1152,15 @@ def lstsq(a, b, cond=None, overwrite_a=False, overwrite_b=False, a1 = _asarray_validated(a, check_finite=check_finite) b1 = _asarray_validated(b, check_finite=check_finite) if len(a1.shape) != 2: - raise ValueError('expected matrix') + raise ValueError('Input array a should be 2-D') m, n = a1.shape if len(b1.shape) == 2: nrhs = b1.shape[1] else: nrhs = 1 if m != b1.shape[0]: - raise ValueError('incompatible dimensions') + raise ValueError('Shape mismatch: a and b should have the same number' + ' of rows ({} != {}).'.format(m, b1.shape[0])) if m == 0 or n == 0: # Zero-sized problem, confuses LAPACK x = np.zeros((n,) + b1.shape[1:], dtype=np.common_type(a1, b1)) if n == 0: @@ -1207,29 +1207,6 @@ def lstsq(a, b, cond=None, overwrite_a=False, overwrite_b=False, elif driver == 'gelsd': if real_data: lwork, iwork = _compute_lwork(lapack_lwork, m, n, nrhs, cond) - if iwork == 0: - # this is LAPACK bug 0038: dgelsd does not provide the - # size of the iwork array in query mode. This bug was - # fixed in LAPACK 3.2.2, released July 21, 2010. - mesg = ("internal gelsd driver lwork query error, " - "required iwork dimension not returned. " - "This is likely the result of LAPACK bug " - "0038, fixed in LAPACK 3.2.2 (released " - "July 21, 2010). ") - - if lapack_driver is None: - # restart with gelss - lstsq.default_lapack_driver = 'gelss' - mesg += "Falling back to 'gelss' driver." - warn(mesg, RuntimeWarning, stacklevel=2) - return lstsq(a, b, cond, overwrite_a, overwrite_b, - check_finite, lapack_driver='gelss') - - # can't proceed, bail out - mesg += ("Use a different lapack_driver when calling lstsq" - " or upgrade LAPACK.") - raise LstsqLapackError(mesg) - x, s, rank, info = lapack_func(a1, b1, lwork, iwork, cond, False, False) else: # complex data diff --git a/scipy/linalg/tests/test_basic.py b/scipy/linalg/tests/test_basic.py index 4183da2a4a06..924a5eae9d06 100644 --- a/scipy/linalg/tests/test_basic.py +++ b/scipy/linalg/tests/test_basic.py @@ -1,9 +1,3 @@ -# -# Created by: Pearu Peterson, March 2002 -# -""" Test functions for linalg.basic module - -""" from __future__ import division, print_function, absolute_import import warnings @@ -26,26 +20,10 @@ solve_circulant, circulant, LinAlgError, block_diag, matrix_balance, LinAlgWarning) -from scipy.linalg.basic import LstsqLapackError from scipy.linalg._testutils import assert_no_overwrite from scipy._lib._version import NumpyVersion - -""" -Bugs: -1) solve.check_random_sym_complex fails if a is complex - and transpose(a) = conjugate(a) (a is Hermitian). 
-""" -__usage__ = """ -Build linalg: - python setup_linalg.py build -Run tests if scipy is installed: - python -c 'import scipy;scipy.linalg.test()' -Run tests if linalg is not installed: - python tests/test_basic.py -""" - REAL_DTYPES = [np.float32, np.float64, np.longdouble] COMPLEX_DTYPES = [np.complex64, np.complex128, np.clongdouble] DTYPES = REAL_DTYPES + COMPLEX_DTYPES @@ -611,7 +589,6 @@ def test_random_sym(self): def test_random_sym_complex(self): n = 20 a = random([n, n]) - # XXX: with the following addition the accuracy will be very low a = a + 1j*random([n, n]) for i in range(n): a[i, i] = abs(20*(.1+a[i, i])) @@ -951,20 +928,10 @@ def test_simple_exact(self): a1 = a.copy() b = np.array(bt, dtype=dtype) b1 = b.copy() - try: - out = lstsq(a1, b1, - lapack_driver=lapack_driver, - overwrite_a=overwrite, - overwrite_b=overwrite) - except LstsqLapackError: - if lapack_driver is None: - mesg = ('LstsqLapackError raised with ' - 'lapack_driver being None.') - raise AssertionError(mesg) - else: - # can't proceed, skip to the next iteration - continue - + out = lstsq(a1, b1, + lapack_driver=lapack_driver, + overwrite_a=overwrite, + overwrite_b=overwrite) x = out[0] r = out[2] assert_(r == 2, @@ -984,19 +951,9 @@ def test_simple_overdet(self): # Store values in case they are overwritten later a1 = a.copy() b1 = b.copy() - try: - out = lstsq(a1, b1, lapack_driver=lapack_driver, - overwrite_a=overwrite, - overwrite_b=overwrite) - except LstsqLapackError: - if lapack_driver is None: - mesg = ('LstsqLapackError raised with ' - 'lapack_driver being None.') - raise AssertionError(mesg) - else: - # can't proceed, skip to the next iteration - continue - + out = lstsq(a1, b1, lapack_driver=lapack_driver, + overwrite_a=overwrite, + overwrite_b=overwrite) x = out[0] if lapack_driver == 'gelsy': residuals = np.sum((b - a.dot(x))**2) @@ -1023,18 +980,9 @@ def test_simple_overdet_complex(self): # Store values in case they are overwritten later a1 = a.copy() b1 = b.copy() - try: - out = lstsq(a1, b1, lapack_driver=lapack_driver, - overwrite_a=overwrite, - overwrite_b=overwrite) - except LstsqLapackError: - if lapack_driver is None: - mesg = ('LstsqLapackError raised with ' - 'lapack_driver being None.') - raise AssertionError(mesg) - else: - # can't proceed, skip to the next iteration - continue + out = lstsq(a1, b1, lapack_driver=lapack_driver, + overwrite_a=overwrite, + overwrite_b=overwrite) x = out[0] if lapack_driver == 'gelsy': @@ -1065,18 +1013,9 @@ def test_simple_underdet(self): # Store values in case they are overwritten later a1 = a.copy() b1 = b.copy() - try: - out = lstsq(a1, b1, lapack_driver=lapack_driver, - overwrite_a=overwrite, - overwrite_b=overwrite) - except LstsqLapackError: - if lapack_driver is None: - mesg = ('LstsqLapackError raised with ' - 'lapack_driver being None.') - raise AssertionError(mesg) - else: - # can't proceed, skip to the next iteration - continue + out = lstsq(a1, b1, lapack_driver=lapack_driver, + overwrite_a=overwrite, + overwrite_b=overwrite) x = out[0] r = out[2] @@ -1100,19 +1039,10 @@ def test_random_exact(self): # Store values in case they are overwritten later a1 = a.copy() b1 = b.copy() - try: - out = lstsq(a1, b1, - lapack_driver=lapack_driver, - overwrite_a=overwrite, - overwrite_b=overwrite) - except LstsqLapackError: - if lapack_driver is None: - mesg = ('LstsqLapackError raised with ' - 'lapack_driver being None.') - raise AssertionError(mesg) - else: - # can't proceed, skip to the next iteration - continue + out = lstsq(a1, b1, + 
lapack_driver=lapack_driver, + overwrite_a=overwrite, + overwrite_b=overwrite) x = out[0] r = out[2] assert_(r == n, 'expected efficient rank %s, ' @@ -1177,20 +1107,10 @@ def test_random_overdet(self): # Store values in case they are overwritten later a1 = a.copy() b1 = b.copy() - try: - out = lstsq(a1, b1, - lapack_driver=lapack_driver, - overwrite_a=overwrite, - overwrite_b=overwrite) - except LstsqLapackError: - if lapack_driver is None: - mesg = ('LstsqLapackError raised with ' - 'lapack_driver being None.') - raise AssertionError(mesg) - else: - # can't proceed, skip to the next iteration - continue - + out = lstsq(a1, b1, + lapack_driver=lapack_driver, + overwrite_a=overwrite, + overwrite_b=overwrite) x = out[0] r = out[2] assert_(r == m, 'expected efficient rank %s, ' @@ -1251,18 +1171,9 @@ def test_check_finite(self): # later a1 = a.copy() b1 = b.copy() - try: - out = lstsq(a1, b1, lapack_driver=lapack_driver, - check_finite=check_finite, overwrite_a=overwrite, - overwrite_b=overwrite) - except LstsqLapackError: - if lapack_driver is None: - raise AssertionError('LstsqLapackError raised with ' - '"lapack_driver" being "None".') - else: - # can't proceed, - # skip to the next iteration - continue + out = lstsq(a1, b1, lapack_driver=lapack_driver, + check_finite=check_finite, overwrite_a=overwrite, + overwrite_b=overwrite) x = out[0] r = out[2] assert_(r == 2, 'expected efficient rank 2, got %s' % r) @@ -1650,4 +1561,3 @@ def test_perm_and_scaling(self): ip[p] = np.arange(A.shape[0]) assert_allclose(y, np.diag(s)[ip, :]) assert_allclose(solve(y, A).dot(y), x) - From f8f7b3acf9195162677cb36d7e6200ae62a66d73 Mon Sep 17 00:00:00 2001 From: Sourav Singh <4314261+souravsingh@users.noreply.github.com> Date: Thu, 15 Nov 2018 12:18:44 +0530 Subject: [PATCH 06/70] DOC: Add docstring examples for rosen functions (#9478) * DOC: Add docstring examples for rosen functions --- scipy/optimize/optimize.py | 31 ++++++++++++++++++++++++++++--- 1 file changed, 28 insertions(+), 3 deletions(-) diff --git a/scipy/optimize/optimize.py b/scipy/optimize/optimize.py index 3e5051336614..09594162897c 100644 --- a/scipy/optimize/optimize.py +++ b/scipy/optimize/optimize.py @@ -215,7 +215,14 @@ def rosen_der(x): See Also -------- rosen, rosen_hess, rosen_hess_prod - + + Examples + -------- + >>> from scipy.optimize import rosen_der + >>> X = 0.1 * np.arange(9) + >>> rosen_der(X) + array([ -2. , 10.6, 15.6, 13.4, 6.4, -3. , -12.4, -19.4, 62. 
]) + """ x = asarray(x) xm = x[1:-1] @@ -246,7 +253,17 @@ def rosen_hess(x): See Also -------- rosen, rosen_der, rosen_hess_prod - + + Examples + -------- + >>> from scipy.optimize import rosen_hess + >>> X = 0.1 * np.arange(4) + >>> rosen_hess(X) + array([[-38., 0., 0., 0.], + [ 0., 134., -40., 0.], + [ 0., -40., 130., -80.], + [ 0., 0., -80., 200.]]) + """ x = atleast_1d(x) H = numpy.diag(-400 * x[:-1], 1) - numpy.diag(400 * x[:-1], -1) @@ -278,7 +295,15 @@ def rosen_hess_prod(x, p): See Also -------- rosen, rosen_der, rosen_hess - + + Examples + -------- + >>> from scipy.optimize import rosen_hess_prod + >>> X = 0.1 * np.arange(9) + >>> p = 0.5 * np.arange(9) + >>> rosen_hess_prod(X, p) + array([ -0., 27., -10., -95., -192., -265., -278., -195., -180.]) + """ x = atleast_1d(x) Hp = numpy.zeros(len(x), dtype=x.dtype) From 3651d2d7d3380c4dcd80303ae50b563d38b4c037 Mon Sep 17 00:00:00 2001 From: Sourav Singh <4314261+souravsingh@users.noreply.github.com> Date: Thu, 15 Nov 2018 19:02:55 +0530 Subject: [PATCH 07/70] DOC: Add docstring example for ai_zeros and bi_zeros (#9479) * DOC: special: Add docstring example for ai_zeros and bi_zeros --- scipy/special/basic.py | 30 ++++++++++++++++++++++++++++-- 1 file changed, 28 insertions(+), 2 deletions(-) diff --git a/scipy/special/basic.py b/scipy/special/basic.py index d1c77f2fd5ea..d6bca3a61163 100644 --- a/scipy/special/basic.py +++ b/scipy/special/basic.py @@ -1446,7 +1446,20 @@ def ai_zeros(nt): Values of Ai(x) evaluated at first `nt` zeros of Ai'(x) aip : ndarray Values of Ai'(x) evaluated at first `nt` zeros of Ai(x) - + + Examples + -------- + >>> from scipy import special + >>> a, ap, ai, aip = special.ai_zeros(3) + >>> a + array([-2.33810741, -4.08794944, -5.52055983]) + >>> ap + array([-1.01879297, -3.24819758, -4.82009921]) + >>> ai + array([ 0.53565666, -0.41901548, 0.38040647]) + >>> aip + array([ 0.70121082, -0.80311137, 0.86520403]) + References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special @@ -1484,7 +1497,20 @@ def bi_zeros(nt): Values of Bi(x) evaluated at first `nt` zeros of Bi'(x) bip : ndarray Values of Bi'(x) evaluated at first `nt` zeros of Bi(x) - + + Examples + -------- + >>> from scipy import special + >>> b, bp, bi, bip = special.bi_zeros(3) + >>> b + array([-1.17371322, -3.2710933 , -4.83073784]) + >>> bp + array([-2.29443968, -4.07315509, -5.51239573]) + >>> bi + array([-0.45494438, 0.39652284, -0.36796916]) + >>> bip + array([ 0.60195789, -0.76031014, 0.83699101]) + References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. 
"Computation of Special From a2d53d33fc9e5ec69d43fc62a0a057d57a8f684f Mon Sep 17 00:00:00 2001 From: Dowon Date: Fri, 16 Nov 2018 13:48:11 +0900 Subject: [PATCH 08/70] TST Duplicate tests for geometric mean --- scipy/stats/tests/test_stats.py | 113 +++++++++++++++++++++----------- 1 file changed, 74 insertions(+), 39 deletions(-) diff --git a/scipy/stats/tests/test_stats.py b/scipy/stats/tests/test_stats.py index f38339533d83..7e9768f41ac5 100644 --- a/scipy/stats/tests/test_stats.py +++ b/scipy/stats/tests/test_stats.py @@ -3723,123 +3723,158 @@ def do(self, a, b, axis=None, dtype=None): assert_equal(x.dtype, dtype) -class GeoMeanTestCase: - def test_1dlist(self): +class GeoMeanTestMethod: + def equal_test(self, array_like, desired, axis=None, dtype=None, decimal=7): + # Note this doesn't test when axis is not specified + x = stats.gmean(array_like, axis=axis, dtype=dtype) + assert_almost_equal(desired, x, decimal=decimal) + assert_equal(x.dtype, dtype) + + def approx_test(self, array_like, desired, axis=None, dtype=None, significant=7): + x = stats.gmean(array_like, axis=axis, dtype=dtype) + assert_approx_equal(desired, x, significant=significant) + assert_equal(x.dtype, dtype) + +class TestGeoMean(GeoMeanTestMethod): + def test_1d_list(self): # Test a 1d list a = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100] b = 45.2872868812 - self.do(a, b) + self.equal_test(a, b) - def test_1darray(self): + a = [1, 2, 3, 4] + desired = power(1 * 2 * 3 * 4, 1. / 4.) + self.equal_test(a, desired, decimal=14) + + desired1 = stats.gmean(a, axis=-1) + self.equal_test(a, desired1, axis=-1) + + def test_1d_array(self): # Test a 1d array a = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]) b = 45.2872868812 - self.do(a, b) + self.equal_test(a, b) - def test_1dma(self): + a = array([1, 2, 3, 4], float32) + desired = power(1 * 2 * 3 * 4, 1. / 4.) 
+ self.equal_test(a, desired, dtype=float32) + + desired1 = stats.gmean(a, axis=-1) + self.equal_test(a, desired1, axis=-1, dtype=float32) + + def test_1d_ma(self): # Test a 1d masked array a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]) b = 45.2872868812 - self.do(a, b) + self.equal_test(a, b) - def test_1dmavalue(self): + def test_1d_ma_value(self): # Test a 1d masked array with a masked value a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100], mask=[0,0,0,0,0,0,0,0,0,1]) b = 41.4716627439 - self.do(a, b) + self.equal_test(a, b) # Note the next tests use axis=None as default, not axis=0 - def test_2dlist(self): + def test_2d_list(self): # Test a 2d list a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] b = 52.8885199 - self.do(a, b) + self.equal_test(a, b) - def test_2darray(self): + def test_2d_array(self): # Test a 2d array a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] b = 52.8885199 - self.do(np.array(a), b) + self.equal_test(array(a), b) - def test_2dma(self): + def test_2d_ma(self): # Test a 2d masked array a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] b = 52.8885199 - self.do(np.ma.array(a), b) + self.equal_test(np.ma.array(a), b) - def test_2daxis0(self): + def test_2d_axis0(self): # Test a 2d list with axis=0 a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] b = np.array([35.56893304, 49.32424149, 61.3579244, 72.68482371]) - self.do(a, b, axis=0) + self.equal_test(a, b, axis=0) - def test_2daxis1(self): + a = array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) + desired = array([1, 2, 3, 4]) + self.equal_test(a, desired, axis=0, decimal=14) + + a = array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) + desired = stats.gmean(a, axis=0) + self.equal_test(a, desired, axis=0, decimal=14) + + def test_2d_axis1(self): # Test a 2d list with axis=1 a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] b = np.array([22.13363839, 64.02171746, 104.40086817]) - self.do(a, b, axis=1) + self.equal_test(a, b, axis=1) - def test_2dmatrixdaxis0(self): + a = array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) + v = power(1 * 2 * 3 * 4, 1. / 4.) 
+ desired = array([v, v, v]) + self.equal_test(a, desired, axis=1, decimal=14) + + def test_2d_matrix_axis0(self): # Test a 2d list with axis=0 a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] b = np.matrix([[35.56893304, 49.32424149, 61.3579244, 72.68482371]]) - self.do(np.matrix(a), b, axis=0) + self.equal_test(np.matrix(a), b, axis=0) - def test_2dmatrixaxis1(self): + def test_2d_matrix_axis1(self): # Test a 2d list with axis=1 a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] b = np.matrix([[22.13363839, 64.02171746, 104.40086817]]).T - self.do(np.matrix(a), b, axis=1) + self.equal_test(np.matrix(a), b, axis=1) - def test_1dlist0(self): + def test_large_values(self): + a = array([1e100, 1e200, 1e300]) + self.approx_test(a, 1e200, significant=13) + + def test_1d_list0(self): # Test a 1d list with zero element a = [10, 20, 30, 40, 50, 60, 70, 80, 90, 0] b = 0.0 # due to exp(-inf)=0 olderr = np.seterr(all='ignore') try: - self.do(a, b) + self.equal_test(a, b) finally: np.seterr(**olderr) - def test_1darray0(self): + def test_1d_array0(self): # Test a 1d array with zero element a = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 0]) b = 0.0 # due to exp(-inf)=0 olderr = np.seterr(all='ignore') try: - self.do(a, b) + self.equal_test(a, b) finally: np.seterr(**olderr) - def test_1dma0(self): + def test_1d_ma0(self): # Test a 1d masked array with zero element a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 0]) b = 41.4716627439 olderr = np.seterr(all='ignore') try: - self.do(a, b) + self.equal_test(a, b) finally: np.seterr(**olderr) - def test_1dmainf(self): + def test_1d_ma_inf(self): # Test a 1d masked array with negative element a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, -1]) b = 41.4716627439 olderr = np.seterr(all='ignore') try: - self.do(a, b) + self.equal_test(a, b) finally: np.seterr(**olderr) -class TestGeoMean(GeoMeanTestCase): - def do(self, a, b, axis=None, dtype=None): - # Note this doesn't test when axis is not specified - x = stats.gmean(a, axis=axis, dtype=dtype) - assert_almost_equal(b, x) - assert_equal(x.dtype, dtype) - - def test_binomtest(): # precision tests compared to R for ticket:986 pp = np.concatenate((np.linspace(0.1,0.2,5), np.linspace(0.45,0.65,5), From 7bccecdedb11dadcfbc8d7d2c71ac64bb2d87a63 Mon Sep 17 00:00:00 2001 From: Dowon Date: Fri, 16 Nov 2018 15:50:32 +0900 Subject: [PATCH 09/70] Removing duplicates --- scipy/stats/tests/test_stats.py | 48 +-------------------------------- 1 file changed, 1 insertion(+), 47 deletions(-) diff --git a/scipy/stats/tests/test_stats.py b/scipy/stats/tests/test_stats.py index 7e9768f41ac5..ca4f81c83624 100644 --- a/scipy/stats/tests/test_stats.py +++ b/scipy/stats/tests/test_stats.py @@ -1231,52 +1231,6 @@ def test_relfreq(): assert_array_almost_equal(relfreqs, relfreqs2) -class TestGMean(object): - - def test_1D_list(self): - a = (1,2,3,4) - actual = stats.gmean(a) - desired = power(1*2*3*4,1./4.) - assert_almost_equal(actual, desired,decimal=14) - - desired1 = stats.gmean(a,axis=-1) - assert_almost_equal(actual, desired1, decimal=14) - - def test_1D_array(self): - a = array((1,2,3,4), float32) - actual = stats.gmean(a) - desired = power(1*2*3*4,1./4.) 
- assert_almost_equal(actual, desired, decimal=7) - - desired1 = stats.gmean(a,axis=-1) - assert_almost_equal(actual, desired1, decimal=7) - - def test_2D_array_default(self): - a = array(((1,2,3,4), - (1,2,3,4), - (1,2,3,4))) - actual = stats.gmean(a) - desired = array((1,2,3,4)) - assert_array_almost_equal(actual, desired, decimal=14) - - desired1 = stats.gmean(a,axis=0) - assert_array_almost_equal(actual, desired1, decimal=14) - - def test_2D_array_dim1(self): - a = array(((1,2,3,4), - (1,2,3,4), - (1,2,3,4))) - actual = stats.gmean(a, axis=1) - v = power(1*2*3*4,1./4.) - desired = array((v,v,v)) - assert_array_almost_equal(actual, desired, decimal=14) - - def test_large_values(self): - a = array([1e100, 1e200, 1e300]) - actual = stats.gmean(a) - assert_approx_equal(actual, 1e200, significant=13) - - class TestHMean(object): def test_1D_list(self): a = (1,2,3,4) @@ -3723,7 +3677,7 @@ def do(self, a, b, axis=None, dtype=None): assert_equal(x.dtype, dtype) -class GeoMeanTestMethod: +class GeoMeanTestMethod(object): def equal_test(self, array_like, desired, axis=None, dtype=None, decimal=7): # Note this doesn't test when axis is not specified x = stats.gmean(array_like, axis=axis, dtype=dtype) From 0eaca10632d60f9b1d9f2e6e7cd24cf8f135989f Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Thu, 15 Nov 2018 23:01:22 -0500 Subject: [PATCH 10/70] MAINT: stats: Improve implementation of chi2 ppf method. The old implementation, self._isf(1.0-p, df), loses precision for very small p. The formulation 2*special.gammaincinv(df/2, p) avoids the loss of precision. --- scipy/stats/_continuous_distns.py | 2 +- scipy/stats/tests/test_distributions.py | 14 ++++++++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/scipy/stats/_continuous_distns.py b/scipy/stats/_continuous_distns.py index 20d02b7e3141..2ab06f8434c8 100644 --- a/scipy/stats/_continuous_distns.py +++ b/scipy/stats/_continuous_distns.py @@ -1082,7 +1082,7 @@ def _isf(self, p, df): return sc.chdtri(df, p) def _ppf(self, p, df): - return self._isf(1.0-p, df) + return 2*sc.gammaincinv(df/2, p) def _stats(self, df): mu = df diff --git a/scipy/stats/tests/test_distributions.py b/scipy/stats/tests/test_distributions.py index 000ca846d08c..08e20850f3ab 100644 --- a/scipy/stats/tests/test_distributions.py +++ b/scipy/stats/tests/test_distributions.py @@ -1450,6 +1450,20 @@ def test_precision(self): assert_almost_equal(stats.chi2.pdf(100, 100), 0.028162503162596778, decimal=14) + def test_ppf(self): + # Expected values computed with mpmath. 
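+        # (A sketch of one way such reference values can be obtained with
+        # mpmath -- illustrative only, not the exact script used:
+        #     from mpmath import mp
+        #     mp.dps = 50
+        #     chi2_cdf = lambda t, df: mp.gammainc(df/2, 0, t/2, regularized=True)
+        #     mp.findroot(lambda t: chi2_cdf(t, mp.mpf('4.8')) - mp.mpf('2e-47'), 1e-19)
+        # )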
+        df = 4.8
+        x = stats.chi2.ppf(2e-47, df)
+        assert_allclose(x, 1.098472479575179840604902808e-19, rtol=1e-10)
+        x = stats.chi2.ppf(0.5, df)
+        assert_allclose(x, 4.15231407598589358660093156, rtol=1e-10)
+
+        df = 13
+        x = stats.chi2.ppf(2e-77, df)
+        assert_allclose(x, 1.0106330688195199050507943e-11, rtol=1e-10)
+        x = stats.chi2.ppf(0.1, df)
+        assert_allclose(x, 7.041504580095461859307179763, rtol=1e-10)
+

class TestGumbelL(object):
    # gh-6228

From 195b782d26a72e502d7fb26af9301bc33c827174 Mon Sep 17 00:00:00 2001
From: Tyler Reddy
Date: Fri, 16 Nov 2018 14:41:50 -0800
Subject: [PATCH 11/70] TST: 32-bit test_distance.py now passes

* the floating point precision stringency of
  test_pdist_jensenshannon_iris_nonC() has been adjusted to allow it to
  pass with a 32-bit Python interpreter, as required by the wheels
  infrastructure
---
 scipy/spatial/tests/test_distance.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scipy/spatial/tests/test_distance.py b/scipy/spatial/tests/test_distance.py
index 0fcb745b65c8..56911d04e77f 100644
--- a/scipy/spatial/tests/test_distance.py
+++ b/scipy/spatial/tests/test_distance.py
@@ -1157,7 +1157,7 @@ def test_pdist_jensenshannon_iris_float32(self):
        _assert_within_tol(Y_test1, Y_right, eps, verbose > 2)

    def test_pdist_jensenshannon_iris_nonC(self):
-        eps = 5e-13
+        eps = 5e-12
        X = eo['iris']
        Y_right = eo['pdist-jensenshannon-iris']
        Y_test2 = pdist(X, 'test_jensenshannon')

From 3f0b8199fdbf932b1a2738ac7b282137db3d585e Mon Sep 17 00:00:00 2001
From: Paul van Mulbregt
Date: Sat, 17 Nov 2018 16:39:56 -0500
Subject: [PATCH 12/70] DOC: Replace Scipy with SciPy in the rst doc files for
 consistency.

Many rst files use the term "SciPy" in one part of the document and
"Scipy" in a later part. Change all "Scipy" to "SciPy".
---
 CONTRIBUTING.rst                  |  2 +-
 benchmarks/README.rst             | 12 ++++-----
 doc/source/building/macosx.rst    |  2 +-
 doc/source/ccallback.rst          |  2 +-
 doc/source/dev/deprecations.rst   |  2 +-
 doc/source/dev/distributing.rst   | 44 +++++++++++++++----------------
 doc/source/dev/github.rst         |  2 +-
 doc/source/dev/licensing.rst      | 12 ++++-----
 doc/source/dev/releasing.rst      |  6 ++---
 doc/source/dev/versioning.rst     |  6 ++---
 doc/source/tutorial/basic.rst     |  4 +--
 doc/source/tutorial/fftpack.rst   | 20 +++++++-------
 doc/source/tutorial/general.rst   |  2 +-
 doc/source/tutorial/integrate.rst |  2 +-
 doc/source/tutorial/io.rst        |  8 +++---
 doc/source/tutorial/signal.rst    |  6 ++---
 doc/source/tutorial/special.rst   |  2 +-
 17 files changed, 67 insertions(+), 67 deletions(-)

diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index 00679bd19e33..d861a48f45a6 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -2,7 +2,7 @@
SciPy pull request guidelines
=============================

-Pull requests are always welcome, and the Scipy community appreciates
+Pull requests are always welcome, and the SciPy community appreciates
any help you give. Note that a code of conduct applies to all spaces
managed by the SciPy project, including issues and pull requests:
https://github.com/scipy/scipy/blob/master/doc/source/dev/conduct/code_of_conduct.rst.

diff --git a/benchmarks/README.rst b/benchmarks/README.rst
index fb38acd82d3f..581b5a5a0e22 100644
--- a/benchmarks/README.rst
+++ b/benchmarks/README.rst
@@ -4,7 +4,7 @@
SciPy benchmarks
================

-Benchmarking Scipy with Airspeed Velocity.
+Benchmarking SciPy with Airspeed Velocity.

Usage
-----

Airspeed Velocity manages building and Python virtualenvs by itself,
unless told otherwise.
Some of the benchmarking features in -``runtests.py`` also tell ASV to use the Scipy compiled by +``runtests.py`` also tell ASV to use the SciPy compiled by ``runtests.py``. To run the benchmarks, you do not need to install a -development version of Scipy to your current Python environment. +development version of SciPy to your current Python environment. -Run a benchmark against currently checked out Scipy version (don't record the +Run a benchmark against currently checked out SciPy version (don't record the result):: python runtests.py --bench sparse.Arithmetic @@ -54,7 +54,7 @@ See `ASV documentation`_ for basics on how to write benchmarks. Some things to consider: -- When importing things from Scipy on the top of the test files, do it as:: +- When importing things from SciPy on the top of the test files, do it as:: try: from scipy.sparse.linalg import onenormest @@ -62,7 +62,7 @@ Some things to consider: pass The benchmark files need to be importable also when benchmarking old versions - of Scipy. The benchmarks themselves don't need any guarding against missing + of SciPy. The benchmarks themselves don't need any guarding against missing features --- only the top-level imports. - Try to keep the runtime of the benchmark reasonable. diff --git a/doc/source/building/macosx.rst b/doc/source/building/macosx.rst index bed3be05a7c2..7bae54709a1e 100644 --- a/doc/source/building/macosx.rst +++ b/doc/source/building/macosx.rst @@ -44,7 +44,7 @@ You will also need to install a library providing the BLAS and LAPACK interfaces. ATLAS, OpenBLAS, and MKL all work. OpenBLAS can be installed via `Homebrew `. -As of Scipy version 1.2.0, we do not support compiling against the system +As of SciPy version 1.2.0, we do not support compiling against the system Accelerate library for BLAS and LAPACK. It does not support a sufficiently recent LAPACK interface. diff --git a/doc/source/ccallback.rst b/doc/source/ccallback.rst index f3da292b3dd7..c44906393439 100644 --- a/doc/source/ccallback.rst +++ b/doc/source/ccallback.rst @@ -9,7 +9,7 @@ can either be python callables or low-level compiled functions. Using compiled callback functions can improve performance somewhat by avoiding wrapping data in Python objects. -Such low-level functions in Scipy are wrapped in `LowLevelCallable` +Such low-level functions in SciPy are wrapped in `LowLevelCallable` objects, which can be constructed from function pointers obtained from ctypes, cffi, Cython, or contained in Python `PyCapsule` objects. diff --git a/doc/source/dev/deprecations.rst b/doc/source/dev/deprecations.rst index ef9061dfa701..06a5270f5f97 100644 --- a/doc/source/dev/deprecations.rst +++ b/doc/source/dev/deprecations.rst @@ -3,7 +3,7 @@ Deprecations There are various reasons for wanting to remove existing functionality: it's buggy, the API isn't understandable, it's superseded by functionality with -better performance, it needs to be moved to another Scipy submodule, etc. +better performance, it needs to be moved to another SciPy submodule, etc. In general it's not a good idea to remove something without warning users about that removal first. 
Therefore this is what should be done before removing diff --git a/doc/source/dev/distributing.rst b/doc/source/dev/distributing.rst index 4de55590a1f9..a8e288fc20eb 100644 --- a/doc/source/dev/distributing.rst +++ b/doc/source/dev/distributing.rst @@ -2,16 +2,16 @@ Distributing ============ Distributing Python packages is nontrivial - especially for a package with -complex build requirements like Scipy - and subject to change. For an up-to-date +complex build requirements like SciPy - and subject to change. For an up-to-date overview of recommended tools and techniques, see the `Python Packaging User Guide`_. This document discusses some of the main issues and considerations for -Scipy. +SciPy. Dependencies ------------ Dependencies are things that a user has to install in order to use (or build/test) a package. They usually cause trouble, especially if they're not -optional. Scipy tries to keep its dependencies to a minimum; currently they +optional. SciPy tries to keep its dependencies to a minimum; currently they are: *Unconditional run-time dependencies:* @@ -43,7 +43,7 @@ are: - LaTeX (pdf docs) - Pillow_ (docs) -Furthermore of course one needs C, C++ and Fortran compilers to build Scipy, +Furthermore of course one needs C, C++ and Fortran compilers to build SciPy, but those we don't consider to be dependencies and are therefore not discussed here. For details, see https://scipy.github.io/devdocs/building/. @@ -53,18 +53,18 @@ scipy) the package instead. For example, six_ and decorator_ are vendored in ``scipy._lib``. The only dependency that is reported to pip_ is Numpy_, see -``install_requires`` in Scipy's main ``setup.py``. The other dependencies -aren't needed for Scipy to function correctly, and the one unconditional build +``install_requires`` in SciPy's main ``setup.py``. The other dependencies +aren't needed for SciPy to function correctly, and the one unconditional build dependency that pip_ knows how to install (Cython_) we prefer to treat like a compiler rather than a Python package that pip_ is allowed to upgrade. Issues with dependency handling ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ There are some serious issues with how Python packaging tools handle -dependencies reported by projects. Because Scipy gets regular bug reports +dependencies reported by projects. Because SciPy gets regular bug reports about this, we go in a bit of detail here. -Scipy only reports its dependency on Numpy via ``install_requires`` if Numpy +SciPy only reports its dependency on Numpy via ``install_requires`` if Numpy isn't installed at all on a system. This will only change when there are either 32-bit and 64-bit Windows wheels for Numpy on PyPI or when ``pip upgrade`` becomes available (with sane behavior, unlike ``pip install @@ -75,7 +75,7 @@ either 32-bit and 64-bit Windows wheels for Numpy on PyPI or when The situation with ``setup_requires`` is even worse; pip_ doesn't handle that keyword at all, while ``setuptools`` has issues (here's a `current one `_) and invokes -``easy_install`` which comes with its own set of problems (note that Scipy doesn't +``easy_install`` which comes with its own set of problems (note that SciPy doesn't support ``easy_install`` at all anymore; issues specific to it will be closed as "wontfix"). @@ -84,19 +84,19 @@ as "wontfix"). 
Supported Python and Numpy versions ----------------------------------- -The Python_ versions that Scipy supports are listed in the list of PyPI +The Python_ versions that SciPy supports are listed in the list of PyPI classifiers in ``setup.py``, and mentioned in the release notes for each release. All newly released Python versions will be supported as soon as possible. The general policy on dropping support for a Python version is that (a) usage of that version has to be quite low (say <5% of users) and (b) the version isn't included in an active long-term support release of one of the -main Linux distributions anymore. Scipy typically follows Numpy, which has a +main Linux distributions anymore. SciPy typically follows Numpy, which has a similar policy. The final decision on dropping support is always taken on the scipy-dev mailing list. -The lowest supported Numpy_ version for a Scipy version is mentioned in the +The lowest supported Numpy_ version for a SciPy version is mentioned in the release notes and is encoded in ``scipy/__init__.py`` and the -``install_requires`` field of ``setup.py``. Typically the latest Scipy release +``install_requires`` field of ``setup.py``. Typically the latest SciPy release supports 3 or 4 minor versions of Numpy. That may become more if the frequency of Numpy releases increases (it's about 1x/year at the time of writing). Support for a particular Numpy version is typically dropped if (a) that Numpy @@ -105,7 +105,7 @@ is starting to outweigh the benefits. The final decision on dropping support is always taken on the scipy-dev mailing list. Supported versions of optional dependencies and compilers is less clearly -documented, and also isn't tested well or at all by Scipy's Continuous +documented, and also isn't tested well or at all by SciPy's Continuous Integration setup. Issues regarding this are dealt with as they come up in the issue tracker or mailing list. @@ -114,8 +114,8 @@ Building binary installers -------------------------- .. note:: - This section is only about building Scipy binary installers to *distribute*. - For info on building Scipy on the same machine as where it will be used, see + This section is only about building SciPy binary installers to *distribute*. + For info on building SciPy on the same machine as where it will be used, see `this scipy.org page `_. There are a number of things to take into consideration when building binaries @@ -132,7 +132,7 @@ and distributing them on PyPI or elsewhere. **Windows** - The currently most easily available toolchain for building - Python.org compatible binaries for Scipy is installing MSVC (see + Python.org compatible binaries for SciPy is installing MSVC (see https://wiki.python.org/moin/WindowsCompilers) and mingw64-gfortran. Support for this configuration requires numpy.distutils from Numpy >= 1.14.dev and a gcc/gfortran-compiled static ``openblas.a``. @@ -140,7 +140,7 @@ and distributing them on PyPI or elsewhere. https://github.com/MacPython/scipy-wheels - For 64-bit Windows installers built with a free toolchain, use the method documented at https://github.com/numpy/numpy/wiki/Mingw-static-toolchain. - That method will likely be used for Scipy itself once it's clear that the + That method will likely be used for SciPy itself once it's clear that the maintenance of that toolchain is sustainable long-term. See the MingwPy_ project and `this thread `_ for @@ -151,13 +151,13 @@ and distributing them on PyPI or elsewhere. 
`this article `_ and for (partial) MSVC instructions see `this wiki page `_. -- Older Scipy releases contained a .exe "superpack" installer. Those contain +- Older SciPy releases contained a .exe "superpack" installer. Those contain 3 complete builds (no SSE, SSE2, SSE3), and were built with https://github.com/numpy/numpy-vendor. That build setup is known to not work well anymore and is no longer supported. It used g77 instead of gfortran, due to complex DLL distribution issues (see `gh-2829 `_). Because the toolchain is no - longer supported, g77 support isn't needed anymore and Scipy can now include + longer supported, g77 support isn't needed anymore and SciPy can now include Fortran 90/95 code. **OS X** @@ -166,13 +166,13 @@ and distributing them on PyPI or elsewhere. python.org, Homebrew, MacPython), use the build method provided by https://github.com/MacPython/scipy-wheels. - DMG installers for the Python from python.org on OS X can still be produced - by ``tools/scipy-macosx-installer/``. Scipy doesn't distribute those + by ``tools/scipy-macosx-installer/``. SciPy doesn't distribute those installers anymore though, now that there are binary wheels on PyPi. **Linux** - PyPi-compatible Linux wheels can be produced via the manylinux_ project. - The corresponding build setup for TravisCI for Scipy is set up in + The corresponding build setup for TravisCI for SciPy is set up in https://github.com/MacPython/scipy-wheels. Other Linux build-setups result to PyPi incompatible wheels, which diff --git a/doc/source/dev/github.rst b/doc/source/dev/github.rst index 0af7055cb973..37123a74728c 100644 --- a/doc/source/dev/github.rst +++ b/doc/source/dev/github.rst @@ -51,7 +51,7 @@ Dealing with pull requests - Make sure that the labels and milestone on a merged PR are set correctly. - When you want to reject a PR: if it's very obvious you can just close it and explain why, if not obvious then it's a good idea to first explain why you - think the PR is not suitable for inclusion in Scipy and then let a second + think the PR is not suitable for inclusion in SciPy and then let a second committer comment or close. diff --git a/doc/source/dev/licensing.rst b/doc/source/dev/licensing.rst index 5e3c46e776b1..0573093bbc6a 100644 --- a/doc/source/dev/licensing.rst +++ b/doc/source/dev/licensing.rst @@ -1,18 +1,18 @@ Licensing ========= -Scipy is distributed under the modified (3-clause) BSD license. All code, -documentation and other files added to Scipy by contributors is licensed under +SciPy is distributed under the modified (3-clause) BSD license. All code, +documentation and other files added to SciPy by contributors is licensed under this license, unless another license is explicitly specified in the source code. Contributors keep the copyright for code they wrote and submit for -inclusion to Scipy. +inclusion to SciPy. -Other licenses that are compatible with the modified BSD license that Scipy +Other licenses that are compatible with the modified BSD license that SciPy uses are 2-clause BSD, MIT and PSF. Incompatible licenses are GPL, Apache and custom licenses that require attribution/citation or prohibit use for commercial purposes. It regularly happens that PRs are submitted with content copied or derived from -unlicensed code. Such contributions cannot be accepted for inclusion in Scipy. +unlicensed code. Such contributions cannot be accepted for inclusion in SciPy. 
What is needed in such cases is to contact the original author and ask him to relicense his code under the modified BSD (or a compatible) license. If the original author agrees to this, add a comment saying so to the source files and @@ -20,6 +20,6 @@ forward the relevant email to the scipy-dev mailing list. What also regularly happens is that code is translated or derived from code in R, Octave (both GPL-licensed) or a commercial application. Such code also -cannot be included in Scipy. Simply implementing functionality with the same +cannot be included in SciPy. Simply implementing functionality with the same API as found in R/Octave/... is fine though, as long as the author doesn't look at the original incompatibly-licensed source code. diff --git a/doc/source/dev/releasing.rst b/doc/source/dev/releasing.rst index 1c062aaea33e..fd056181f1c5 100644 --- a/doc/source/dev/releasing.rst +++ b/doc/source/dev/releasing.rst @@ -4,7 +4,7 @@ Making a SciPy release ====================== At the highest level, this is what the release manager does to release a new -Scipy version: +SciPy version: #. Propose a release schedule on the scipy-dev mailing list. #. Create the maintenance branch for the release. @@ -85,7 +85,7 @@ and that the release notes are up-to-date and included in the html docs. Then edit ``setup.py`` to get the correct version number (set ``ISRELEASED = True``) and commit it with a message like ``REL: set version to -``. Don't push this commit to the Scipy repo yet. +``. Don't push this commit to the SciPy repo yet. Finally tag the release locally with ``git tag -s `` (the ``-s`` ensures the tag is signed). Continue with building release artifacts (next section). @@ -179,7 +179,7 @@ doc server is needed; ask @pv (server admin) or @rgommers (can upload) if you don't have that. The sources for the website itself are maintained in -https://github.com/scipy/docs.scipy.org/. Add the new Scipy version in the +https://github.com/scipy/docs.scipy.org/. Add the new SciPy version in the table of releases in ``index.rst``. Push that commit, then do ``make upload USERNAME=yourusername``. diff --git a/doc/source/dev/versioning.rst b/doc/source/dev/versioning.rst index decd57a04d21..00456cd5ee95 100644 --- a/doc/source/dev/versioning.rst +++ b/doc/source/dev/versioning.rst @@ -1,6 +1,6 @@ Version numbering ================= -Scipy version numbering complies to `PEP 440`_. Released final versions, which +SciPy version numbering complies to `PEP 440`_. Released final versions, which are the only versions appearing on `PyPI`_, are numbered ``MAJOR.MINOR.MICRO`` where: @@ -18,7 +18,7 @@ Released alpha, beta and rc (release candidate) versions are numbered like final versions but with postfixes ``a#``, ``b#`` and ``rc#`` respectively, with ``#`` an integer. Development versions are postfixed with ``.dev0+``. 
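A quick sanity check of such strings (this sketch assumes the third-party
``packaging`` module, which SciPy itself does not depend on)::

    >>> from packaging.version import Version
    >>> Version('0.17.0.dev0+ac53f09').is_devrelease
    True
    >>> Version('0.16.0b1').is_prerelease
    True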
-Examples of valid Scipy version strings are:: +Examples of valid SciPy version strings are:: 0.16.0 0.15.1 @@ -27,7 +27,7 @@ Examples of valid Scipy version strings are:: 0.14.0rc1 0.17.0.dev0+ac53f09 -An installed Scipy version contains these version identifiers:: +An installed SciPy version contains these version identifiers:: scipy.__version__ # complete version string, including git commit hash for dev versions scipy.version.short_version # string, only major.minor.micro diff --git a/doc/source/tutorial/basic.rst b/doc/source/tutorial/basic.rst index 2d0026c4ef91..2ca452732ba8 100644 --- a/doc/source/tutorial/basic.rst +++ b/doc/source/tutorial/basic.rst @@ -10,7 +10,7 @@ Basic functions Interaction with Numpy ---------------------- -Scipy builds on Numpy, and for all basic array handling needs you can +SciPy builds on Numpy, and for all basic array handling needs you can use Numpy functions: >>> import numpy as np @@ -22,7 +22,7 @@ Rather than giving a detailed description of each of these functions will discuss some of the more useful commands which require a little introduction to use to their full potential. -To use functions from some of the Scipy modules, you can do: +To use functions from some of the SciPy modules, you can do: >>> from scipy import some_module >>> some_module.some_function() diff --git a/doc/source/tutorial/fftpack.rst b/doc/source/tutorial/fftpack.rst index 8bb7cd5bafcf..edcfcd2504be 100644 --- a/doc/source/tutorial/fftpack.rst +++ b/doc/source/tutorial/fftpack.rst @@ -1,7 +1,7 @@ Fourier Transforms (:mod:`scipy.fftpack`) ========================================= -.. sectionauthor:: Scipy Developers +.. sectionauthor:: SciPy Developers .. currentmodule:: scipy.fftpack @@ -23,7 +23,7 @@ Fourier analysis and its applications. PyFFTW_ provides a way to replace a number of functions in `scipy.fftpack` with its own functions, which are usually significantly faster, via pyfftw.interfaces_. Because PyFFTW_ relies on the GPL-licensed FFTW_ it - cannot be included in Scipy. Users for whom the speed of FFT routines is + cannot be included in SciPy. Users for whom the speed of FFT routines is critical should consider installing PyFFTW_. @@ -255,7 +255,7 @@ arrays in frequency domain. Discrete Cosine Transforms -------------------------- -Scipy provides a DCT with the function :func:`dct` and a corresponding IDCT +SciPy provides a DCT with the function :func:`dct` and a corresponding IDCT with the function :func:`idct`. There are 8 types of the DCT [WPC]_, [Mak]_; however, only the first 3 types are implemented in scipy. "The" DCT generally refers to DCT type 2, and "the" Inverse DCT generally refers to DCT type 3. In @@ -270,7 +270,7 @@ MATLAB dct(x). Type I DCT __________ -Scipy uses the following definition of the unnormalized DCT-I +SciPy uses the following definition of the unnormalized DCT-I (``norm='None'``): .. math:: @@ -285,7 +285,7 @@ DCT-I is only supported for input size > 1 Type II DCT ___________ -Scipy uses the following definition of the unnormalized DCT-II +SciPy uses the following definition of the unnormalized DCT-II (``norm='None'``): .. math:: @@ -312,7 +312,7 @@ In this case, the DCT "base functions" :math:`\phi_k[n] = 2 f \cos Type III DCT ____________ -Scipy uses the following definition of the unnormalized DCT-III +SciPy uses the following definition of the unnormalized DCT-III (``norm='None'``): .. math:: @@ -403,7 +403,7 @@ provides a five-fold compression rate. 
Discrete Sine Transforms ------------------------ -Scipy provides a DST [Mak]_ with the function :func:`dst` and a corresponding IDST +SciPy provides a DST [Mak]_ with the function :func:`dst` and a corresponding IDST with the function :func:`idst`. There are theoretically 8 types of the DST for different combinations of @@ -413,7 +413,7 @@ types are implemented in scipy. Type I DST __________ -DST-I assumes the input is odd around n=-1 and n=N. Scipy uses the following +DST-I assumes the input is odd around n=-1 and n=N. SciPy uses the following definition of the unnormalized DST-I (``norm='None'``): .. math:: @@ -428,7 +428,7 @@ own inverse, up to a factor `2(N+1)`. Type II DST ___________ -DST-II assumes the input is odd around n=-1/2 and even around n=N. Scipy uses +DST-II assumes the input is odd around n=-1/2 and even around n=N. SciPy uses the following definition of the unnormalized DST-II (``norm='None'``): .. math:: @@ -439,7 +439,7 @@ the following definition of the unnormalized DST-II (``norm='None'``): Type III DST ____________ -DST-III assumes the input is odd around n=-1 and even around n=N-1. Scipy uses +DST-III assumes the input is odd around n=-1 and even around n=N-1. SciPy uses the following definition of the unnormalized DST-III (``norm='None'``): .. math:: diff --git a/doc/source/tutorial/general.rst b/doc/source/tutorial/general.rst index 7d4d318ad447..2c1a91820674 100644 --- a/doc/source/tutorial/general.rst +++ b/doc/source/tutorial/general.rst @@ -69,7 +69,7 @@ Subpackage Description :mod:`stats` Statistical distributions and functions ================== ====================================================== -Scipy sub-packages need to be imported separately, for example:: +SciPy sub-packages need to be imported separately, for example:: >>> from scipy import linalg, optimize diff --git a/doc/source/tutorial/integrate.rst b/doc/source/tutorial/integrate.rst index a163128b770d..0518d5596dff 100644 --- a/doc/source/tutorial/integrate.rst +++ b/doc/source/tutorial/integrate.rst @@ -356,7 +356,7 @@ different file extension. A library has now been created that can be loaded into Python with `ctypes`. 3.) Load shared library into Python using `ctypes` and set ``restypes`` and -``argtypes`` - this allows Scipy to interpret the function correctly: +``argtypes`` - this allows SciPy to interpret the function correctly: .. code:: python diff --git a/doc/source/tutorial/io.rst b/doc/source/tutorial/io.rst index 6b3391088025..018a511c7a71 100644 --- a/doc/source/tutorial/io.rst +++ b/doc/source/tutorial/io.rst @@ -44,8 +44,8 @@ From time to time you may find yourself re-using this machinery. How do I start? ``````````````` -You may have a ``.mat`` file that you want to read into Scipy. Or, you -want to pass some variables from Scipy / Numpy into MATLAB. +You may have a ``.mat`` file that you want to read into SciPy. Or, you +want to pass some variables from SciPy / Numpy into MATLAB. To save us using a MATLAB license, let's start in Octave_. Octave has MATLAB-compatible save and load functions. Start Octave (``octave`` at @@ -171,7 +171,7 @@ We can load this in Python: >>> val.dtype dtype([('field1', 'O'), ('field2', 'O')]) -In versions of Scipy from 0.12.0, MATLAB structs come back as numpy +In versions of SciPy from 0.12.0, MATLAB structs come back as numpy structured arrays, with fields named for the struct fields. You can see the field names in the ``dtype`` output above. 
Note also: @@ -187,7 +187,7 @@ and: 1 1 So, in MATLAB, the struct array must be at least 2D, and we replicate -that when we read into Scipy. If you want all length 1 dimensions +that when we read into SciPy. If you want all length 1 dimensions squeezed out, try this: >>> mat_contents = sio.loadmat('octave_struct.mat', squeeze_me=True) diff --git a/doc/source/tutorial/signal.rst b/doc/source/tutorial/signal.rst index 56d9d8e29a05..b2ecbe7820c3 100644 --- a/doc/source/tutorial/signal.rst +++ b/doc/source/tutorial/signal.rst @@ -512,7 +512,7 @@ Filter Design Time-discrete filters can be classified into finite response (FIR) filters and infinite response (IIR) filters. FIR filters can provide a linear phase -response, whereas IIR filters cannot. Scipy provides functions +response, whereas IIR filters cannot. SciPy provides functions for designing both types of filters. FIR Filter @@ -579,7 +579,7 @@ Nyquist frequency in :func:`firwin2` and :func:`freqz` (as explained above). IIR Filter """""""""" -Scipy provides two functions to directly design IIR :func:`iirdesign` and +SciPy provides two functions to directly design IIR :func:`iirdesign` and :func:`iirfilter` where the filter type (e.g. elliptic) is passed as an argument and several more filter design functions for specific filter types; e.g. :func:`ellip`. @@ -1044,7 +1044,7 @@ implementation. Detrend ------- -Scipy provides the function :func:`detrend` to remove a constant or linear +SciPy provides the function :func:`detrend` to remove a constant or linear trend in a data series in order to see effect of higher order. The example below removes the constant and linear trend of a 2-nd order diff --git a/doc/source/tutorial/special.rst b/doc/source/tutorial/special.rst index 8d7cd97bef1c..017e6c6630f5 100644 --- a/doc/source/tutorial/special.rst +++ b/doc/source/tutorial/special.rst @@ -66,7 +66,7 @@ Cython Bindings for Special Functions (:mod:`scipy.special.cython_special`) .. highlight:: cython -Scipy also offers Cython bindings for scalar, typed versions of many +SciPy also offers Cython bindings for scalar, typed versions of many of the functions in special. 
The following Cython code gives a simple example of how to use these functions:: From ef412038216aa6ca584d3e0d34f0417510e46a62 Mon Sep 17 00:00:00 2001 From: JakobStruye Date: Sun, 18 Nov 2018 15:32:21 +0100 Subject: [PATCH 13/70] MAINT: scipy.sparse: Fixed error when reshaping to illegal shape (#9472) --- scipy/sparse/sputils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scipy/sparse/sputils.py b/scipy/sparse/sputils.py index b528456c2836..10d626d7006c 100644 --- a/scipy/sparse/sputils.py +++ b/scipy/sparse/sputils.py @@ -294,7 +294,7 @@ def check_shape(args, current_shape=None): new_size = np.prod(new_shape, dtype=int) if new_size != current_size: raise ValueError('cannot reshape array of size {} into shape {}' - .format(new_size, new_shape)) + .format(current_size, new_shape)) elif len(negative_indexes) == 1: skip = negative_indexes[0] specified = np.prod(new_shape[0:skip] + new_shape[skip+1:]) From 97bc4c3e5d1d8b8d5bf14581b86a1a54861f2353 Mon Sep 17 00:00:00 2001 From: Ilhan Polat Date: Sun, 18 Nov 2018 19:31:40 +0100 Subject: [PATCH 14/70] DOC: Add the correct terminal type for MSYS2 [ci skip] --- doc/source/building/windows.rst | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/doc/source/building/windows.rst b/doc/source/building/windows.rst index 665e3146a954..7787c81fa2c5 100644 --- a/doc/source/building/windows.rst +++ b/doc/source/building/windows.rst @@ -76,7 +76,12 @@ all via hitting enter key at each step. We should be aware of the fact that these tools also install Python2, very similar to a virtual environment, which is only usable within an MSYS2 terminal and we are **not** -going to use it at any point. +going to use it at any point. After updating, now we are going to use the build +toolchain that we have installed in the previous step. Depending on 32/64bit choice, +we will switch to another shell that MSYS2 created. In your start menu you should +see three MSYS2 terminal shortcuts. Select the one with either 64 or 32bit indicator. +The reason why we do this is that the toolchain and compilers are available to these +shells and not to the standard MSYS2 terminal. If you already have a GitHub repository folder where you keep your own repos, it is better to use that location to keep things nice and tidy since we are going to clone yet another @@ -146,10 +151,10 @@ Installing OpenBLAS =================== If you see that line, then you might have OpenBLAS correctly built, even if other failures -occurred. Look in that folder for :code:`openblas.a`. If you find a file called something -like :code:`libopenblas_5f998ef_gcc7_2_0.a`, just rename it to :code:`openblas.a` and continue. -If the file isn't there, then poke around and try to find the file elsewhere in -:code:`OPENBLAS_ROOT`. If you don't have that file, you'll probably need to find out what +might have occurred. Look in that folder for :code:`openblas.a`. If you find a file called +something like :code:`libopenblas_5f998ef_gcc7_2_0.a`, just rename it to :code:`openblas.a` +and continue. If the file isn't there, then poke around and try to find the file elsewhere +in :code:`OPENBLAS_ROOT`. If you don't have that file, you'll probably need to find out what happened and then build OpenBLAS again. But if you have that file, we'll assume that you've completed this step correctly. Proceeding on that assumption, let's build SciPy. @@ -217,13 +222,10 @@ Now install the dependencies that we need to build and test SciPy. 
**It's important to specify the full path to the native Python interpreter so that the
built-in MSYS2 Python will not be used. Attempting to build with the MSYS2 Python
will not work correctly.**

-**18 October 2017** Until NumPy 1.14 is officially released, we have to work with the latest
-development version of the NumPy repository. See the NumPy documentation....
-
.. code:: shell

    /c/Users//AppData/Local/Programs/Python/Python36/python.exe \
-    -m pip install numpy cython pytest pytest-xdist pytest-faulthandler
+    -m pip install numpy>=1.14.0 cython pytest pytest-xdist pytest-faulthandler

Please note that this is a simpler procedure than what is used for the official binaries.
**Your binaries will only work with the latest NumPy (v1.14.0dev and higher)**. For

From ef720a26a1019ab9564e3c8dcb14932dcc33b55a Mon Sep 17 00:00:00 2001
From: Paul van Mulbregt
Date: Sun, 18 Nov 2018 21:33:51 -0500
Subject: [PATCH 15/70] DOC: Replace "Scipy" with "SciPy" in .py and .txt doc
 files for consistency.

Explicitly excluded are the old release notes: doc/release/*-notes.rst.
---
 doc/API.rst.txt                               | 12 +++----
 doc/README.txt                                |  4 +--
 doc/ROADMAP.rst.txt                           |  2 +-
 scipy/__init__.py                             |  2 +-
 scipy/_build_utils/__init__.py                |  2 +-
 scipy/cluster/hierarchy.py                    | 12 +++----
 scipy/io/matlab/mio4.py                       |  2 +-
 scipy/linalg/decomp_lu.py                     |  2 +-
 scipy/linalg/decomp_qr.py                     |  2 +-
 scipy/odr/tests/test_odr.py                   |  2 +-
 scipy/optimize/_remove_redundancy.py          |  2 +-
 scipy/optimize/_trustregion_exact.py          |  2 +-
 scipy/optimize/nonlin.py                      |  6 ++--
 scipy/optimize/optimize.py                    |  2 +-
 scipy/optimize/tests/test_optimize.py         | 36 +++++++++----------
 scipy/signal/signaltools.py                   |  2 +-
 scipy/sparse/_matrix_io.py                    |  2 +-
 scipy/sparse/linalg/isolve/_gcrotmk.py        |  2 +-
 scipy/sparse/linalg/isolve/lgmres.py          |  2 +-
 .../linalg/isolve/tests/test_iterative.py     |  4 +--
 scipy/sparse/linalg/isolve/tests/test_lsmr.py |  2 +-
 scipy/sparse/sparsetools.py                   |  2 +-
 scipy/sparse/tests/test_matrix_io.py          |  2 +-
 scipy/spatial/_spherical_voronoi.py           |  2 +-
 scipy/spatial/distance.py                     |  2 +-
 scipy/special/orthogonal.py                   |  2 +-
 scipy/special/tests/test_mpmath.py            |  4 +--
 scipy/stats/kde.py                            |  4 +--
 scipy/stats/tests/test_stats.py               |  2 +-
 29 files changed, 62 insertions(+), 62 deletions(-)

diff --git a/doc/API.rst.txt b/doc/API.rst.txt
index 5460ccd1bffa..a5f22c3a7ebb 100644
--- a/doc/API.rst.txt
+++ b/doc/API.rst.txt
@@ -1,4 +1,4 @@
-API - importing from Scipy
+API - importing from SciPy
==========================

In Python the distinction between what is the public API of a library and what
@@ -25,12 +25,12 @@ rules for what is and isn't public in Python are:
  case; the presence of underscores do mark something as private, but the
  absence of underscores do not mark something as public.

-In Scipy there are modules whose names don't start with an underscore, but that
+In SciPy there are modules whose names don't start with an underscore, but that
should be considered private. To clarify which modules these are we define
-below what the public API is for Scipy, and give some recommendations for how
-to import modules/functions/objects from Scipy.
+below what the public API is for SciPy, and give some recommendations for how
+to import modules/functions/objects from SciPy.

-Guidelines for importing functions from Scipy
+Guidelines for importing functions from SciPy
---------------------------------------------

The scipy namespace itself only contains functions imported from numpy. These
@@ -77,7 +77,7 @@ API definition

Every submodule listed below is public.
That means that these submodules are unlikely to be renamed or changed in an incompatible way, and if that is -necessary a deprecation warning will be raised for one Scipy release before the +necessary a deprecation warning will be raised for one SciPy release before the change is made. * `scipy.cluster` diff --git a/doc/README.txt b/doc/README.txt index d3ef865c5300..2c173bb54a31 100644 --- a/doc/README.txt +++ b/doc/README.txt @@ -7,7 +7,7 @@ The easy way to build the documentation is to run python setup.py build_sphinx -This will first build Scipy in-place, and then generate documentation for it. +This will first build SciPy in-place, and then generate documentation for it. Another way ----------- @@ -16,5 +16,5 @@ Another way 2. Run ``make html`` or ``make dist`` Note that ``make html`` builds the documentation for the currently installed -version of Scipy, not the one corresponding to the source code here. +version of SciPy, not the one corresponding to the source code here. diff --git a/doc/ROADMAP.rst.txt b/doc/ROADMAP.rst.txt index 6d41e5d033d4..61017c02006c 100644 --- a/doc/ROADMAP.rst.txt +++ b/doc/ROADMAP.rst.txt @@ -341,7 +341,7 @@ New modules under discussion diff ```` -Currently Scipy doesn't provide much support for numerical differentiation. +Currently SciPy doesn't provide much support for numerical differentiation. A new ``scipy.diff`` module for that is discussed in https://github.com/scipy/scipy/issues/2035. There's also a fairly detailed GSoC proposal to build on, see `here `_. diff --git a/scipy/__init__.py b/scipy/__init__.py index 49310723b579..669e96e90e4d 100644 --- a/scipy/__init__.py +++ b/scipy/__init__.py @@ -51,7 +51,7 @@ test --- Run scipy unittests show_config --- Show scipy build configuration show_numpy_config --- Show numpy build configuration - __version__ --- Scipy version string + __version__ --- SciPy version string __numpy_version__ --- Numpy version string """ diff --git a/scipy/_build_utils/__init__.py b/scipy/_build_utils/__init__.py index d3e9eaf57569..2d9574f575c6 100644 --- a/scipy/_build_utils/__init__.py +++ b/scipy/_build_utils/__init__.py @@ -4,7 +4,7 @@ # Don't use deprecated Numpy C API. Define this to a fixed version instead of -# NPY_API_VERSION in order not to break compilation for released Scipy versions +# NPY_API_VERSION in order not to break compilation for released SciPy versions # when Numpy introduces a new deprecation. Use in setup.py:: # # config.add_extension('_name', sources=['source_fname'], **numpy_nodepr_api) diff --git a/scipy/cluster/hierarchy.py b/scipy/cluster/hierarchy.py index ed625f74457e..050610afaf14 100644 --- a/scipy/cluster/hierarchy.py +++ b/scipy/cluster/hierarchy.py @@ -1852,7 +1852,7 @@ def from_mlab_linkage(Z): See Also -------- linkage: for a description of what a linkage matrix is. - to_mlab_linkage: transform from Scipy to MATLAB format. + to_mlab_linkage: transform from SciPy to MATLAB format. Examples -------- @@ -1861,7 +1861,7 @@ def from_mlab_linkage(Z): Given a linkage matrix in MATLAB format ``mZ``, we can use `scipy.cluster.hierarchy.from_mlab_linkage` to import - it into Scipy format: + it into SciPy format: >>> mZ = np.array([[1, 2, 1], [4, 5, 1], [7, 8, 1], ... [10, 11, 1], [3, 13, 1.29099445], @@ -1889,7 +1889,7 @@ def from_mlab_linkage(Z): As expected, the linkage matrix ``Z`` returned includes an additional column counting the number of original samples in each cluster. Also, all cluster indexes are reduced by 1 - (MATLAB format uses 1-indexing, whereas Scipy uses 0-indexing). 
+ (MATLAB format uses 1-indexing, whereas SciPy uses 0-indexing). """ Z = np.asarray(Z, dtype=np.double, order='c') @@ -1942,7 +1942,7 @@ def to_mlab_linkage(Z): See Also -------- linkage: for a description of what a linkage matrix is. - from_mlab_linkage: transform from Matlab to Scipy format. + from_mlab_linkage: transform from Matlab to SciPy format. Examples -------- @@ -2712,7 +2712,7 @@ def fclusterdata(X, t, criterion='inconsistent', >>> from scipy.cluster.hierarchy import fclusterdata This is a convenience method that abstracts all the steps to perform in a - typical Scipy's hierarchical clustering workflow. + typical SciPy's hierarchical clustering workflow. * Transform the input data into a condensed matrix with `scipy.spatial.distance.pdist`. @@ -3015,7 +3015,7 @@ def set_link_color_palette(palette): Notes ----- - Ability to reset the palette with ``None`` added in Scipy 0.17.0. + Ability to reset the palette with ``None`` added in SciPy 0.17.0. Examples -------- diff --git a/scipy/io/matlab/mio4.py b/scipy/io/matlab/mio4.py index bf83bf7ac5bb..1778f09d9794 100644 --- a/scipy/io/matlab/mio4.py +++ b/scipy/io/matlab/mio4.py @@ -253,7 +253,7 @@ def read_sparse_array(self, hdr): ''' res = self.read_sub_array(hdr) tmp = res[:-1,:] - # All numbers are float64 in Matlab, but Scipy sparse expects int shape + # All numbers are float64 in Matlab, but SciPy sparse expects int shape dims = (int(res[-1,0]), int(res[-1,1])) I = np.ascontiguousarray(tmp[:,0],dtype='intc') # fixes byte order also J = np.ascontiguousarray(tmp[:,1],dtype='intc') diff --git a/scipy/linalg/decomp_lu.py b/scipy/linalg/decomp_lu.py index faf58c84adc0..1c424d2b0340 100644 --- a/scipy/linalg/decomp_lu.py +++ b/scipy/linalg/decomp_lu.py @@ -196,7 +196,7 @@ def lu(a, permute_l=False, overwrite_a=False, check_finite=True): Notes ----- - This is a LU factorization routine written for Scipy. + This is a LU factorization routine written for SciPy. Examples -------- diff --git a/scipy/linalg/decomp_qr.py b/scipy/linalg/decomp_qr.py index acd2ca35de53..639e89614f35 100644 --- a/scipy/linalg/decomp_qr.py +++ b/scipy/linalg/decomp_qr.py @@ -46,7 +46,7 @@ def qr(a, overwrite_a=False, lwork=None, mode='full', pivoting=False, Determines what information is to be returned: either both Q and R ('full', default), only R ('r') or both Q and R but computed in economy-size ('economic', see Notes). The final option 'raw' - (added in Scipy 0.11) makes the function return two matrices + (added in SciPy 0.11) makes the function return two matrices (Q, TAU) in the internal format used by LAPACK. pivoting : bool, optional Whether or not factorization should include pivoting for rank-revealing diff --git a/scipy/odr/tests/test_odr.py b/scipy/odr/tests/test_odr.py index 3a18a5a89d8f..5b47f9a8ca17 100644 --- a/scipy/odr/tests/test_odr.py +++ b/scipy/odr/tests/test_odr.py @@ -1,6 +1,6 @@ from __future__ import division, print_function, absolute_import -# Scipy imports. +# SciPy imports. import numpy as np from numpy import pi from numpy.testing import (assert_array_almost_equal, diff --git a/scipy/optimize/_remove_redundancy.py b/scipy/optimize/_remove_redundancy.py index 0fcc07de5f1a..f2183fac4086 100644 --- a/scipy/optimize/_remove_redundancy.py +++ b/scipy/optimize/_remove_redundancy.py @@ -309,7 +309,7 @@ def _remove_redundancy_sparse(A, rhs): # I tried and tried and tried to improve performance using the # Bartels-Golub update. 
It works, but it's only practical if the LU # factorization can be specialized as described, and that is not possible - # until the Scipy SuperLU interface permits control over column + # until the SciPy SuperLU interface permits control over column # permutation - see issue #7700. for i in v: diff --git a/scipy/optimize/_trustregion_exact.py b/scipy/optimize/_trustregion_exact.py index 2fd1b092ea03..277cf81f166a 100644 --- a/scipy/optimize/_trustregion_exact.py +++ b/scipy/optimize/_trustregion_exact.py @@ -236,7 +236,7 @@ def __init__(self, x, fun, jac, hess, hessp=None, self.k_hard = k_hard # Get Lapack function for cholesky decomposition. - # The implemented Scipy wrapper does not return + # The implemented SciPy wrapper does not return # the incomplete factorization needed by the method. self.cholesky, = get_lapack_funcs(('potrf',), (self.hess,)) diff --git a/scipy/optimize/nonlin.py b/scipy/optimize/nonlin.py index eaa6a1195183..7bb8ad0b593a 100644 --- a/scipy/optimize/nonlin.py +++ b/scipy/optimize/nonlin.py @@ -105,7 +105,7 @@ def residual(P): """ # Copyright (C) 2009, Pauli Virtanen -# Distributed under the same license as Scipy. +# Distributed under the same license as SciPy. from __future__ import division, print_function, absolute_import @@ -465,7 +465,7 @@ def check(self, f, x, dx): return 1 if self.iter is not None: - # backwards compatibility with Scipy 0.6.0 + # backwards compatibility with SciPy 0.6.0 return 2 * (self.iteration > self.iter) # NB: condition must succeed for rtol=inf even if norm == 0 @@ -1381,7 +1381,7 @@ class KrylovJacobian(Jacobian): Due to the use of iterative matrix inverses, these methods can deal with large nonlinear problems. - Scipy's `scipy.sparse.linalg` module offers a selection of Krylov + SciPy's `scipy.sparse.linalg` module offers a selection of Krylov solvers to choose from. The default here is `lgmres`, which is a variant of restarted GMRES iteration that reuses some of the information obtained in the previous Newton steps to invert diff --git a/scipy/optimize/optimize.py b/scipy/optimize/optimize.py index 09594162897c..54990b74317e 100644 --- a/scipy/optimize/optimize.py +++ b/scipy/optimize/optimize.py @@ -138,7 +138,7 @@ def _check_unknown_options(unknown_options): if unknown_options: msg = ", ".join(map(str, unknown_options.keys())) # Stack level 4: this is called from _minimize_*, which is - # called from another function in Scipy. Level 4 is the first + # called from another function in SciPy. Level 4 is the first # level in user code. warnings.warn("Unknown solver options: %s" % msg, OptimizeWarning, 4) diff --git a/scipy/optimize/tests/test_optimize.py b/scipy/optimize/tests/test_optimize.py index e50c153a1feb..ee4d93ab9256 100644 --- a/scipy/optimize/tests/test_optimize.py +++ b/scipy/optimize/tests/test_optimize.py @@ -114,11 +114,11 @@ def test_cg(self): atol=1e-6) # Ensure that function call counts are 'known good'; these are from - # Scipy 0.7.0. Don't allow them to increase. + # SciPy 0.7.0. Don't allow them to increase. assert_(self.funccalls == 9, self.funccalls) assert_(self.gradcalls == 7, self.gradcalls) - # Ensure that the function behaves the same; this is from Scipy 0.7.0 + # Ensure that the function behaves the same; this is from SciPy 0.7.0 assert_allclose(self.trace[2:4], [[0, -0.5, 0.5], [0, -5.05700028e-01, 4.95985862e-01]], @@ -158,11 +158,11 @@ def test_bfgs(self): atol=1e-6) # Ensure that function call counts are 'known good'; these are from - # Scipy 0.7.0. Don't allow them to increase. + # SciPy 0.7.0. 
Don't allow them to increase. assert_(self.funccalls == 10, self.funccalls) assert_(self.gradcalls == 8, self.gradcalls) - # Ensure that the function behaves the same; this is from Scipy 0.7.0 + # Ensure that the function behaves the same; this is from SciPy 0.7.0 assert_allclose(self.trace[6:8], [[0, -5.25060743e-01, 4.87748473e-01], [0, -5.24885582e-01, 4.87530347e-01]], @@ -206,7 +206,7 @@ def test_powell(self): atol=1e-6) # Ensure that function call counts are 'known good'; these are from - # Scipy 0.7.0. Don't allow them to increase. + # SciPy 0.7.0. Don't allow them to increase. # # However, some leeway must be added: the exact evaluation # count is sensitive to numerical error, and floating-point @@ -217,7 +217,7 @@ def test_powell(self): assert_(self.funccalls <= 116 + 20, self.funccalls) assert_(self.gradcalls == 0, self.gradcalls) - # Ensure that the function behaves the same; this is from Scipy 0.7.0 + # Ensure that the function behaves the same; this is from SciPy 0.7.0 assert_allclose(self.trace[34:39], [[0.72949016, -0.44156936, 0.47100962], [0.72949016, -0.44156936, 0.48052496], @@ -247,11 +247,11 @@ def test_neldermead(self): atol=1e-6) # Ensure that function call counts are 'known good'; these are from - # Scipy 0.7.0. Don't allow them to increase. + # SciPy 0.7.0. Don't allow them to increase. assert_(self.funccalls == 167, self.funccalls) assert_(self.gradcalls == 0, self.gradcalls) - # Ensure that the function behaves the same; this is from Scipy 0.7.0 + # Ensure that the function behaves the same; this is from SciPy 0.7.0 assert_allclose(self.trace[76:78], [[0.1928968, -0.62780447, 0.35166118], [0.19572515, -0.63648426, 0.35838135]], @@ -285,11 +285,11 @@ def test_neldermead_initial_simplex(self): atol=1e-6) # Ensure that function call counts are 'known good'; these are from - # Scipy 0.17.0. Don't allow them to increase. + # SciPy 0.17.0. Don't allow them to increase. assert_(self.funccalls == 100, self.funccalls) assert_(self.gradcalls == 0, self.gradcalls) - # Ensure that the function behaves the same; this is from Scipy 0.15.0 + # Ensure that the function behaves the same; this is from SciPy 0.15.0 assert_allclose(self.trace[50:52], [[0.14687474, -0.5103282, 0.48252111], [0.14474003, -0.5282084, 0.48743951]], @@ -349,14 +349,14 @@ def test_ncg(self): atol=1e-6) # Ensure that function call counts are 'known good'; these are from - # Scipy 0.7.0. Don't allow them to increase. + # SciPy 0.7.0. Don't allow them to increase. assert_(self.funccalls == 7, self.funccalls) assert_(self.gradcalls <= 22, self.gradcalls) # 0.13.0 #assert_(self.gradcalls <= 18, self.gradcalls) # 0.9.0 #assert_(self.gradcalls == 18, self.gradcalls) # 0.8.0 #assert_(self.gradcalls == 22, self.gradcalls) # 0.7.0 - # Ensure that the function behaves the same; this is from Scipy 0.7.0 + # Ensure that the function behaves the same; this is from SciPy 0.7.0 assert_allclose(self.trace[3:5], [[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01], [-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]], @@ -384,13 +384,13 @@ def test_ncg_hess(self): atol=1e-6) # Ensure that function call counts are 'known good'; these are from - # Scipy 0.7.0. Don't allow them to increase. + # SciPy 0.7.0. Don't allow them to increase. 
assert_(self.funccalls == 7, self.funccalls) assert_(self.gradcalls <= 18, self.gradcalls) # 0.9.0 # assert_(self.gradcalls == 18, self.gradcalls) # 0.8.0 # assert_(self.gradcalls == 22, self.gradcalls) # 0.7.0 - # Ensure that the function behaves the same; this is from Scipy 0.7.0 + # Ensure that the function behaves the same; this is from SciPy 0.7.0 assert_allclose(self.trace[3:5], [[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01], [-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]], @@ -418,13 +418,13 @@ def test_ncg_hessp(self): atol=1e-6) # Ensure that function call counts are 'known good'; these are from - # Scipy 0.7.0. Don't allow them to increase. + # SciPy 0.7.0. Don't allow them to increase. assert_(self.funccalls == 7, self.funccalls) assert_(self.gradcalls <= 18, self.gradcalls) # 0.9.0 # assert_(self.gradcalls == 18, self.gradcalls) # 0.8.0 # assert_(self.gradcalls == 22, self.gradcalls) # 0.7.0 - # Ensure that the function behaves the same; this is from Scipy 0.7.0 + # Ensure that the function behaves the same; this is from SciPy 0.7.0 assert_allclose(self.trace[3:5], [[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01], [-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]], @@ -539,11 +539,11 @@ def test_l_bfgs_b(self): atol=1e-6) # Ensure that function call counts are 'known good'; these are from - # Scipy 0.7.0. Don't allow them to increase. + # SciPy 0.7.0. Don't allow them to increase. assert_(self.funccalls == 7, self.funccalls) assert_(self.gradcalls == 5, self.gradcalls) - # Ensure that the function behaves the same; this is from Scipy 0.7.0 + # Ensure that the function behaves the same; this is from SciPy 0.7.0 assert_allclose(self.trace[3:5], [[0., -0.52489628, 0.48753042], [0., -0.52489628, 0.48753042]], diff --git a/scipy/signal/signaltools.py b/scipy/signal/signaltools.py index 4b460f9aefce..cce171fdbd37 100644 --- a/scipy/signal/signaltools.py +++ b/scipy/signal/signaltools.py @@ -521,7 +521,7 @@ def _np_conv_ok(volume, kernel, mode): """ See if numpy supports convolution of `volume` and `kernel` (i.e. both are 1D ndarrays and of the appropriate shape). Numpy's 'same' mode uses the - size of the larger input, while Scipy's uses the size of the first input. + size of the larger input, while SciPy's uses the size of the first input. Invalid mode strings will return False and be caught by the calling func. """ diff --git a/scipy/sparse/_matrix_io.py b/scipy/sparse/_matrix_io.py index 5f31005811f2..0db0235876f1 100644 --- a/scipy/sparse/_matrix_io.py +++ b/scipy/sparse/_matrix_io.py @@ -138,7 +138,7 @@ def load_npz(file): if sys.version_info[0] >= 3 and not isinstance(matrix_format, str): # Play safe with Python 2 vs 3 backward compatibility; - # files saved with Scipy < 1.0.0 may contain unicode or bytes. + # files saved with SciPy < 1.0.0 may contain unicode or bytes. matrix_format = matrix_format.decode('ascii') try: diff --git a/scipy/sparse/linalg/isolve/_gcrotmk.py b/scipy/sparse/linalg/isolve/_gcrotmk.py index 208385564e7d..2f7278fd957d 100644 --- a/scipy/sparse/linalg/isolve/_gcrotmk.py +++ b/scipy/sparse/linalg/isolve/_gcrotmk.py @@ -1,5 +1,5 @@ # Copyright (C) 2015, Pauli Virtanen -# Distributed under the same license as Scipy. +# Distributed under the same license as SciPy. 
from __future__ import division, print_function, absolute_import diff --git a/scipy/sparse/linalg/isolve/lgmres.py b/scipy/sparse/linalg/isolve/lgmres.py index 5afab621467d..cfdaadfe3ab5 100644 --- a/scipy/sparse/linalg/isolve/lgmres.py +++ b/scipy/sparse/linalg/isolve/lgmres.py @@ -1,5 +1,5 @@ # Copyright (C) 2009, Pauli Virtanen -# Distributed under the same license as Scipy. +# Distributed under the same license as SciPy. from __future__ import division, print_function, absolute_import diff --git a/scipy/sparse/linalg/isolve/tests/test_iterative.py b/scipy/sparse/linalg/isolve/tests/test_iterative.py index 878101c04f60..878c24fa0fc0 100644 --- a/scipy/sparse/linalg/isolve/tests/test_iterative.py +++ b/scipy/sparse/linalg/isolve/tests/test_iterative.py @@ -544,7 +544,7 @@ def store_residual(r, rvec): sup.filter(DeprecationWarning, ".*called without specifying.*") x,flag = gmres(A, b, x0=zeros(A.shape[0]), tol=1e-16, maxiter=maxiter, callback=callback) - # Expected output from Scipy 1.0.0 + # Expected output from SciPy 1.0.0 assert_allclose(rvec, array([1.0, 0.81649658092772603]), rtol=1e-10) # Test preconditioned callback @@ -555,7 +555,7 @@ def store_residual(r, rvec): sup.filter(DeprecationWarning, ".*called without specifying.*") x, flag = gmres(A, b, M=M, tol=1e-16, maxiter=maxiter, callback=callback) - # Expected output from Scipy 1.0.0 (callback has preconditioned residual!) + # Expected output from SciPy 1.0.0 (callback has preconditioned residual!) assert_allclose(rvec, array([1.0, 1e-3 * 0.81649658092772603]), rtol=1e-10) def test_abi(self): diff --git a/scipy/sparse/linalg/isolve/tests/test_lsmr.py b/scipy/sparse/linalg/isolve/tests/test_lsmr.py index 33c574e8e83f..ea4eb0de8dba 100644 --- a/scipy/sparse/linalg/isolve/tests/test_lsmr.py +++ b/scipy/sparse/linalg/isolve/tests/test_lsmr.py @@ -1,6 +1,6 @@ """ Copyright (C) 2010 David Fong and Michael Saunders -Distributed under the same license as Scipy +Distributed under the same license as SciPy Testing Code for LSMR. diff --git a/scipy/sparse/sparsetools.py b/scipy/sparse/sparsetools.py index 1b8a9af104d4..c7f190ee86cd 100644 --- a/scipy/sparse/sparsetools.py +++ b/scipy/sparse/sparsetools.py @@ -4,7 +4,7 @@ """ from numpy import deprecate -# This file shouldn't be imported by scipy --- Scipy code should use +# This file shouldn't be imported by scipy --- SciPy code should use # internally scipy.sparse._sparsetools diff --git a/scipy/sparse/tests/test_matrix_io.py b/scipy/sparse/tests/test_matrix_io.py index 304a1573f6dc..d97f1fdefd01 100644 --- a/scipy/sparse/tests/test_matrix_io.py +++ b/scipy/sparse/tests/test_matrix_io.py @@ -73,7 +73,7 @@ def __reduce__(self): def test_py23_compatibility(): # Try loading files saved on Python 2 and Python 3. They are not - # the same, since files saved with Scipy versions < 1.0.0 may + # the same, since files saved with SciPy versions < 1.0.0 may # contain unicode. a = load_npz(os.path.join(DATA_DIR, 'csc_py2.npz')) diff --git a/scipy/spatial/_spherical_voronoi.py b/scipy/spatial/_spherical_voronoi.py index 661796cc578d..eb2d1b4dca26 100644 --- a/scipy/spatial/_spherical_voronoi.py +++ b/scipy/spatial/_spherical_voronoi.py @@ -8,7 +8,7 @@ # Copyright (C) Tyler Reddy, Ross Hemsley, Edd Edmondson, # Nikolai Nowaczyk, Joe Pitt-Francis, 2015. # -# Distributed under the same BSD license as Scipy. +# Distributed under the same BSD license as SciPy. 
# import numpy as np diff --git a/scipy/spatial/distance.py b/scipy/spatial/distance.py index ab188f678296..266e4ca42346 100644 --- a/scipy/spatial/distance.py +++ b/scipy/spatial/distance.py @@ -2122,7 +2122,7 @@ def squareform(X, force="no", checks=True): :math:`v[{n \\choose 2}-{n-i \\choose 2} + (j-i-1)]` and all diagonal elements are zero. - In Scipy 0.19.0, ``squareform`` stopped casting all input types to + In SciPy 0.19.0, ``squareform`` stopped casting all input types to float64, and started returning arrays of the same dtype as the input. """ diff --git a/scipy/special/orthogonal.py b/scipy/special/orthogonal.py index 06da9cd2810b..59e5b062fe7b 100644 --- a/scipy/special/orthogonal.py +++ b/scipy/special/orthogonal.py @@ -76,7 +76,7 @@ from __future__ import division, print_function, absolute_import -# Scipy imports. +# SciPy imports. import numpy as np from numpy import (exp, inf, pi, sqrt, floor, sin, cos, around, int, hstack, arccos, arange) diff --git a/scipy/special/tests/test_mpmath.py b/scipy/special/tests/test_mpmath.py index ef56c1e2bcdb..96aeb536b2bf 100644 --- a/scipy/special/tests/test_mpmath.py +++ b/scipy/special/tests/test_mpmath.py @@ -1,5 +1,5 @@ """ -Test Scipy functions versus mpmath, if available. +Test SciPy functions versus mpmath, if available. """ from __future__ import division, print_function, absolute_import @@ -1383,7 +1383,7 @@ def test_hyp1f1_complex(self): @nonfunctional_tooslow def test_hyp2f1_complex(self): - # Scipy's hyp2f1 seems to have performance and accuracy problems + # SciPy's hyp2f1 seems to have performance and accuracy problems assert_mpmath_equal(lambda a, b, c, x: sc.hyp2f1(a.real, b.real, c.real, x), exception_to_nan(lambda a, b, c, x: mpmath.hyp2f1(a, b, c, x, **HYPERKW)), [Arg(-1e2, 1e2), Arg(-1e2, 1e2), Arg(-1e2, 1e2), ComplexArg()], diff --git a/scipy/stats/kde.py b/scipy/stats/kde.py index b4896bd6f295..0cd947958256 100644 --- a/scipy/stats/kde.py +++ b/scipy/stats/kde.py @@ -9,7 +9,7 @@ # Date: 2004-08-09 # # Modified: 2005-02-10 by Robert Kern. -# Contributed to Scipy +# Contributed to SciPy # 2005-10-07 by Robert Kern. # Some fixes to match the new scipy_core # @@ -22,7 +22,7 @@ # Standard library imports. import warnings -# Scipy imports. +# SciPy imports. from scipy._lib.six import callable, string_types from scipy import linalg, special from scipy.special import logsumexp diff --git a/scipy/stats/tests/test_stats.py b/scipy/stats/tests/test_stats.py index f38339533d83..73599b206d32 100644 --- a/scipy/stats/tests/test_stats.py +++ b/scipy/stats/tests/test_stats.py @@ -284,7 +284,7 @@ class TestFisherExact(object): Note that in SciPy 0.9.0 this was not working well for large numbers due to inaccuracy of the hypergeom distribution (see #1218). Fixed now. - Also note that R and Scipy have different argument formats for their + Also note that R and SciPy have different argument formats for their hypergeometric distribution functions. 
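[Editorial aside, not part of the patch: the R-versus-SciPy argument difference this comment refers to can be sketched as below. The values and the R-style argument names are assumptions for illustration only.]

    # Sketch only, values assumed. R's dhyper(x, m, n, k) draws k balls from
    # m white and n black; SciPy's hypergeom takes (M, n, N) = (total count,
    # number of white balls, number drawn) instead.
    from scipy.stats import hypergeom

    m, n, k, x = 10, 7, 8, 5           # R-style arguments (assumed values)
    p = hypergeom.pmf(x, m + n, m, k)  # the mass R reports for dhyper(x, m, n, k)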
R: From e8b6e166b046065a5f41833008892742cf738bde Mon Sep 17 00:00:00 2001 From: Ilhan Polat Date: Sun, 18 Nov 2018 14:39:58 +0100 Subject: [PATCH 16/70] CI: Use released NumPy and newer Cython [ci skip] --- .circleci/config.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 1eec86bcff87..40f660575b6e 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -112,10 +112,10 @@ jobs: export CCACHE_COMPRESS=1 export NPY_NUM_BUILD_JOBS=`pypy3 -c 'import multiprocessing as mp; print(mp.cpu_count())'` export PATH=/usr/lib/ccache:$PATH - # XXX: use "numpy>=1.15.0" when it's released pypy3 -mpip install --upgrade pip setuptools wheel - pypy3 -mpip install --no-build-isolation --extra-index https://antocuni.github.io/pypy-wheels/ubuntu pytest pytest-xdist Tempita "Cython>=0.28.2" mpmath - pypy3 -mpip install --no-build-isolation git+https://github.com/numpy/numpy.git@db552b5b6b37f2ff085b304751d7a2ebed26adc9 + pypy3 -mpip install Cython>=0.28.5 + pypy3 -mpip install numpy>=1.15.0 + pypy3 -mpip install --no-build-isolation --extra-index https://antocuni.github.io/pypy-wheels/ubuntu pytest pytest-xdist Tempita mpmath - run: name: build command: | From 0ffcf100ceebd526107cfcb689c68a2c5b0ebe48 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Thu, 15 Nov 2018 22:28:39 -0800 Subject: [PATCH 17/70] DOC: roadmap update for changes over the last year. Plus some evolving insights ... --- doc/ROADMAP.rst.txt | 116 +++++++++++++++++++++----------------------- 1 file changed, 54 insertions(+), 62 deletions(-) diff --git a/doc/ROADMAP.rst.txt b/doc/ROADMAP.rst.txt index 61017c02006c..f173e5055a45 100644 --- a/doc/ROADMAP.rst.txt +++ b/doc/ROADMAP.rst.txt @@ -67,10 +67,15 @@ Regarding Cython code: Regarding build environments: - SciPy builds from source on Windows now with a MSVC + MinGW-w64 gfortran - toolchain. This still needs to prove itself, but is looking good so far. - - Support for Accelerate will be dropped, likely in SciPy 1.1.0. If there is - enough interest, we may want to write wrappers so the BLAS part of Accelerate - can still be used. + toolchain, which we're using for official releases. + MSVC + Intel Fortran + MKL works as well, and is easier for users (as long + as they have access to ifort and MKL of course). This mainly needs better + documentation at the moment. + - We're aiming to gradually increase the minimum version of LAPACK that is + required, so we can use never features. Support for Accelerate on macOS + has been dropped. We do rely quite heavily on OpenBLAS, and its stability + is a worry - improvements in testing and build documentation at least are + needed. Continuous integration is in good shape, it covers Windows, macOS and Linux, as well as a range of versions of our dependencies and building release quality wheels. @@ -91,12 +96,15 @@ This module is basically done, low-maintenance and without open issues. fftpack ``````` -Needed: +We aim to follow NumPy in adopting ``pocketfft`` (see `this NumPy PR +`__). That will address a number of +maintenance issues, and increase performance (both accuracy and speed). +Of particular interest regarding performance is the Bluestein algorithm (or +chirp Z-transform), which we have wanting to add to ``fftpack`` for a long +time. 
- - solve issues with single precision: large errors, disabled for difficult sizes - - fix caching bug - - Bluestein algorithm (or chirp Z-transform) - - deprecate fftpack.convolve as public function (was not meant to be public) +We probably want to deprecate fftpack.convolve as public function (it was not +meant to be public). There's a large overlap with ``numpy.fft``. This duplication has to change (both are too widely used to deprecate one); in the documentation we should @@ -105,6 +113,11 @@ If there are differences in signature or functionality, the best version should be picked case by case (example: numpy's ``rfft`` is preferred, see gh-2487). +Ideas for new features: + +- Add a backend/plugin system. At the moment pyFFTW is monkeypatching SciPy, + and ``mkl_fft`` provides ``fftpack``-compatible functions as well. We should + provide a method to support such packages. integrate ````````` @@ -198,21 +211,20 @@ The morphology interface needs to be standardized: odr ``` -Rename the module to ``regression`` or ``fitting``, include -``optimize.curve_fit``. This module will then provide a home for other fitting -functionality - what exactly needs to be worked out in more detail, a -discussion can be found at https://github.com/scipy/scipy/pull/448. +This module is in reasonable shape, although it could use a bit more maintence. +No major plans or wishes here. optimize ```````` -Overall this module is in reasonably good shape, however it is missing a few -more good global optimizers as well as large-scale optimizers. These should be -added. Other things that are needed: +Overall this module is in good shape. Two good good global optimizers were +added in 1.2.0; large-scale optimizers is still a gap that could be filled. +Other things that are needed: + - Add functionality to the benchmark suite to compare results more easily + (e.g. with summary plots). - deprecate the ``fmin_*`` functions in the documentation, ``minimize`` is preferred. - - clearly define what's out of scope for this module. signal @@ -250,21 +262,26 @@ for those). sparse `````` -The sparse matrix formats are getting feature-complete but are slow ... -reimplement parts in Cython? - - - Small matrices are slower than PySparse, needs fixing - -There are a lot of formats. These should be kept, but -improvements/optimizations should go into CSR/CSC, which are the preferred -formats. LIL may be the exception, it's inherently inefficient. It could be -dropped if DOK is extended to support all the operations LIL currently -provides. Alternatives are being worked on, see https://github.com/ev-br/sparr -and https://github.com/perimosocordiae/sparray. - -Ideas for new features: - - - Sparse arrays now act like np.matrix. We want sparse *arrays*. +The sparse matrix formats are mostly feature-complete, however the main issue +is that they act like ``numpy.matrix`` (which will be deprecated in NumPy at +some point). What we want is sparse arrays, that act like ``numpy.ndarray``. +This is being worked on in https://github.com/pydata/sparse, which is quite far +along. The tentative plan is: + +- Start depending on ``pydata/sparse`` once it's feature-complete enough (it + still needs a CSC/CSR equivalent) and okay performance-wise. +- Add support for ``pydata/sparse`` to ``scipy.sparse.linalg`` (and perhaps to + ``scipy.sparse.csgraph`` after that). +- Indicate in the documentation that for new code users should prefer + ``pydata/sparse`` over sparse matrices. 
+- When NumPy deprecates ``numpy.matrix``, vendor that or maintain it as a + stand-alone package. + +Regarding the different sparse matrix formats: there are a lot of them. These +should be kept, but improvements/optimizations should go into CSR/CSC, which +are the preferred formats. LIL may be the exception, it's inherently +inefficient. It could be dropped if DOK is extended to support all the +operations LIL currently provides. sparse.csgraph @@ -293,9 +310,10 @@ Ideas for new features: - Wrappers for PROPACK for faster sparse SVD computation. + spatial ``````` -QHull wrappers are in good shape. +QHull wrappers are in good shape, as is ``cKDTree``. Needed: @@ -329,33 +347,7 @@ functions, and spheroidal wave functions. Three possible ways to handle this: stats ````` -``stats.distributions`` is in good shape. - -``gaussian_kde`` is in good shape but limited. It should not be expanded -probably, this fits better in Statsmodels (which already has a lot more KDE -functionality). +This module is in good shape overall. New functionality that's similar to +what's already present can continue to be added; more advanced statistical +routines may fit better in ``statsmodels``. - -New modules under discussion ----------------------------- - -diff -```` -Currently SciPy doesn't provide much support for numerical differentiation. -A new ``scipy.diff`` module for that is discussed in -https://github.com/scipy/scipy/issues/2035. There's also a fairly detailed -GSoC proposal to build on, see `here `_. -There has been a second (unsuccessful) GSoC project in 2017. Recent discussion -and the host of alternatives available make it unlikely that a new ``scipy.diff`` -submodule will be added in the near future. - -There is also ``approx_derivative`` in ``optimize``, which is still private -but could form a solid basis for this module. - -transforms -`````````` -This module was discussed previously, mainly to provide a home for -discrete wavelet transform functionality. Other transforms could fit as well, -for example there's a PR for a Hankel transform . -*Note: this is on the back burner, because the plans to integrate PyWavelets -DWT code has been put on hold.* From 0d95d6080616d61e8be9e59cc0fa8f239247df8c Mon Sep 17 00:00:00 2001 From: Stefan van der Walt Date: Fri, 16 Nov 2018 13:45:01 -0800 Subject: [PATCH 18/70] Clarify ndimage section of roadmap --- doc/ROADMAP.rst.txt | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/doc/ROADMAP.rst.txt b/doc/ROADMAP.rst.txt index f173e5055a45..c085d6df1c32 100644 --- a/doc/ROADMAP.rst.txt +++ b/doc/ROADMAP.rst.txt @@ -187,13 +187,21 @@ been moved to another submodule or deprecated. The few that are left: ndimage ``````` -Underlying ``ndimage`` is a powerful interpolation engine. Unfortunately, it was -never decided whether to use a pixel model (``(1, 1)`` elements with centers -``(0.5, 0.5)``) or a data point model (values at points on a grid). Over time, -it seems that the data point model is better defined and easier to implement. -We therefore propose to move to this data representation for 1.0, and to vet -all interpolation code to ensure that boundary values, transformations, etc. -are correctly computed. Addressing this issue will close several issues, +Underlying ``ndimage`` is a powerful interpolation engine. Users come +with an expectation of one of two models: a pixel model with ``(1, +1)`` elements having centers ``(0.5, 0.5)``, or a data point model, +where values are defined at points on a grid. 
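[Editorial aside, not part of the patch: a minimal numeric sketch of the two models this hunk describes; the axis length is assumed.]

    # Sketch only, n assumed: sample locations implied by the two models
    # along one axis of length n = 4.
    import numpy as np

    n = 4
    pixel_model = np.arange(n) + 0.5  # (1, 1) cells with centres 0.5, 1.5, ...
    data_point_model = np.arange(n)   # values at the grid points 0, 1, 2, 3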
Over time, we've become +convinced that the data point model is better defined and easier to +implement, but this should be clearly communicated in the documentation. + +More importantly, still, SciPy implements one *variant* of this data +point model, where datapoints at any two extremes of an axis share a +spatial location under *periodic wrapping* mode. E.g., in a 1D array, +you would have ``x[0]`` and ``x[-1]`` co-located. A very common +use-case, however, is for signals to be periodic, with equal spacing +between the first and last element along an axis (instead of zero +spacing). Adding wrapping modes for this use-case and disambiguating +them from the existing implementation should address several issues, including #1323, #1903, #2045 and #2640. The morphology interface needs to be standardized: From 0b30dd04d587595a6ef2a77fc0eeae0018085b32 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Mon, 19 Nov 2018 11:20:24 -0800 Subject: [PATCH 19/70] DOC: address review comments, add note on linprog to roadmap. --- doc/ROADMAP.rst.txt | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/doc/ROADMAP.rst.txt b/doc/ROADMAP.rst.txt index c085d6df1c32..b5b4db10b5a5 100644 --- a/doc/ROADMAP.rst.txt +++ b/doc/ROADMAP.rst.txt @@ -72,9 +72,10 @@ Regarding build environments: as they have access to ifort and MKL of course). This mainly needs better documentation at the moment. - We're aiming to gradually increase the minimum version of LAPACK that is - required, so we can use never features. Support for Accelerate on macOS + required, so we can use newer features. Support for Accelerate on macOS has been dropped. We do rely quite heavily on OpenBLAS, and its stability - is a worry - improvements in testing and build documentation at least are + is a worry (often only one of the recent releases works without test + failures) - improvements in testing and build documentation at least are needed. Continuous integration is in good shape, it covers Windows, macOS and Linux, as well @@ -100,7 +101,7 @@ We aim to follow NumPy in adopting ``pocketfft`` (see `this NumPy PR `__). That will address a number of maintenance issues, and increase performance (both accuracy and speed). Of particular interest regarding performance is the Bluestein algorithm (or -chirp Z-transform), which we have wanting to add to ``fftpack`` for a long +chirp Z-transform), which we have been wanting to add to ``fftpack`` for a long time. We probably want to deprecate fftpack.convolve as public function (it was not @@ -219,16 +220,18 @@ The morphology interface needs to be standardized: odr ``` -This module is in reasonable shape, although it could use a bit more maintence. -No major plans or wishes here. +This module is in reasonable shape, although it could use a bit more +maintenance. No major plans or wishes here. optimize ```````` -Overall this module is in good shape. Two good good global optimizers were -added in 1.2.0; large-scale optimizers is still a gap that could be filled. -Other things that are needed: +Overall this module is in good shape. Two good global optimizers were added in +1.2.0; large-scale optimizers is still a gap that could be filled. Other +things that are needed: + - Many ideas for additional functionality (e.g. integer constraints, sparse + matrix support, performance improvements) in ``linprog``, see gh-9269. - Add functionality to the benchmark suite to compare results more easily (e.g. with summary plots). 
- deprecate the ``fmin_*`` functions in the documentation, ``minimize`` is From e1adcbcf038102cb2861aead8da391ab53e410d1 Mon Sep 17 00:00:00 2001 From: Tyler Reddy Date: Fri, 16 Nov 2018 15:56:21 -0800 Subject: [PATCH 20/70] TST: test_kolmogorov xfail 32-bit * test_oneovernclose() was failing in 32-bit Python interpreter tests in our wheel build infrastructure -- marked as xfail for now --- scipy/special/tests/test_kolmogorov.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/scipy/special/tests/test_kolmogorov.py b/scipy/special/tests/test_kolmogorov.py index ebabde577194..c8b5e892ec49 100644 --- a/scipy/special/tests/test_kolmogorov.py +++ b/scipy/special/tests/test_kolmogorov.py @@ -1,6 +1,8 @@ from __future__ import division, print_function, absolute_import import itertools +import sys +import pytest import numpy as np from numpy.testing import assert_ @@ -228,6 +230,8 @@ def test_oneovern(self): dataset0 = np.column_stack([n, x, pp]) FuncData(_smirnovp, dataset0, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float]) + @pytest.mark.xfail(sys.maxsize <= 2**32, + reason="requires 64-bit platform") def test_oneovernclose(self): # Check derivative at x=1/n (Discontinuous at x=1/n, test on either side: x=1/n +/- 2epsilon) n = np.arange(3, 20) From 1cde30518fdc46c2c765beaa3e132e322ea7a579 Mon Sep 17 00:00:00 2001 From: Ilhan Polat Date: Sun, 30 Sep 2018 11:41:38 +0200 Subject: [PATCH 21/70] DEP: Remove deprecated functions for 1.2.0 --- scipy/misc/__init__.py | 81 +-- scipy/misc/pilutil.py | 621 ---------------------- scipy/misc/tests/data/3x3x3.png | Bin 102 -> 0 bytes scipy/misc/tests/data/3x3x4.png | Bin 139 -> 0 bytes scipy/misc/tests/data/3x4x3.png | Bin 98 -> 0 bytes scipy/misc/tests/data/3x4x4.png | Bin 140 -> 0 bytes scipy/misc/tests/data/3x5x3.png | Bin 106 -> 0 bytes scipy/misc/tests/data/3x5x4.png | Bin 153 -> 0 bytes scipy/misc/tests/data/4x3x3.png | Bin 125 -> 0 bytes scipy/misc/tests/data/4x3x4.png | Bin 144 -> 0 bytes scipy/misc/tests/data/4x4x3.png | Bin 83 -> 0 bytes scipy/misc/tests/data/4x4x4.png | Bin 86 -> 0 bytes scipy/misc/tests/data/4x5x3.png | Bin 133 -> 0 bytes scipy/misc/tests/data/4x5x4.png | Bin 159 -> 0 bytes scipy/misc/tests/data/5x3x3.png | Bin 149 -> 0 bytes scipy/misc/tests/data/5x3x4.png | Bin 157 -> 0 bytes scipy/misc/tests/data/5x4x3.png | Bin 91 -> 0 bytes scipy/misc/tests/data/5x4x4.png | Bin 97 -> 0 bytes scipy/misc/tests/data/5x5x3.png | Bin 100 -> 0 bytes scipy/misc/tests/data/5x5x4.png | Bin 112 -> 0 bytes scipy/misc/tests/data/blocks2bit.png | Bin 77 -> 0 bytes scipy/misc/tests/data/box1.png | Bin 208 -> 0 bytes scipy/misc/tests/data/foo3x5x4indexed.png | Bin 116 -> 0 bytes scipy/misc/tests/data/icon.png | Bin 2122 -> 0 bytes scipy/misc/tests/data/icon_mono.png | Bin 449 -> 0 bytes scipy/misc/tests/data/icon_mono_flat.png | Bin 412 -> 0 bytes scipy/misc/tests/data/pattern4bit.png | Bin 169 -> 0 bytes scipy/misc/tests/test_common.py | 21 +- scipy/misc/tests/test_pilutil.py | 283 ---------- scipy/ndimage/__init__.py | 9 - scipy/ndimage/io.py | 36 -- scipy/ndimage/tests/test_io.py | 38 -- 32 files changed, 3 insertions(+), 1086 deletions(-) delete mode 100644 scipy/misc/pilutil.py delete mode 100644 scipy/misc/tests/data/3x3x3.png delete mode 100644 scipy/misc/tests/data/3x3x4.png delete mode 100644 scipy/misc/tests/data/3x4x3.png delete mode 100644 scipy/misc/tests/data/3x4x4.png delete mode 100644 scipy/misc/tests/data/3x5x3.png delete mode 100644 scipy/misc/tests/data/3x5x4.png delete mode 100644 scipy/misc/tests/data/4x3x3.png delete mode 
100644 scipy/misc/tests/data/4x3x4.png delete mode 100644 scipy/misc/tests/data/4x4x3.png delete mode 100644 scipy/misc/tests/data/4x4x4.png delete mode 100644 scipy/misc/tests/data/4x5x3.png delete mode 100644 scipy/misc/tests/data/4x5x4.png delete mode 100644 scipy/misc/tests/data/5x3x3.png delete mode 100644 scipy/misc/tests/data/5x3x4.png delete mode 100644 scipy/misc/tests/data/5x4x3.png delete mode 100644 scipy/misc/tests/data/5x4x4.png delete mode 100644 scipy/misc/tests/data/5x5x3.png delete mode 100644 scipy/misc/tests/data/5x5x4.png delete mode 100644 scipy/misc/tests/data/blocks2bit.png delete mode 100644 scipy/misc/tests/data/box1.png delete mode 100644 scipy/misc/tests/data/foo3x5x4indexed.png delete mode 100644 scipy/misc/tests/data/icon.png delete mode 100644 scipy/misc/tests/data/icon_mono.png delete mode 100644 scipy/misc/tests/data/icon_mono_flat.png delete mode 100644 scipy/misc/tests/data/pattern4bit.png delete mode 100644 scipy/misc/tests/test_pilutil.py delete mode 100644 scipy/ndimage/io.py delete mode 100644 scipy/ndimage/tests/test_io.py diff --git a/scipy/misc/__init__.py b/scipy/misc/__init__.py index 5a1cf0733595..46247c003ceb 100644 --- a/scipy/misc/__init__.py +++ b/scipy/misc/__init__.py @@ -7,10 +7,6 @@ Various utilities that don't have another home. -Note that Pillow (https://python-pillow.org/) is not a dependency -of SciPy, but the image manipulation functions indicated in the list -below are not available without it. - .. autosummary:: :toctree: generated/ @@ -20,89 +16,14 @@ face - Get example image for processing electrocardiogram - Load an example of a one-dimensional signal. -Deprecated functions: - -.. autosummary:: - :toctree: generated/ - - bytescale - Byte scales an array (image) [requires Pillow] - fromimage - Return a copy of a PIL image as a numpy array [requires Pillow] - imfilter - Simple filtering of an image [requires Pillow] - imread - Read an image file from a filename [requires Pillow] - imresize - Resize an image [requires Pillow] - imrotate - Rotate an image counter-clockwise [requires Pillow] - imsave - Save an array to an image file [requires Pillow] - imshow - Simple showing of an image through an external viewer [requires Pillow] - toimage - Takes a numpy array and returns a PIL image [requires Pillow] - - -Deprecated aliases: - -.. autosummary:: - :toctree: generated/ - - comb - Combinations of N things taken k at a time, "N choose k" (imported from `scipy.special`) - factorial - The factorial function, ``n! = special.gamma(n+1)`` - (imported from `scipy.special`) - factorial2 - Double factorial, ``(n!)!`` (imported from `scipy.special`) - factorialk - ``(...((n!)!)!...)!`` where there are k '!' (imported from `scipy.special`) - logsumexp - Compute the log of the sum of exponentials of input elements - (imported from `scipy.special`) - pade - Pade approximation to function as the ratio of two polynomials. - (imported from `scipy.interpolate`) - info - Get help information for a function, class, or module. (imported from `numpy`) - source - Print function source code. (imported from `numpy`) - who - Print the Numpy arrays in the given dictionary. (imported from `numpy`) - """ from __future__ import division, print_function, absolute_import -__all__ = ['who', 'source', 'info', 'doccer', 'pade', - 'comb', 'factorial', 'factorial2', 'factorialk', 'logsumexp'] - from . 
import doccer from .common import * -from numpy import who as _who, source as _source, info as _info -import numpy as np -from scipy.interpolate._pade import pade as _pade -from scipy.special import (comb as _comb, logsumexp as _lsm, - factorial as _fact, factorial2 as _fact2, factorialk as _factk) - -import sys - -_msg = ("Importing `%(name)s` from scipy.misc is deprecated in scipy 1.0.0. Use " - "`scipy.special.%(name)s` instead.") -comb = np.deprecate(_comb, message=_msg % {"name": _comb.__name__}) -logsumexp = np.deprecate(_lsm, message=_msg % {"name": _lsm.__name__}) -factorial = np.deprecate(_fact, message=_msg % {"name": _fact.__name__}) -factorial2 = np.deprecate(_fact2, message=_msg % {"name": _fact2.__name__}) -factorialk = np.deprecate(_factk, message=_msg % {"name": _factk.__name__}) - -_msg = ("Importing `pade` from scipy.misc is deprecated in scipy 1.0.0. Use " - "`scipy.interpolate.pade` instead.") -pade = np.deprecate(_pade, message=_msg) - -_msg = ("Importing `%(name)s` from scipy.misc is deprecated in scipy 1.0.0. Use " - "`numpy.%(name)s` instead.") -who = np.deprecate(_who, message=_msg % {"name": "who"}) -source = np.deprecate(_source, message=_msg % {"name": "source"}) - -@np.deprecate(message=_msg % {"name": "info.(..., toplevel='scipy')"}) -def info(object=None,maxwidth=76,output=sys.stdout,toplevel='scipy'): - return _info(object, maxwidth, output, toplevel) - - -info.__doc__ = _info.__doc__ -del sys -try: - from .pilutil import * - from . import pilutil - __all__ += pilutil.__all__ - del pilutil -except ImportError: - pass +__all__ = ['doccer'] from . import common __all__ += common.__all__ diff --git a/scipy/misc/pilutil.py b/scipy/misc/pilutil.py deleted file mode 100644 index 4c5d28681c8d..000000000000 --- a/scipy/misc/pilutil.py +++ /dev/null @@ -1,621 +0,0 @@ -""" -A collection of image utilities using the Python Imaging Library (PIL). - -Note that PIL is not a dependency of SciPy and this module is not -available on systems that don't have PIL installed. - -""" -from __future__ import division, print_function, absolute_import - -# Functions which need the PIL - -import numpy -import tempfile - -from numpy import (amin, amax, ravel, asarray, arange, ones, newaxis, - transpose, iscomplexobj, uint8, issubdtype, array) - -try: - from PIL import Image, ImageFilter -except ImportError: - import Image - import ImageFilter - - -if not hasattr(Image, 'frombytes'): - Image.frombytes = Image.fromstring - -__all__ = ['fromimage', 'toimage', 'imsave', 'imread', 'bytescale', - 'imrotate', 'imresize', 'imshow', 'imfilter'] - - -@numpy.deprecate(message="`bytescale` is deprecated in SciPy 1.0.0, " - "and will be removed in 1.2.0.") -def bytescale(data, cmin=None, cmax=None, high=255, low=0): - """ - Byte scales an array (image). - - Byte scaling means converting the input image to uint8 dtype and scaling - the range to ``(low, high)`` (default 0-255). - If the input image already has dtype uint8, no scaling is done. - - This function is only available if Python Imaging Library (PIL) is installed. - - Parameters - ---------- - data : ndarray - PIL image data array. - cmin : scalar, optional - Bias scaling of small values. Default is ``data.min()``. - cmax : scalar, optional - Bias scaling of large values. Default is ``data.max()``. - high : scalar, optional - Scale max value to `high`. Default is 255. - low : scalar, optional - Scale min value to `low`. Default is 0. - - Returns - ------- - img_array : uint8 ndarray - The byte-scaled array. 
- - Examples - -------- - >>> from scipy.misc import bytescale - >>> img = np.array([[ 91.06794177, 3.39058326, 84.4221549 ], - ... [ 73.88003259, 80.91433048, 4.88878881], - ... [ 51.53875334, 34.45808177, 27.5873488 ]]) - >>> bytescale(img) - array([[255, 0, 236], - [205, 225, 4], - [140, 90, 70]], dtype=uint8) - >>> bytescale(img, high=200, low=100) - array([[200, 100, 192], - [180, 188, 102], - [155, 135, 128]], dtype=uint8) - >>> bytescale(img, cmin=0, cmax=255) - array([[91, 3, 84], - [74, 81, 5], - [52, 34, 28]], dtype=uint8) - - """ - if data.dtype == uint8: - return data - - if high > 255: - raise ValueError("`high` should be less than or equal to 255.") - if low < 0: - raise ValueError("`low` should be greater than or equal to 0.") - if high < low: - raise ValueError("`high` should be greater than or equal to `low`.") - - if cmin is None: - cmin = data.min() - if cmax is None: - cmax = data.max() - - cscale = cmax - cmin - if cscale < 0: - raise ValueError("`cmax` should be larger than `cmin`.") - elif cscale == 0: - cscale = 1 - - scale = float(high - low) / cscale - bytedata = (data - cmin) * scale + low - return (bytedata.clip(low, high) + 0.5).astype(uint8) - - -@numpy.deprecate(message="`imread` is deprecated in SciPy 1.0.0, " - "and will be removed in 1.2.0.\n" - "Use ``imageio.imread`` instead.") -def imread(name, flatten=False, mode=None): - """ - Read an image from a file as an array. - - This function is only available if Python Imaging Library (PIL) is installed. - - Parameters - ---------- - name : str or file object - The file name or file object to be read. - flatten : bool, optional - If True, flattens the color layers into a single gray-scale layer. - mode : str, optional - Mode to convert image to, e.g. ``'RGB'``. See the Notes for more - details. - - Returns - ------- - imread : ndarray - The array obtained by reading the image. - - Notes - ----- - `imread` uses the Python Imaging Library (PIL) to read an image. - The following notes are from the PIL documentation. - - `mode` can be one of the following strings: - - * 'L' (8-bit pixels, black and white) - * 'P' (8-bit pixels, mapped to any other mode using a color palette) - * 'RGB' (3x8-bit pixels, true color) - * 'RGBA' (4x8-bit pixels, true color with transparency mask) - * 'CMYK' (4x8-bit pixels, color separation) - * 'YCbCr' (3x8-bit pixels, color video format) - * 'I' (32-bit signed integer pixels) - * 'F' (32-bit floating point pixels) - - PIL also provides limited support for a few special modes, including - 'LA' ('L' with alpha), 'RGBX' (true color with padding) and 'RGBa' - (true color with premultiplied alpha). - - When translating a color image to black and white (mode 'L', 'I' or - 'F'), the library uses the ITU-R 601-2 luma transform:: - - L = R * 299/1000 + G * 587/1000 + B * 114/1000 - - When `flatten` is True, the image is converted using mode 'F'. - When `mode` is not None and `flatten` is True, the image is first - converted according to `mode`, and the result is then flattened using - mode 'F'. - - """ - - im = Image.open(name) - return fromimage(im, flatten=flatten, mode=mode) - - -@numpy.deprecate(message="`imsave` is deprecated in SciPy 1.0.0, " - "and will be removed in 1.2.0.\n" - "Use ``imageio.imwrite`` instead.") -def imsave(name, arr, format=None): - """ - Save an array as an image. - - This function is only available if Python Imaging Library (PIL) is installed. - - .. 
warning:: - - This function uses `bytescale` under the hood to rescale images to use - the full (0, 255) range if ``mode`` is one of ``None, 'L', 'P', 'l'``. - It will also cast data for 2-D images to ``uint32`` for ``mode=None`` - (which is the default). - - Parameters - ---------- - name : str or file object - Output file name or file object. - arr : ndarray, MxN or MxNx3 or MxNx4 - Array containing image values. If the shape is ``MxN``, the array - represents a grey-level image. Shape ``MxNx3`` stores the red, green - and blue bands along the last dimension. An alpha layer may be - included, specified as the last colour band of an ``MxNx4`` array. - format : str - Image format. If omitted, the format to use is determined from the - file name extension. If a file object was used instead of a file name, - this parameter should always be used. - - Examples - -------- - Construct an array of gradient intensity values and save to file: - - >>> from scipy.misc import imsave - >>> x = np.zeros((255, 255)) - >>> x = np.zeros((255, 255), dtype=np.uint8) - >>> x[:] = np.arange(255) - >>> imsave('gradient.png', x) - - Construct an array with three colour bands (R, G, B) and store to file: - - >>> rgb = np.zeros((255, 255, 3), dtype=np.uint8) - >>> rgb[..., 0] = np.arange(255) - >>> rgb[..., 1] = 55 - >>> rgb[..., 2] = 1 - np.arange(255) - >>> imsave('rgb_gradient.png', rgb) - - """ - im = toimage(arr, channel_axis=2) - if format is None: - im.save(name) - else: - im.save(name, format) - return - - -@numpy.deprecate(message="`fromimage` is deprecated in SciPy 1.0.0. " - "and will be removed in 1.2.0.\n" - "Use ``np.asarray(im)`` instead.") -def fromimage(im, flatten=False, mode=None): - """ - Return a copy of a PIL image as a numpy array. - - This function is only available if Python Imaging Library (PIL) is installed. - - Parameters - ---------- - im : PIL image - Input image. - flatten : bool - If true, convert the output to grey-scale. - mode : str, optional - Mode to convert image to, e.g. ``'RGB'``. See the Notes of the - `imread` docstring for more details. - - Returns - ------- - fromimage : ndarray - The different colour bands/channels are stored in the - third dimension, such that a grey-image is MxN, an - RGB-image MxNx3 and an RGBA-image MxNx4. - - """ - if not Image.isImageType(im): - raise TypeError("Input is not a PIL image.") - - if mode is not None: - if mode != im.mode: - im = im.convert(mode) - elif im.mode == 'P': - # Mode 'P' means there is an indexed "palette". If we leave the mode - # as 'P', then when we do `a = array(im)` below, `a` will be a 2-D - # containing the indices into the palette, and not a 3-D array - # containing the RGB or RGBA values. - if 'transparency' in im.info: - im = im.convert('RGBA') - else: - im = im.convert('RGB') - - if flatten: - im = im.convert('F') - elif im.mode == '1': - # Workaround for crash in PIL. When im is 1-bit, the call array(im) - # can cause a seg. fault, or generate garbage. See - # https://github.com/scipy/scipy/issues/2138 and - # https://github.com/python-pillow/Pillow/issues/350. - # - # This converts im from a 1-bit image to an 8-bit image. - im = im.convert('L') - - a = array(im) - return a - - -_errstr = "Mode is unknown or incompatible with input array shape." 
- - -@numpy.deprecate(message="`toimage` is deprecated in SciPy 1.0.0, " - "and will be removed in 1.2.0.\n" - "Use Pillow's ``Image.fromarray`` directly instead.") -def toimage(arr, high=255, low=0, cmin=None, cmax=None, pal=None, - mode=None, channel_axis=None): - """Takes a numpy array and returns a PIL image. - - This function is only available if Python Imaging Library (PIL) is installed. - - The mode of the PIL image depends on the array shape and the `pal` and - `mode` keywords. - - For 2-D arrays, if `pal` is a valid (N,3) byte-array giving the RGB values - (from 0 to 255) then ``mode='P'``, otherwise ``mode='L'``, unless mode - is given as 'F' or 'I' in which case a float and/or integer array is made. - - .. warning:: - - This function uses `bytescale` under the hood to rescale images to use - the full (0, 255) range if ``mode`` is one of ``None, 'L', 'P', 'l'``. - It will also cast data for 2-D images to ``uint32`` for ``mode=None`` - (which is the default). - - Notes - ----- - For 3-D arrays, the `channel_axis` argument tells which dimension of the - array holds the channel data. - - For 3-D arrays if one of the dimensions is 3, the mode is 'RGB' - by default or 'YCbCr' if selected. - - The numpy array must be either 2 dimensional or 3 dimensional. - - """ - data = asarray(arr) - if iscomplexobj(data): - raise ValueError("Cannot convert a complex-valued array.") - shape = list(data.shape) - valid = len(shape) == 2 or ((len(shape) == 3) and - ((3 in shape) or (4 in shape))) - if not valid: - raise ValueError("'arr' does not have a suitable array shape for " - "any mode.") - if len(shape) == 2: - shape = (shape[1], shape[0]) # columns show up first - if mode == 'F': - data32 = data.astype(numpy.float32) - image = Image.frombytes(mode, shape, data32.tostring()) - return image - if mode in [None, 'L', 'P']: - bytedata = bytescale(data, high=high, low=low, - cmin=cmin, cmax=cmax) - image = Image.frombytes('L', shape, bytedata.tostring()) - if pal is not None: - image.putpalette(asarray(pal, dtype=uint8).tostring()) - # Becomes a mode='P' automagically. - elif mode == 'P': # default gray-scale - pal = (arange(0, 256, 1, dtype=uint8)[:, newaxis] * - ones((3,), dtype=uint8)[newaxis, :]) - image.putpalette(asarray(pal, dtype=uint8).tostring()) - return image - if mode == '1': # high input gives threshold for 1 - bytedata = (data > high) - image = Image.frombytes('1', shape, bytedata.tostring()) - return image - if cmin is None: - cmin = amin(ravel(data)) - if cmax is None: - cmax = amax(ravel(data)) - data = (data*1.0 - cmin)*(high - low)/(cmax - cmin) + low - if mode == 'I': - data32 = data.astype(numpy.uint32) - image = Image.frombytes(mode, shape, data32.tostring()) - else: - raise ValueError(_errstr) - return image - - # if here then 3-d array with a 3 or a 4 in the shape length. 
- # Check for 3 in datacube shape --- 'RGB' or 'YCbCr' - if channel_axis is None: - if (3 in shape): - ca = numpy.flatnonzero(asarray(shape) == 3)[0] - else: - ca = numpy.flatnonzero(asarray(shape) == 4) - if len(ca): - ca = ca[0] - else: - raise ValueError("Could not find channel dimension.") - else: - ca = channel_axis - - numch = shape[ca] - if numch not in [3, 4]: - raise ValueError("Channel axis dimension is not valid.") - - bytedata = bytescale(data, high=high, low=low, cmin=cmin, cmax=cmax) - if ca == 2: - strdata = bytedata.tostring() - shape = (shape[1], shape[0]) - elif ca == 1: - strdata = transpose(bytedata, (0, 2, 1)).tostring() - shape = (shape[2], shape[0]) - elif ca == 0: - strdata = transpose(bytedata, (1, 2, 0)).tostring() - shape = (shape[2], shape[1]) - if mode is None: - if numch == 3: - mode = 'RGB' - else: - mode = 'RGBA' - - if mode not in ['RGB', 'RGBA', 'YCbCr', 'CMYK']: - raise ValueError(_errstr) - - if mode in ['RGB', 'YCbCr']: - if numch != 3: - raise ValueError("Invalid array shape for mode.") - if mode in ['RGBA', 'CMYK']: - if numch != 4: - raise ValueError("Invalid array shape for mode.") - - # Here we know data and mode is correct - image = Image.frombytes(mode, shape, strdata) - return image - - -@numpy.deprecate(message="`imrotate` is deprecated in SciPy 1.0.0, " - "and will be removed in 1.2.0.\n" - "Use ``skimage.transform.rotate`` instead.") -def imrotate(arr, angle, interp='bilinear'): - """ - Rotate an image counter-clockwise by angle degrees. - - This function is only available if Python Imaging Library (PIL) is installed. - - .. warning:: - - This function uses `bytescale` under the hood to rescale images to use - the full (0, 255) range if ``mode`` is one of ``None, 'L', 'P', 'l'``. - It will also cast data for 2-D images to ``uint32`` for ``mode=None`` - (which is the default). - - Parameters - ---------- - arr : ndarray - Input array of image to be rotated. - angle : float - The angle of rotation. - interp : str, optional - Interpolation - - - 'nearest' : for nearest neighbor - - 'bilinear' : for bilinear - - 'lanczos' : for lanczos - - 'cubic' : for bicubic - - 'bicubic' : for bicubic - - Returns - ------- - imrotate : ndarray - The rotated array of image. - - """ - arr = asarray(arr) - func = {'nearest': 0, 'lanczos': 1, 'bilinear': 2, 'bicubic': 3, 'cubic': 3} - im = toimage(arr) - im = im.rotate(angle, resample=func[interp]) - return fromimage(im) - - -@numpy.deprecate(message="`imshow` is deprecated in SciPy 1.0.0, " - "and will be removed in 1.2.0.\n" - "Use ``matplotlib.pyplot.imshow`` instead.") -def imshow(arr): - """ - Simple showing of an image through an external viewer. - - This function is only available if Python Imaging Library (PIL) is installed. - - Uses the image viewer specified by the environment variable - SCIPY_PIL_IMAGE_VIEWER, or if that is not defined then `see`, - to view a temporary file generated from array data. - - .. warning:: - - This function uses `bytescale` under the hood to rescale images to use - the full (0, 255) range if ``mode`` is one of ``None, 'L', 'P', 'l'``. - It will also cast data for 2-D images to ``uint32`` for ``mode=None`` - (which is the default). - - Parameters - ---------- - arr : ndarray - Array of image data to show. 
- - Returns - ------- - None - - Examples - -------- - >>> a = np.tile(np.arange(255), (255,1)) - >>> from scipy import misc - >>> misc.imshow(a) - - """ - im = toimage(arr) - fnum, fname = tempfile.mkstemp('.png') - try: - im.save(fname) - except Exception: - raise RuntimeError("Error saving temporary image data.") - - import os - os.close(fnum) - - cmd = os.environ.get('SCIPY_PIL_IMAGE_VIEWER', 'see') - status = os.system("%s %s" % (cmd, fname)) - - os.unlink(fname) - if status != 0: - raise RuntimeError('Could not execute image viewer.') - - -@numpy.deprecate(message="`imresize` is deprecated in SciPy 1.0.0, " - "and will be removed in 1.3.0.\n" - "Use Pillow instead: ``numpy.array(Image.fromarray(arr).resize())``.") -def imresize(arr, size, interp='bilinear', mode=None): - """ - Resize an image. - - This function is only available if Python Imaging Library (PIL) is installed. - - .. warning:: - - This function uses `bytescale` under the hood to rescale images to use - the full (0, 255) range if ``mode`` is one of ``None, 'L', 'P', 'l'``. - It will also cast data for 2-D images to ``uint32`` for ``mode=None`` - (which is the default). - - Parameters - ---------- - arr : ndarray - The array of image to be resized. - size : int, float or tuple - * int - Percentage of current size. - * float - Fraction of current size. - * tuple - Size of the output image (height, width). - - interp : str, optional - Interpolation to use for re-sizing ('nearest', 'lanczos', 'bilinear', - 'bicubic' or 'cubic'). - mode : str, optional - The PIL image mode ('P', 'L', etc.) to convert `arr` before resizing. - If ``mode=None`` (the default), 2-D images will be treated like - ``mode='L'``, i.e. casting to long integer. For 3-D and 4-D arrays, - `mode` will be set to ``'RGB'`` and ``'RGBA'`` respectively. - - Returns - ------- - imresize : ndarray - The resized array of image. - - See Also - -------- - toimage : Implicitly used to convert `arr` according to `mode`. - scipy.ndimage.zoom : More generic implementation that does not use PIL. - - """ - im = toimage(arr, mode=mode) - ts = type(size) - if issubdtype(ts, numpy.signedinteger): - percent = size / 100.0 - size = tuple((array(im.size)*percent).astype(int)) - elif issubdtype(type(size), numpy.floating): - size = tuple((array(im.size)*size).astype(int)) - else: - size = (size[1], size[0]) - func = {'nearest': 0, 'lanczos': 1, 'bilinear': 2, 'bicubic': 3, 'cubic': 3} - imnew = im.resize(size, resample=func[interp]) - return fromimage(imnew) - - -@numpy.deprecate(message="`imfilter` is deprecated in SciPy 1.0.0, " - "and will be removed in 1.2.0.\n" - "Use Pillow filtering functionality directly.") -def imfilter(arr, ftype): - """ - Simple filtering of an image. - - This function is only available if Python Imaging Library (PIL) is installed. - - .. warning:: - - This function uses `bytescale` under the hood to rescale images to use - the full (0, 255) range if ``mode`` is one of ``None, 'L', 'P', 'l'``. - It will also cast data for 2-D images to ``uint32`` for ``mode=None`` - (which is the default). - - Parameters - ---------- - arr : ndarray - The array of Image in which the filter is to be applied. - ftype : str - The filter that has to be applied. Legal values are: - 'blur', 'contour', 'detail', 'edge_enhance', 'edge_enhance_more', - 'emboss', 'find_edges', 'smooth', 'smooth_more', 'sharpen'. - - Returns - ------- - imfilter : ndarray - The array with filter applied. 
-
-    Raises
-    ------
-    ValueError
-        *Unknown filter type.*  If the filter you are trying
-        to apply is unsupported.
-
-    """
-    _tdict = {'blur': ImageFilter.BLUR,
-              'contour': ImageFilter.CONTOUR,
-              'detail': ImageFilter.DETAIL,
-              'edge_enhance': ImageFilter.EDGE_ENHANCE,
-              'edge_enhance_more': ImageFilter.EDGE_ENHANCE_MORE,
-              'emboss': ImageFilter.EMBOSS,
-              'find_edges': ImageFilter.FIND_EDGES,
-              'smooth': ImageFilter.SMOOTH,
-              'smooth_more': ImageFilter.SMOOTH_MORE,
-              'sharpen': ImageFilter.SHARPEN
-              }
-
-    im = toimage(arr)
-    if ftype not in _tdict:
-        raise ValueError("Unknown filter type.")
-    return fromimage(im.filter(_tdict[ftype]))
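[Editorial aside, not part of the patch: the removal above drops the deprecated image helpers; the deprecation messages in ``pilutil.py`` name their replacements, collected here in one sketch. The file names and target size are assumptions for illustration.]

    # Sketch only: migration targets named by the deprecation messages of the
    # removed functions. 'example.png', 'copy.png' and (64, 64) are assumed.
    import numpy as np
    import imageio
    from PIL import Image

    img = imageio.imread('example.png')           # was scipy.misc.imread
    imageio.imwrite('copy.png', img)              # was scipy.misc.imsave
    arr = np.asarray(Image.open('example.png'))   # was scipy.misc.fromimage
    pil_img = Image.fromarray(img)                # was scipy.misc.toimage
    resized = np.array(pil_img.resize((64, 64)))  # was scipy.misc.imresize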
100644 index 595b18239528e86c669699d6b2c4c52d55536324..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 157 zcmeAS@N?(olHy`uVBq!ia0vp^%s|Y_!3HE73d0rwDYhhUcNd2LAh=-f^2tCE&H|6f zVg?3oVGw3ym^DWND9B#o>FdgVpPN-&S8S2$d?TQcmZytj2*>s0KmY&#uV>Rp_;db1 w!vljq?#~PqrWl%=7#PV}6nscvZhq{@@Ock^OHBLaBS2jYp00i_>zopr0J3Q=EdT%j diff --git a/scipy/misc/tests/data/5x4x3.png b/scipy/misc/tests/data/5x4x3.png deleted file mode 100644 index 91df79aa6c0667fd1c171d669d98f9c1afd5cc70..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 91 zcmeAS@N?(olHy`uVBq!ia0vp^EI`c4!2~4Vp1;)tq?9~e978JR_#WRV$iTqS?6Apl p_VH7}T6VcNi?|oc$7^XQFgxGk_}_Krvpi5WgQu&X%Q~loCIH|48M6QY diff --git a/scipy/misc/tests/data/5x4x4.png b/scipy/misc/tests/data/5x4x4.png deleted file mode 100644 index f0d3fffcfcb71994ccf9325d3d385254ebd3a916..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 97 zcmeAS@N?(olHy`uVBq!ia0vp^EI`c4!3HFg*81KCQW~Bvjv*CuVox9BWia45*m#m7eBkImb*XxKdX&eiJN?JNw`#^CAd=d#Wzp$Pyq!5};U diff --git a/scipy/misc/tests/data/5x5x4.png b/scipy/misc/tests/data/5x5x4.png deleted file mode 100644 index 1f248692fb633ec6c19e48df4155edf78db38cc3..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 112 zcmeAS@N?(olHy`uVBq!ia0vp^tRT$61|)m))t&+=b59q?kcv66T^o5181Nj{IM9A` z{!5ekWqh%d3LZEFcpf@<#z%2(?^4ZJ`7P5Pt-Q^2yDeUV`6q+QZ2`ff6{?egMlg7~ L`njxgN@xNA?K33W diff --git a/scipy/misc/tests/data/blocks2bit.png b/scipy/misc/tests/data/blocks2bit.png deleted file mode 100644 index 95cacf5275e266f397447826e344a645f8a1863e..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 77 zcmeAS@N?(olHy`uVBq!ia0vp^JRr;jBp58$NzVdOBAzaeAr*6y6B2k+5444^{ol{a Z%<$offbRBL1#>{^JYD@<);T3K0RUeR6T1Ka diff --git a/scipy/misc/tests/data/box1.png b/scipy/misc/tests/data/box1.png deleted file mode 100644 index a5d1e9eafeca631ba74900b1a5b5f6a67d3d806a..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 208 zcmeAS@N?(olHy`uVBq!ia0vp^1|ZA`BpB)|k7xlYmUKs7M+U~W1%@xC#RK_FN#5=* z42*ZXgqHw$;+`&!ArhC9fAF*M@bWkuu<)Ab@5m&?;Kin_{Gs5;cc2p064!{5l*E!$ ztK_0oAjM#0U}&mqV5w_l5@KX*WoTe!Y@lmkVr5{kRN?Im6b-rgDVb@Nm>P@>AsY5A Sh~oolVDNPHb6Mw<&;$VMw=^yQ diff --git a/scipy/misc/tests/data/foo3x5x4indexed.png b/scipy/misc/tests/data/foo3x5x4indexed.png deleted file mode 100644 index cc969d03dd263fde0293045012129ad1e01ea824..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 116 zcmeAS@N?(olHy`uVBq!ia0vp^tU%1n!3-pGUYMQ&Qak}ZA+8Jz{~6@}*E9S-aNz$2 z)p@}{ah8%GzhH*{{~6}#uLo%o_H=O!shE?Tz{JRwmd2)#z{1GD@Qp!iqy9%JpfrQ0 LtDnm{r-UW|Gdmrm diff --git a/scipy/misc/tests/data/icon.png b/scipy/misc/tests/data/icon.png deleted file mode 100644 index e9037e282b4f5f77acd46172d5523deb73c2e73f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 2122 zcmV-Q2(|Z#P)j zVLLb>u?dCHK*9!u6rfAh1&Ig5D_UtQ<+<8QtXg>KQ+HobS4vrlN)>3|_hFG%Li>W+ zG%Sir0Bs9t0#s2#fY>B)9OuKaW6#*W`!E@AVv;y^9CaV|FO57N-FxQz&;8$X&%Fa% z$LE0f1+LFGb4o6O03MHr3l}c1d-rZKnM^5Niz1WBkj-WRc=Y3sy!qh=E@&DfaON(& z0saBZu1DZd5!+(r*XMM(IQiat95`?QP17poyaaeX9=u*J0Drl0gGWO{{6W`wAjJEx zpm74Gb@&&sSV-XSTO?3FH95)Et5;D~6_LwT#QB^MSb*;CZo0d>*Su5!9|Cm{xD40E zpa-~7NZ{~k;B0}n>oUzyn}B z6k>RI7{A|-#bR0O2Jm~p0>7RD|7!^Aa2JSgK8Y`0gpWT4iv@By;}@W(2kzcAemgoq z*Nx|2eFa~Cy?Ni&ZA!%NfJO*>YRKu&z+X2f(9mE+@zyN}g+NuoW`m!9HtzTDhxvK< z_FJQUAOIhIWVAO&ps?@A$Xl-w7`l)o2}zP5li`i7bJJ?25is+eU6Pm*LRrdW-T~_K 
z650?}t09$dzJaGtjYx(I-PBs6VNnhYJxAngiHpWKZnbgVQj4Uc$d*v2xID= zJ9m!Y-o4x(7+`L04y)C=5>G49nHl=N`;K5R_}k;hkDn<%0`WN9yJt+i>1pHtR$#G| z+*b&J5CV_K!>^7U!Rc_2NF?%;T2T~6M@M<|@F5O|V|PPC!|u%q*lfm&uU&)T;q}|p z*TbPhrUx)O3aJz*O7XrMK-YDwRx1Y&9$fQWRaInJMpad^*(|!QmsUy?gv+=tUW5}T zjAyrQ!6%;>T33{^O_zer3H0{D>(^`Yb{>(4kq!fa|K|Gset7xPxbN#Th^*JQ?3lg1 z%*p^Wz%#Q^A%WiBwMNXFnVHS=`8s_IQ3Z?w>6QC(AcIlneZL-ojUus~Fs_0HSO!8s z=QarZG0I9|vAMs$sj}NY*n8jrcBgY|#hfHzu~_oiC4axtR{*xu1ty% z_|q2KO4V-n2j|b8rKzb2-7FtA^j#vG&5}$eQ4|H6%|O3A;{%&2qBD` zsL}+$80t(WliyP23$Oy1&lE)=91b%vF@eM3z-F`MBfOqe3hkNmXCqkHAqXLmWf_mh zLpq&ia&i*4+l|}pF6vY%D0?L~qxA6ML+;CcR`t<3x2^>Crc;^cs!2J=VNAOX3fWyic4sCc(|l`zL0<-FZdP#_SXv$K;YPoA*2xXA46Y~@Bkk|b<48<9wa z>({T-+}unqm&@;}%7w*ZAruNRJv~h#kzi(K2D{ykqA1MG&8_) zn4O(vd3l-Y>S{VWJK42sS4o@mLg&r=?{qqGyWPmLjNNW$d3kwz1SCm9(==kS7~|vP z)Ya9|)z!tld-rH;Y~<+CqXYs0>~?$E1Xd%L%jM6DR8>V))$K`0k|d(hC@)^TU}P?46Mn?GQr=Qrre?PHUjK01;g25my zEiDz@ux|w=jlj~<5)U3cAQ%kN-ri0+ohBZS<8(S{Z*M0U46b|XRw<~enlCBJngjFm z^W3>}hx6yp)7I9;z{)3}+S*!Tu^5?5rXsrE2#TUGGBQFcl`3x`OQli_4i56_)hkq0 zC7Dc;$z+ga8HdC1*7w~?Ly{y?sT5OFQ~9}83W2^{E|*CrlhoAIobh_SXDk*=emF}# z5-bO%{Xig47J>eiix+w4ECIA2c07*qoM6N<$g1wmt Ad;kCd diff --git a/scipy/misc/tests/data/icon_mono.png b/scipy/misc/tests/data/icon_mono.png deleted file mode 100644 index 612c9c604edaaa47d198f5eb109edd978c2a57cc..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 449 zcmeAS@N?(olHy`uVBq!ia0vp^1|ZDD3?#L31Vw-pXMj(LD+9xc|Ns93c|9gUbAc3N zNswPKgTu2MX&_FLx4R2N2dk_HNO^%rWHAE+w=f7ZGR&GI0Tg5}@$_|Nzs@1UBc!t` z?LaF~kD;fFV~EE2&`H*LEQTCy7sWJ-Cd+Vm`%GmM`7Kh-sq-*v_C_PS)D1Il-o0?+ z#q+~9JPg|f*)Hd8T61Ul=g!lgm>={=9{={a=-VBJZybjlzw+%}u5-**{Ab(U85bf> znaLI0IM$|Q`90piQQ3RaWRVwhZp?TaaBm^E^u%^WKUayXIRa_ncQ$n@h@UrGd$RGw z`69N!nW6m`quzHMf5X1f==7OqeA?Gq*6b0n7xh2t@9Y-GqdaMsc$d1~;)h9Fo?J}H z&{+L-Nq6A76VvoscP}Y165JNwwLNOF+1YzS*XwV7Skue#@?=Erz3(5Vy-o56oqW{V z&T4Yh{mqdEcDwI9O*u7ds*c*kbx&J^o=<%wyRLIjTS7%7Nj@;OXk;vd$@?2>_GXx@iCa diff --git a/scipy/misc/tests/data/icon_mono_flat.png b/scipy/misc/tests/data/icon_mono_flat.png deleted file mode 100644 index c42b9a025a0e65d3166c12e2ddf191472d2f7fc4..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 412 zcmeAS@N?(olHy`uVBq!ia0vp^1|ZDH3?y^UWFG-iYymzYu0Z<#|Nl#G&c6#}aTa() z7Bet#3xhBt!>ll`vXDhz^)Y=Lr&nw~C>ArhB$Cm4Dh4iIRoH)!@g z_DHFt@8yG#02ih{s}5(KEeYa|%ey8oN;v9UP!PiYdXb5KMqsA)w19GU^_AXhyTyca zUIgTFJliw<*R;pG-`_i5xWJr&rKm;YiG#n(KE9uAp95#kY4>w^#dXZ?e14yf%A0q8 z4>Bb6OBUL6g?zthsjKjAnYZ)y3tqev-z(<*l~~2^J7eV|*`zi@Yay4!SKKcgXG*?r z*z-PXBYRQt&4k%2gA~>Jjkadz1=&t$aa3j4`fciFU$wc1N|zOFP|5Vl_C9`=)AOq9 zxtzHjpZ=>@pU>OpH9bOX?!P4^m!+*U)lTn-cFVdQ&MBb@ E032wg4FCWD diff --git a/scipy/misc/tests/data/pattern4bit.png b/scipy/misc/tests/data/pattern4bit.png deleted file mode 100644 index 58411fe53b49db7d67dccd74c1984bdee34ef675..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 169 zcmeAS@N?(olHy`uVBq!ia0vp^@<7bP0wfr!%CB4mQVJ!m5hX6J!OlSpxtV#Hxuv-p zxrr5Ex=2SMSyM;B&{Q*A==3k3S{F|j$B>FS$q6B4zVBmdzu8WyJD|{^x1dEYA*V;} z!J{TM0V%02tqIenwP^+Lv1l;KIy5L52(wIJ4QpiBbRf^bfrEje^-1!r#;+*{L2mGL L^>bP0l+XkKR+=-k diff --git a/scipy/misc/tests/test_common.py b/scipy/misc/tests/test_common.py index feea58096296..0837b7db12cc 100644 --- a/scipy/misc/tests/test_common.py +++ b/scipy/misc/tests/test_common.py @@ -1,26 +1,9 @@ from __future__ import division, print_function, absolute_import import pytest -from numpy.testing import assert_equal, assert_allclose, assert_almost_equal -from scipy._lib._numpy_compat import 
suppress_warnings +from numpy.testing import assert_equal, assert_almost_equal -from scipy.misc import pade, logsumexp, face, ascent, electrocardiogram -from scipy.special import logsumexp as sc_logsumexp - - -def test_logsumexp(): - # make sure logsumexp can be imported from either scipy.misc or - # scipy.special - with suppress_warnings() as sup: - sup.filter(DeprecationWarning, "`logsumexp` is deprecated") - assert_allclose(logsumexp([0, 1]), sc_logsumexp([0, 1]), atol=1e-16) - - -def test_pade(): - # make sure scipy.misc.pade exists - with suppress_warnings() as sup: - sup.filter(DeprecationWarning, "`pade` is deprecated") - pade([1, 2], 1) +from scipy.misc import face, ascent, electrocardiogram def test_face(): diff --git a/scipy/misc/tests/test_pilutil.py b/scipy/misc/tests/test_pilutil.py deleted file mode 100644 index a5a215331d0f..000000000000 --- a/scipy/misc/tests/test_pilutil.py +++ /dev/null @@ -1,283 +0,0 @@ -from __future__ import division, print_function, absolute_import - -import os.path -import tempfile -import shutil -import numpy as np -import glob - -import pytest -from pytest import raises as assert_raises -from numpy.testing import (assert_equal, assert_allclose, - assert_array_equal, assert_) -from scipy._lib._numpy_compat import suppress_warnings -from scipy import misc -from numpy.ma.testutils import assert_mask_equal - -try: - import PIL.Image -except ImportError: - _have_PIL = False -else: - _have_PIL = True - - -# Function / method decorator for skipping PIL tests on import failure -_pilskip = pytest.mark.skipif(not _have_PIL, reason='Need to import PIL for this test') - -datapath = os.path.dirname(__file__) - -@_pilskip -class TestPILUtil(object): - def test_imresize(self): - im = np.random.random((10, 20)) - for T in np.sctypes['float'] + [float]: - # 1.1 rounds to below 1.1 for float16, 1.101 works - with suppress_warnings() as sup: - sup.filter(DeprecationWarning) - im1 = misc.imresize(im, T(1.101)) - assert_equal(im1.shape, (11, 22)) - - def test_imresize2(self): - im = np.random.random((20, 30)) - with suppress_warnings() as sup: - sup.filter(DeprecationWarning) - im2 = misc.imresize(im, (30, 40), interp='bicubic') - assert_equal(im2.shape, (30, 40)) - - def test_imresize3(self): - im = np.random.random((15, 30)) - with suppress_warnings() as sup: - sup.filter(DeprecationWarning) - im2 = misc.imresize(im, (30, 60), interp='nearest') - assert_equal(im2.shape, (30, 60)) - - def test_imresize4(self): - im = np.array([[1, 2], - [3, 4]]) - # Check that resizing by target size, float and int are the same - with suppress_warnings() as sup: - sup.filter(DeprecationWarning) - im2 = misc.imresize(im, (4, 4), mode='F') # output size - im3 = misc.imresize(im, 2., mode='F') # fraction - im4 = misc.imresize(im, 200, mode='F') # percentage - assert_equal(im2, im3) - assert_equal(im2, im4) - - def test_imresize5(self): - im = np.random.random((25, 15)) - with suppress_warnings() as sup: - sup.filter(DeprecationWarning) - im2 = misc.imresize(im, (30, 60), interp='lanczos') - assert_equal(im2.shape, (30, 60)) - - def test_bytescale(self): - x = np.array([0, 1, 2], np.uint8) - y = np.array([0, 1, 2]) - with suppress_warnings() as sup: - sup.filter(DeprecationWarning) - assert_equal(misc.bytescale(x), x) - assert_equal(misc.bytescale(y), [0, 128, 255]) - - def test_bytescale_keywords(self): - x = np.array([40, 60, 120, 200, 300, 500]) - with suppress_warnings() as sup: - sup.filter(DeprecationWarning) - res_lowhigh = misc.bytescale(x, low=10, high=143) - 
assert_equal(res_lowhigh, [10, 16, 33, 56, 85, 143]) - res_cmincmax = misc.bytescale(x, cmin=60, cmax=300) - assert_equal(res_cmincmax, [0, 0, 64, 149, 255, 255]) - assert_equal(misc.bytescale(np.array([3, 3, 3]), low=4), [4, 4, 4]) - - def test_bytescale_cscale_lowhigh(self): - a = np.arange(10) - with suppress_warnings() as sup: - sup.filter(DeprecationWarning) - actual = misc.bytescale(a, cmin=3, cmax=6, low=100, high=200) - expected = [100, 100, 100, 100, 133, 167, 200, 200, 200, 200] - assert_equal(actual, expected) - - def test_bytescale_mask(self): - a = np.ma.MaskedArray(data=[1, 2, 3], mask=[False, False, True]) - with suppress_warnings() as sup: - sup.filter(DeprecationWarning) - actual = misc.bytescale(a) - expected = [0, 255, 3] - assert_equal(expected, actual) - assert_mask_equal(a.mask, actual.mask) - assert_(isinstance(actual, np.ma.MaskedArray)) - - def test_bytescale_rounding(self): - a = np.array([-0.5, 0.5, 1.5, 2.5, 3.5]) - with suppress_warnings() as sup: - sup.filter(DeprecationWarning) - actual = misc.bytescale(a, cmin=0, cmax=10, low=0, high=10) - expected = [0, 1, 2, 3, 4] - assert_equal(actual, expected) - - def test_bytescale_low_greaterthan_high(self): - with assert_raises(ValueError): - with suppress_warnings() as sup: - sup.filter(DeprecationWarning) - misc.bytescale(np.arange(3), low=10, high=5) - - def test_bytescale_low_lessthan_0(self): - with assert_raises(ValueError): - with suppress_warnings() as sup: - sup.filter(DeprecationWarning) - misc.bytescale(np.arange(3), low=-1) - - def test_bytescale_high_greaterthan_255(self): - with assert_raises(ValueError): - with suppress_warnings() as sup: - sup.filter(DeprecationWarning) - misc.bytescale(np.arange(3), high=256) - - def test_bytescale_low_equals_high(self): - a = np.arange(3) - with suppress_warnings() as sup: - sup.filter(DeprecationWarning) - actual = misc.bytescale(a, low=10, high=10) - expected = [10, 10, 10] - assert_equal(actual, expected) - - def test_imsave(self): - picdir = os.path.join(datapath, "data") - for png in glob.iglob(picdir + "/*.png"): - with suppress_warnings() as sup: - # PIL causes a Py3k ResourceWarning - sup.filter(message="unclosed file") - sup.filter(DeprecationWarning) - img = misc.imread(png) - tmpdir = tempfile.mkdtemp() - try: - fn1 = os.path.join(tmpdir, 'test.png') - fn2 = os.path.join(tmpdir, 'testimg') - with suppress_warnings() as sup: - # PIL causes a Py3k ResourceWarning - sup.filter(message="unclosed file") - sup.filter(DeprecationWarning) - misc.imsave(fn1, img) - misc.imsave(fn2, img, 'PNG') - - with suppress_warnings() as sup: - # PIL causes a Py3k ResourceWarning - sup.filter(message="unclosed file") - sup.filter(DeprecationWarning) - data1 = misc.imread(fn1) - data2 = misc.imread(fn2) - assert_allclose(data1, img) - assert_allclose(data2, img) - assert_equal(data1.shape, img.shape) - assert_equal(data2.shape, img.shape) - finally: - shutil.rmtree(tmpdir) - - -def check_fromimage(filename, irange, shape): - fp = open(filename, "rb") - with suppress_warnings() as sup: - sup.filter(DeprecationWarning) - img = misc.fromimage(PIL.Image.open(fp)) - fp.close() - imin, imax = irange - assert_equal(img.min(), imin) - assert_equal(img.max(), imax) - assert_equal(img.shape, shape) - - -@_pilskip -def test_fromimage(): - # Test generator for parametric tests - # Tuples in the list are (filename, (datamin, datamax), shape). 
- files = [('icon.png', (0, 255), (48, 48, 4)), - ('icon_mono.png', (0, 255), (48, 48, 4)), - ('icon_mono_flat.png', (0, 255), (48, 48, 3))] - for fn, irange, shape in files: - with suppress_warnings() as sup: - sup.filter(DeprecationWarning) - check_fromimage(os.path.join(datapath, 'data', fn), irange, shape) - - -@_pilskip -def test_imread_indexed_png(): - # The file `foo3x5x4indexed.png` was created with this array - # (3x5 is (height)x(width)): - data = np.array([[[127, 0, 255, 255], - [127, 0, 255, 255], - [127, 0, 255, 255], - [127, 0, 255, 255], - [127, 0, 255, 255]], - [[192, 192, 255, 0], - [192, 192, 255, 0], - [0, 0, 255, 0], - [0, 0, 255, 0], - [0, 0, 255, 0]], - [[0, 31, 255, 255], - [0, 31, 255, 255], - [0, 31, 255, 255], - [0, 31, 255, 255], - [0, 31, 255, 255]]], dtype=np.uint8) - - filename = os.path.join(datapath, 'data', 'foo3x5x4indexed.png') - with open(filename, 'rb') as f: - with suppress_warnings() as sup: - sup.filter(DeprecationWarning) - im = misc.imread(f) - assert_array_equal(im, data) - - -@_pilskip -def test_imread_1bit(): - # box1.png is a 48x48 grayscale image with bit depth 1. - # The border pixels are 1 and the rest are 0. - filename = os.path.join(datapath, 'data', 'box1.png') - with open(filename, 'rb') as f: - with suppress_warnings() as sup: - sup.filter(DeprecationWarning) - im = misc.imread(f) - assert_equal(im.dtype, np.uint8) - expected = np.zeros((48, 48), dtype=np.uint8) - # When scaled up from 1 bit to 8 bits, 1 becomes 255. - expected[:, 0] = 255 - expected[:, -1] = 255 - expected[0, :] = 255 - expected[-1, :] = 255 - assert_equal(im, expected) - - -@_pilskip -def test_imread_2bit(): - # blocks2bit.png is a 12x12 grayscale image with bit depth 2. - # The pattern is 4 square subblocks of size 6x6. Upper left - # is all 0, upper right is all 1, lower left is all 2, lower - # right is all 3. - # When scaled up to 8 bits, the values become [0, 85, 170, 255]. - filename = os.path.join(datapath, 'data', 'blocks2bit.png') - with open(filename, 'rb') as f: - with suppress_warnings() as sup: - sup.filter(DeprecationWarning) - im = misc.imread(f) - assert_equal(im.dtype, np.uint8) - expected = np.zeros((12, 12), dtype=np.uint8) - expected[:6, 6:] = 85 - expected[6:, :6] = 170 - expected[6:, 6:] = 255 - assert_equal(im, expected) - - -@_pilskip -def test_imread_4bit(): - # pattern4bit.png is a 12(h) x 31(w) grayscale image with bit depth 4. - # The value in row j and column i is maximum(j, i) % 16. - # When scaled up to 8 bits, the values become [0, 17, 34, ..., 255]. - filename = os.path.join(datapath, 'data', 'pattern4bit.png') - with open(filename, 'rb') as f: - with suppress_warnings() as sup: - sup.filter(DeprecationWarning) - im = misc.imread(f) - assert_equal(im.dtype, np.uint8) - j, i = np.meshgrid(np.arange(12), np.arange(31), indexing='ij') - expected = 17*(np.maximum(j, i) % 16).astype(np.uint8) - assert_equal(im, expected) - diff --git a/scipy/ndimage/__init__.py b/scipy/ndimage/__init__.py index 233fee0deef5..dd652f7b47ce 100644 --- a/scipy/ndimage/__init__.py +++ b/scipy/ndimage/__init__.py @@ -116,14 +116,6 @@ morphological_laplace white_tophat -Utility -======= - -.. autosummary:: - :toctree: generated/ - - imread - Load an image from a file - """ # Copyright (C) 2003-2005 Peter J. 
Verveer @@ -163,7 +155,6 @@ from .interpolation import * from .measurements import * from .morphology import * -from .io import * __version__ = '2.0' diff --git a/scipy/ndimage/io.py b/scipy/ndimage/io.py deleted file mode 100644 index 7e304067c342..000000000000 --- a/scipy/ndimage/io.py +++ /dev/null @@ -1,36 +0,0 @@ -from __future__ import division, print_function, absolute_import - -import numpy as np - - -_have_pil = True -try: - from scipy.misc.pilutil import imread as _imread -except ImportError: - _have_pil = False - - -__all__ = ['imread'] - - -# Use the implementation of `imread` in `scipy.misc.pilutil.imread`. -# If it weren't for the different names of the first arguments of -# ndimage.io.imread and misc.pilutil.imread, we could simplify this file -# by writing -# from scipy.misc.pilutil import imread -# Unfortunately, because the argument names are different, that -# introduces a backwards incompatibility. - -@np.deprecate(message="`imread` is deprecated in SciPy 1.0.0.\n" - "Use ``matplotlib.pyplot.imread`` instead.") -def imread(fname, flatten=False, mode=None): - if _have_pil: - return _imread(fname, flatten, mode) - raise ImportError("Could not import the Python Imaging Library (PIL)" - " required to load image files. Please refer to" - " http://pillow.readthedocs.org/en/latest/installation.html" - " for installation instructions.") - - -if _have_pil and _imread.__doc__ is not None: - imread.__doc__ = _imread.__doc__.replace('name : str', 'fname : str') diff --git a/scipy/ndimage/tests/test_io.py b/scipy/ndimage/tests/test_io.py deleted file mode 100644 index 36966f2ffbae..000000000000 --- a/scipy/ndimage/tests/test_io.py +++ /dev/null @@ -1,38 +0,0 @@ -from __future__ import division, print_function, absolute_import - -import pytest -from numpy.testing import assert_array_equal -from scipy._lib._numpy_compat import suppress_warnings -import scipy.ndimage as ndi - -import os - -try: - from PIL import Image - pil_missing = False -except ImportError: - pil_missing = True - - -@pytest.mark.skipif(pil_missing, reason="The Python Image Library could not be found.") -def test_imread(): - lp = os.path.join(os.path.dirname(__file__), 'dots.png') - with suppress_warnings() as sup: - # PIL causes a Py3k ResourceWarning - sup.filter(message="unclosed file") - sup.filter(DeprecationWarning) - img = ndi.imread(lp, mode="RGB") - assert_array_equal(img.shape, (300, 420, 3)) - - with suppress_warnings() as sup: - # PIL causes a Py3k ResourceWarning - sup.filter(message="unclosed file") - sup.filter(DeprecationWarning) - img = ndi.imread(lp, flatten=True) - assert_array_equal(img.shape, (300, 420)) - - with open(lp, 'rb') as fobj: - with suppress_warnings() as sup: - sup.filter(DeprecationWarning) - img = ndi.imread(fobj, mode="RGB") - assert_array_equal(img.shape, (300, 420, 3)) From 6bb3396e63665536fe10eeb0bd7b73315146b7b3 Mon Sep 17 00:00:00 2001 From: Ilhan Polat Date: Mon, 19 Nov 2018 21:48:12 +0100 Subject: [PATCH 22/70] DEP: Remove deprecated functions from interpolate Xref #1408 --- scipy/interpolate/README | 4 - scipy/interpolate/__init__.py | 4 - scipy/interpolate/interpolate.py | 212 +------------------------------ 3 files changed, 2 insertions(+), 218 deletions(-) diff --git a/scipy/interpolate/README b/scipy/interpolate/README index 1a9f7e54dcd1..ecdff1986af7 100644 --- a/scipy/interpolate/README +++ b/scipy/interpolate/README @@ -91,12 +91,8 @@ interpolate.lagrange interpolate.ppform interpolate.spalde interpolate.splev -interpolate.spleval -interpolate.spline 
interpolate.splint -interpolate.splmake interpolate.splprep interpolate.splrep -interpolate.spltopp interpolate.sproot interpolate.test diff --git a/scipy/interpolate/__init__.py b/scipy/interpolate/__init__.py index 1b425f0bfbb5..9dfebddc15ee 100644 --- a/scipy/interpolate/__init__.py +++ b/scipy/interpolate/__init__.py @@ -163,10 +163,6 @@ .. autosummary:: :toctree: generated/ - spleval - spline - splmake - spltopp pchip """ diff --git a/scipy/interpolate/interpolate.py b/scipy/interpolate/interpolate.py index f7a5b23aee2f..61a446abeb53 100644 --- a/scipy/interpolate/interpolate.py +++ b/scipy/interpolate/interpolate.py @@ -1,13 +1,8 @@ -""" Classes for interpolating values. -""" from __future__ import division, print_function, absolute_import - -__all__ = ['interp1d', 'interp2d', 'spline', 'spleval', 'splmake', 'spltopp', - 'lagrange', 'PPoly', 'BPoly', 'NdPPoly', +__all__ = ['interp1d', 'interp2d', 'lagrange', 'PPoly', 'BPoly', 'NdPPoly', 'RegularGridInterpolator', 'interpn'] - import itertools import warnings import functools @@ -15,9 +10,8 @@ import numpy as np from numpy import (array, transpose, searchsorted, atleast_1d, atleast_2d, - dot, ravel, poly1d, asarray, intp) + ravel, poly1d, asarray, intp) -import scipy.linalg import scipy.special as spec from scipy.special import comb @@ -2714,205 +2708,3 @@ def fromspline(cls, xk, cvals, order, fill=0.0): res /= fact sivals[order-m, :] = res return cls(sivals, xk, fill=fill) - - -# The 3 private functions below can be called by splmake(). - - -def _dot0(a, b): - """Similar to numpy.dot, but sum over last axis of a and 1st axis of b""" - if b.ndim <= 2: - return dot(a, b) - else: - axes = list(range(b.ndim)) - axes.insert(-1, 0) - axes.pop(0) - return dot(a, b.transpose(axes)) - - -def _find_smoothest(xk, yk, order, conds=None, B=None): - # construct Bmatrix, and Jmatrix - # e = J*c - # minimize norm(e,2) given B*c=yk - # if desired B can be given - # conds is ignored - N = len(xk)-1 - K = order - if B is None: - B = _fitpack._bsplmat(order, xk) - J = _fitpack._bspldismat(order, xk) - u, s, vh = scipy.linalg.svd(B) - ind = K-1 - V2 = vh[-ind:,:].T - V1 = vh[:-ind,:].T - A = dot(J.T,J) - tmp = dot(V2.T,A) - Q = dot(tmp,V2) - p = scipy.linalg.solve(Q, tmp) - tmp = dot(V2,p) - tmp = np.eye(N+K) - tmp - tmp = dot(tmp,V1) - tmp = dot(tmp,np.diag(1.0/s)) - tmp = dot(tmp,u.T) - return _dot0(tmp, yk) - - -# conds is a tuple of an array and a vector -# giving the left-hand and the right-hand side -# of the additional equations to add to B - - -def _find_user(xk, yk, order, conds, B): - lh = conds[0] - rh = conds[1] - B = np.concatenate((B, lh), axis=0) - w = np.concatenate((yk, rh), axis=0) - M, N = B.shape - if (M > N): - raise ValueError("over-specification of conditions") - elif (M < N): - return _find_smoothest(xk, yk, order, None, B) - else: - return scipy.linalg.solve(B, w) - - -# Remove the 3 private functions above as well when removing splmake -@np.deprecate(message="splmake is deprecated in scipy 0.19.0, " - "use make_interp_spline instead.") -def splmake(xk, yk, order=3, kind='smoothest', conds=None): - """ - Return a representation of a spline given data-points at internal knots - - Parameters - ---------- - xk : array_like - The input array of x values of rank 1 - yk : array_like - The input array of y values of rank N. `yk` can be an N-d array to - represent more than one curve, through the same `xk` points. The first - dimension is assumed to be the interpolating dimension and is the same - length of `xk`. 
- order : int, optional - Order of the spline - kind : str, optional - Can be 'smoothest', 'not_a_knot', 'fixed', 'clamped', 'natural', - 'periodic', 'symmetric', 'user', 'mixed' and it is ignored if order < 2 - conds : optional - Conds - - Returns - ------- - splmake : tuple - Return a (`xk`, `cvals`, `k`) representation of a spline given - data-points where the (internal) knots are at the data-points. - - """ - yk = np.asanyarray(yk) - - order = int(order) - if order < 0: - raise ValueError("order must not be negative") - if order == 0: - return xk, yk[:-1], order - elif order == 1: - return xk, yk, order - - try: - func = eval('_find_%s' % kind) - except Exception: - raise NotImplementedError - - # the constraint matrix - B = _fitpack._bsplmat(order, xk) - coefs = func(xk, yk, order, conds, B) - return xk, coefs, order - - -@np.deprecate(message="spleval is deprecated in scipy 0.19.0, " - "use BSpline instead.") -def spleval(xck, xnew, deriv=0): - """ - Evaluate a fixed spline represented by the given tuple at the new x-values - - The `xj` values are the interior knot points. The approximation - region is `xj[0]` to `xj[-1]`. If N+1 is the length of `xj`, then `cvals` - should have length N+k where `k` is the order of the spline. - - Parameters - ---------- - (xj, cvals, k) : tuple - Parameters that define the fixed spline - xj : array_like - Interior knot points - cvals : array_like - Curvature - k : int - Order of the spline - xnew : array_like - Locations to calculate spline - deriv : int - Deriv - - Returns - ------- - spleval : ndarray - If `cvals` represents more than one curve (`cvals.ndim` > 1) and/or - `xnew` is N-d, then the result is `xnew.shape` + `cvals.shape[1:]` - providing the interpolation of multiple curves. - - Notes - ----- - Internally, an additional `k`-1 knot points are added on either side of - the spline. - - """ - (xj, cvals, k) = xck - oldshape = np.shape(xnew) - xx = np.ravel(xnew) - sh = cvals.shape[1:] - res = np.empty(xx.shape + sh, dtype=cvals.dtype) - for index in np.ndindex(*sh): - sl = (slice(None),) + index - if issubclass(cvals.dtype.type, np.complexfloating): - res[sl].real = _fitpack._bspleval(xx,xj, cvals.real[sl], k, deriv) - res[sl].imag = _fitpack._bspleval(xx,xj, cvals.imag[sl], k, deriv) - else: - res[sl] = _fitpack._bspleval(xx, xj, cvals[sl], k, deriv) - res.shape = oldshape + sh - return res - - -# When `spltopp` gets removed, also remove the _ppform class. -@np.deprecate(message="spltopp is deprecated in scipy 0.19.0, " - "use PPoly.from_spline instead.") -def spltopp(xk, cvals, k): - """Return a piece-wise polynomial object from a fixed-spline tuple.""" - return _ppform.fromspline(xk, cvals, k) - - -@np.deprecate(message="spline is deprecated in scipy 0.19.0, " - "use Bspline class instead.") -def spline(xk, yk, xnew, order=3, kind='smoothest', conds=None): - """ - Interpolate a curve at new points using a spline fit - - Parameters - ---------- - xk, yk : array_like - The x and y values that define the curve. - xnew : array_like - The x values where spline should estimate the y values. - order : int - Default is 3. - kind : string - One of {'smoothest'} - conds : Don't know - Don't know - - Returns - ------- - spline : ndarray - An array of y values; the spline evaluated at the positions `xnew`. 
- - """ - return spleval(splmake(xk, yk, order=order, kind=kind, conds=conds), xnew) From f811e81c4d6ad6937a138d2a40c38f8870172fcc Mon Sep 17 00:00:00 2001 From: Ilhan Polat Date: Mon, 19 Nov 2018 22:15:54 +0100 Subject: [PATCH 23/70] CI: MAINT: Skip ckdtree test on pypy [ci skip] --- scipy/spatial/tests/test_kdtree.py | 74 ++++++++++++++++-------------- 1 file changed, 39 insertions(+), 35 deletions(-) diff --git a/scipy/spatial/tests/test_kdtree.py b/scipy/spatial/tests/test_kdtree.py index 13659a01da9d..b8b5ce725c5b 100644 --- a/scipy/spatial/tests/test_kdtree.py +++ b/scipy/spatial/tests/test_kdtree.py @@ -3,10 +3,11 @@ from __future__ import division, print_function, absolute_import -from numpy.testing import (assert_equal, assert_array_equal, - assert_almost_equal, assert_array_almost_equal, assert_) +from numpy.testing import (assert_equal, assert_array_equal, assert_, + assert_almost_equal, assert_array_almost_equal) from pytest import raises as assert_raises - +import pytest +from platform import python_implementation import numpy as np from scipy.spatial import KDTree, Rectangle, distance_matrix, cKDTree from scipy.spatial.ckdtree import cKDTreeNode @@ -441,8 +442,8 @@ def test_random_ball_vectorized_compiled(): r = T.query_ball_point(np.random.randn(2,3,m),1) assert_equal(r.shape,(2,3)) assert_(isinstance(r[0,0],list)) - - + + def test_query_ball_point_multithreading(): np.random.seed(0) n = 5000 @@ -452,15 +453,15 @@ def test_query_ball_point_multithreading(): l1 = T.query_ball_point(points,0.003,n_jobs=1) l2 = T.query_ball_point(points,0.003,n_jobs=64) l3 = T.query_ball_point(points,0.003,n_jobs=-1) - + for i in range(n): if l1[i] or l2[i]: assert_array_equal(l1[i],l2[i]) - + for i in range(n): if l1[i] or l3[i]: assert_array_equal(l1[i],l3[i]) - + class two_trees_consistency: @@ -714,7 +715,7 @@ def test_consistency_with_python(self): M1 = self.T1.sparse_distance_matrix(self.T2, self.r) M2 = self.ref_T1.sparse_distance_matrix(self.ref_T2, self.r) assert_array_almost_equal(M1.todense(), M2.todense(), decimal=14) - + def test_against_logic_error_regression(self): # regression test for gh-5077 logic error np.random.seed(0) @@ -730,7 +731,7 @@ def test_ckdtree_return_types(self): for j in range(self.n): v = self.data1[i,:] - self.data2[j,:] ref[i,j] = np.dot(v,v) - ref = np.sqrt(ref) + ref = np.sqrt(ref) ref[ref > self.r] = 0. 
# test return type 'dict' dist = np.zeros((self.n,self.n)) @@ -740,7 +741,7 @@ def test_ckdtree_return_types(self): assert_array_almost_equal(ref, dist, decimal=14) # test return type 'ndarray' dist = np.zeros((self.n,self.n)) - r = self.T1.sparse_distance_matrix(self.T2, self.r, + r = self.T1.sparse_distance_matrix(self.T2, self.r, output_type='ndarray') for k in range(r.shape[0]): i = r['i'][k] @@ -749,11 +750,11 @@ def test_ckdtree_return_types(self): dist[i,j] = v assert_array_almost_equal(ref, dist, decimal=14) # test return type 'dok_matrix' - r = self.T1.sparse_distance_matrix(self.T2, self.r, + r = self.T1.sparse_distance_matrix(self.T2, self.r, output_type='dok_matrix') assert_array_almost_equal(ref, r.todense(), decimal=14) # test return type 'coo_matrix' - r = self.T1.sparse_distance_matrix(self.T2, self.r, + r = self.T1.sparse_distance_matrix(self.T2, self.r, output_type='coo_matrix') assert_array_almost_equal(ref, r.todense(), decimal=14) @@ -858,11 +859,11 @@ def test_ckdtree_query_pairs(): l0 = sorted(brute) # test default return type s = T.query_pairs(r) - l1 = sorted(s) + l1 = sorted(s) assert_array_equal(l0,l1) # test return type 'set' s = T.query_pairs(r, output_type='set') - l1 = sorted(s) + l1 = sorted(s) assert_array_equal(l0,l1) # test return type 'ndarray' s = set() @@ -871,8 +872,8 @@ def test_ckdtree_query_pairs(): s.add((int(arr[i,0]),int(arr[i,1]))) l2 = sorted(s) assert_array_equal(l0,l2) - - + + def test_ball_point_ints(): # Regression test for #1373. x, y = np.mgrid[0:4, 0:4] @@ -942,10 +943,10 @@ def test_ckdtree_pickle_boxsize(): T1 = T1.query(points, k=5)[-1] T2 = T2.query(points, k=5)[-1] assert_array_equal(T1, T2) - + def test_ckdtree_copy_data(): # check if copy_data=True makes the kd-tree - # impervious to data corruption by modification of + # impervious to data corruption by modification of # the data arrray np.random.seed(0) n = 5000 @@ -957,7 +958,7 @@ def test_ckdtree_copy_data(): points[...] = np.random.randn(n, k) T2 = T.query(q, k=5)[-1] assert_array_equal(T1, T2) - + def test_ckdtree_parallel(): # check if parallel=True also generates correct # query results @@ -972,7 +973,7 @@ def test_ckdtree_parallel(): assert_array_equal(T1, T2) assert_array_equal(T1, T3) -def test_ckdtree_view(): +def test_ckdtree_view(): # Check that the nodes can be correctly viewed from Python. # This test also sanity checks each node in the cKDTree, and # thus verifies the internal structure of the kd-tree. 
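The `output_type` variants exercised by `test_ckdtree_return_types` above can also be
checked standalone; a minimal sketch, assuming two small random trees (the sizes, seed,
and cutoff here are arbitrary illustrations, not values from the patch):

    import numpy as np
    from scipy.spatial import cKDTree

    rng = np.random.RandomState(0)
    T1 = cKDTree(rng.randn(10, 3))
    T2 = cKDTree(rng.randn(10, 3))

    # Same nonzero distances, different containers ('dok_matrix' is the default;
    # 'dict', 'ndarray' and 'coo_matrix' are the other accepted output types).
    dok = T1.sparse_distance_matrix(T2, max_distance=1.0)
    coo = T1.sparse_distance_matrix(T2, max_distance=1.0, output_type='coo_matrix')
    assert np.allclose(dok.todense(), coo.todense())
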
@@ -981,11 +982,11 @@ def test_ckdtree_view(): k = 4 points = np.random.randn(n, k) kdtree = cKDTree(points) - + # walk the whole kd-tree and sanity check each node def recurse_tree(n): - assert_(isinstance(n, cKDTreeNode)) - if n.split_dim == -1: + assert_(isinstance(n, cKDTreeNode)) + if n.split_dim == -1: assert_(n.lesser is None) assert_(n.greater is None) assert_(n.indices.shape[0] <= kdtree.leafsize) @@ -995,7 +996,7 @@ def recurse_tree(n): x = n.lesser.data_points[:, n.split_dim] y = n.greater.data_points[:, n.split_dim] assert_(x.max() < y.min()) - + recurse_tree(kdtree.tree) # check that indices are correctly retrieved n = kdtree.tree @@ -1058,7 +1059,7 @@ def test_ckdtree_box(): dd1, ii1 = kdtree.query(data + 1.0, k, p=p) assert_almost_equal(dd, dd1) assert_equal(ii, ii1) - + dd1, ii1 = kdtree.query(data - 1.0, k, p=p) assert_almost_equal(dd, dd1) assert_equal(ii, ii1) @@ -1121,7 +1122,10 @@ def simulate_periodic_box(kdtree, data, k, boxsize, p): result['dd'][:] = dd result.sort(order='dd') return result['dd'][:, :k], result['ii'][:,:k] - + + +@pytest.mark.skipif(python_implementation() == 'PyPy', + reason="Fails on PyPy CI runs. See #9455") def test_ckdtree_memuse(): # unit test adaptation of gh-5630 @@ -1177,13 +1181,13 @@ def test_ckdtree_weights(): for i in range(10): # since weights are uniform, these shall agree: c1 = tree1.count_neighbors(tree1, np.linspace(0, 10, i)) - c2 = tree1.count_neighbors(tree1, np.linspace(0, 10, i), + c2 = tree1.count_neighbors(tree1, np.linspace(0, 10, i), weights=(weights, weights)) - c3 = tree1.count_neighbors(tree1, np.linspace(0, 10, i), + c3 = tree1.count_neighbors(tree1, np.linspace(0, 10, i), weights=(weights, None)) - c4 = tree1.count_neighbors(tree1, np.linspace(0, 10, i), + c4 = tree1.count_neighbors(tree1, np.linspace(0, 10, i), weights=(None, weights)) - c5 = tree1.count_neighbors(tree1, np.linspace(0, 10, i), + c5 = tree1.count_neighbors(tree1, np.linspace(0, 10, i), weights=weights) assert_array_equal(c1, c2) @@ -1222,12 +1226,12 @@ def test_ckdtree_count_neighbous_multiple_r(): nnc = kdtree.count_neighbors(kdtree, r0, cumulative=False) assert_equal(n0, nnc.cumsum()) - for i, r in zip(itertools.permutations(i0), + for i, r in zip(itertools.permutations(i0), itertools.permutations(r0)): - # permute n0 by i and it shall agree + # permute n0 by i and it shall agree n = kdtree.count_neighbors(kdtree, r) assert_array_equal(n, n0[list(i)]) - + def test_len0_arrays(): # make sure len-0 arrays are handled correctly # in range queries (gh-5639) @@ -1276,7 +1280,7 @@ def test_len0_arrays(): def test_ckdtree_duplicated_inputs(): # check ckdtree with duplicated inputs - n = 1024 + n = 1024 for m in range(1, 8): data = np.concatenate([ np.ones((n // 2, m)) * 1, From eb09c57ae4c8b3edc61ccf7c34bb39fe0b99c3aa Mon Sep 17 00:00:00 2001 From: Ilhan Polat Date: Mon, 19 Nov 2018 22:38:05 +0100 Subject: [PATCH 24/70] CI: MAINT: skip test_arnoldi on pypy runs --- .../sparse/linalg/isolve/tests/test_lgmres.py | 41 ++++++++++++------- scipy/spatial/tests/test_kdtree.py | 2 +- 2 files changed, 27 insertions(+), 16 deletions(-) diff --git a/scipy/sparse/linalg/isolve/tests/test_lgmres.py b/scipy/sparse/linalg/isolve/tests/test_lgmres.py index 30134f2ddfe8..34f248bedddc 100644 --- a/scipy/sparse/linalg/isolve/tests/test_lgmres.py +++ b/scipy/sparse/linalg/isolve/tests/test_lgmres.py @@ -5,6 +5,9 @@ from numpy.testing import assert_, assert_allclose, assert_equal +import pytest +from platform import python_implementation + import numpy as np from numpy import 
zeros, array, allclose from scipy.linalg import norm @@ -16,13 +19,13 @@ from scipy._lib._numpy_compat import suppress_warnings -Am = csr_matrix(array([[-2,1,0,0,0,9], - [1,-2,1,0,5,0], - [0,1,-2,1,0,0], - [0,0,1,-2,1,0], - [0,3,0,1,-2,1], - [1,0,0,0,1,-2]])) -b = array([1,2,3,4,5,6]) +Am = csr_matrix(array([[-2, 1, 0, 0, 0, 9], + [1, -2, 1, 0, 5, 0], + [0, 1, -2, 1, 0, 0], + [0, 0, 1, -2, 1, 0], + [0, 3, 0, 1, -2, 1], + [1, 0, 0, 0, 1, -2]])) +b = array([1, 2, 3, 4, 5, 6]) count = [0] @@ -38,7 +41,8 @@ def do_solve(**kw): count[0] = 0 with suppress_warnings() as sup: sup.filter(DeprecationWarning, ".*called without specifying.*") - x0, flag = lgmres(A, b, x0=zeros(A.shape[0]), inner_m=6, tol=1e-14, **kw) + x0, flag = lgmres(A, b, x0=zeros(A.shape[0]), + inner_m=6, tol=1e-14, **kw) count_0 = count[0] assert_(allclose(A*x0, b, rtol=1e-12, atol=1e-12), norm(A*x0-b)) return x0, count_0 @@ -65,7 +69,8 @@ def test_outer_v(self): assert_(len(outer_v) > 0) assert_(len(outer_v) <= 6) - x1, count_1 = do_solve(outer_k=6, outer_v=outer_v, prepend_outer_v=True) + x1, count_1 = do_solve(outer_k=6, outer_v=outer_v, + prepend_outer_v=True) assert_(count_1 == 2, count_1) assert_(count_1 < count_0/2) assert_(allclose(x1, x0, rtol=1e-14)) @@ -73,27 +78,33 @@ def test_outer_v(self): # --- outer_v = [] - x0, count_0 = do_solve(outer_k=6, outer_v=outer_v, store_outer_Av=False) + x0, count_0 = do_solve(outer_k=6, outer_v=outer_v, + store_outer_Av=False) assert_(array([v[1] is None for v in outer_v]).all()) assert_(len(outer_v) > 0) assert_(len(outer_v) <= 6) - x1, count_1 = do_solve(outer_k=6, outer_v=outer_v, prepend_outer_v=True) + x1, count_1 = do_solve(outer_k=6, outer_v=outer_v, + prepend_outer_v=True) assert_(count_1 == 3, count_1) assert_(count_1 < count_0/2) assert_(allclose(x1, x0, rtol=1e-14)) + @pytest.mark.skipif(python_implementation() == 'PyPy', + reason="Fails on PyPy CI runs. See #9507") def test_arnoldi(self): np.random.rand(1234) - A = eye(10000) + rand(10000,10000,density=1e-4) + A = eye(10000) + rand(10000, 10000, density=1e-4) b = np.random.rand(10000) # The inner arnoldi should be equivalent to gmres with suppress_warnings() as sup: sup.filter(DeprecationWarning, ".*called without specifying.*") - x0, flag0 = lgmres(A, b, x0=zeros(A.shape[0]), inner_m=15, maxiter=1) - x1, flag1 = gmres(A, b, x0=zeros(A.shape[0]), restart=15, maxiter=1) + x0, flag0 = lgmres(A, b, x0=zeros(A.shape[0]), + inner_m=15, maxiter=1) + x1, flag1 = gmres(A, b, x0=zeros(A.shape[0]), + restart=15, maxiter=1) assert_equal(flag0, 1) assert_equal(flag1, 1) @@ -134,7 +145,7 @@ def test_cornercase(self): def test_nans(self): A = eye(3, format='lil') - A[1,1] = np.nan + A[1, 1] = np.nan b = np.ones(3) with suppress_warnings() as sup: diff --git a/scipy/spatial/tests/test_kdtree.py b/scipy/spatial/tests/test_kdtree.py index b8b5ce725c5b..9a5cfa7e0f0e 100644 --- a/scipy/spatial/tests/test_kdtree.py +++ b/scipy/spatial/tests/test_kdtree.py @@ -1125,7 +1125,7 @@ def simulate_periodic_box(kdtree, data, k, boxsize, p): @pytest.mark.skipif(python_implementation() == 'PyPy', - reason="Fails on PyPy CI runs. See #9455") + reason="Fails on PyPy CI runs. 
See #9507") def test_ckdtree_memuse(): # unit test adaptation of gh-5630 From f9ad4bda4a037768768811b5d4dcb3eb79773aaa Mon Sep 17 00:00:00 2001 From: Tyler Reddy Date: Thu, 15 Nov 2018 18:40:03 -0800 Subject: [PATCH 25/70] BUG: fix sparse random int handling * TestConstructUtils::test_random_sampling fails in some SciPy wheel build scenarios because of incorrect handling of np.int32/64 limits in the compatibility layer; this commit attempts to fix that behavior --- scipy/_lib/_numpy_compat.py | 28 ++++------------------------ 1 file changed, 4 insertions(+), 24 deletions(-) diff --git a/scipy/_lib/_numpy_compat.py b/scipy/_lib/_numpy_compat.py index 876f5961edfa..c4875a22d1b6 100644 --- a/scipy/_lib/_numpy_compat.py +++ b/scipy/_lib/_numpy_compat.py @@ -94,30 +94,10 @@ def get_randint(random_state): # In NumPy versions previous to 1.11.0 the randint funtion and the randint # method of RandomState does only work with int32 values. def get_randint(random_state): - def randint_patched(*args, **kwargs): - try: - low = args[0] - except IndexError: - low = None - high = kwargs.pop('high', None) - dtype = kwargs.pop('dtype', None) - - if high is None: - high = low - low = 0 - - low_min = np.iinfo(np.int32).min - if low is None: - low = low_min - else: - low = max(low, low_min) - high_max = np.iinfo(np.int32).max - if high is None: - high = high_max - else: - high = min(high, high_max) - - integers = random_state.randint(low, high=high, **kwargs) + def randint_patched(low, high, size, dtype=np.int32): + low = max(low, np.iinfo(dtype).min) + high = min(high, np.iinfo(dtype).max) + integers = random_state.randint(low, high=high, size=size) return integers.astype(dtype, copy=False) return randint_patched From 9e81f039c6aff95af70940f8b5f19fad6d2295f5 Mon Sep 17 00:00:00 2001 From: Dowon Date: Tue, 20 Nov 2018 12:12:49 +0900 Subject: [PATCH 26/70] Update test_stats.py - test method is changed (assert_allclose()) - test_ma is moved to test_mstats_basic.py --- scipy/stats/tests/test_stats.py | 74 ++++++++++----------------------- 1 file changed, 23 insertions(+), 51 deletions(-) diff --git a/scipy/stats/tests/test_stats.py b/scipy/stats/tests/test_stats.py index ca4f81c83624..b94c26890e18 100644 --- a/scipy/stats/tests/test_stats.py +++ b/scipy/stats/tests/test_stats.py @@ -3677,19 +3677,16 @@ def do(self, a, b, axis=None, dtype=None): assert_equal(x.dtype, dtype) -class GeoMeanTestMethod(object): - def equal_test(self, array_like, desired, axis=None, dtype=None, decimal=7): +class StatsTestMethod(object): + def equal_test(self, array_like, desired, axis=None, dtype=None, significant=7): # Note this doesn't test when axis is not specified + rtol = np.float_power(10, -1.0 * significant) x = stats.gmean(array_like, axis=axis, dtype=dtype) - assert_almost_equal(desired, x, decimal=decimal) + assert_allclose(x, desired, rtol=rtol) assert_equal(x.dtype, dtype) - def approx_test(self, array_like, desired, axis=None, dtype=None, significant=7): - x = stats.gmean(array_like, axis=axis, dtype=dtype) - assert_approx_equal(desired, x, significant=significant) - assert_equal(x.dtype, dtype) -class TestGeoMean(GeoMeanTestMethod): +class TestGeoMean(StatsTestMethod): def test_1d_list(self): # Test a 1d list a = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100] @@ -3698,7 +3695,7 @@ def test_1d_list(self): a = [1, 2, 3, 4] desired = power(1 * 2 * 3 * 4, 1. / 4.) 
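The replacement `randint_patched` above simply clamps the requested bounds to the
dtype's representable range before sampling; a minimal standalone sketch of the same
idea (the helper name is illustrative, not part of the patch):

    import numpy as np

    def clamped_randint(random_state, low, high, size, dtype=np.int32):
        # Mirror randint_patched: clamp to the dtype limits, sample, then cast.
        low = max(low, np.iinfo(dtype).min)
        high = min(high, np.iinfo(dtype).max)
        return random_state.randint(low, high=high, size=size).astype(dtype,
                                                                      copy=False)

    # Bounds far outside int32 are clamped rather than overflowing.
    print(clamped_randint(np.random.RandomState(0), -2**40, 2**40, size=3))
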
- self.equal_test(a, desired, decimal=14) + self.equal_test(a, desired, significant=14) desired1 = stats.gmean(a, axis=-1) self.equal_test(a, desired1, axis=-1) @@ -3716,18 +3713,6 @@ def test_1d_array(self): desired1 = stats.gmean(a, axis=-1) self.equal_test(a, desired1, axis=-1, dtype=float32) - def test_1d_ma(self): - # Test a 1d masked array - a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]) - b = 45.2872868812 - self.equal_test(a, b) - - def test_1d_ma_value(self): - # Test a 1d masked array with a masked value - a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100], mask=[0,0,0,0,0,0,0,0,0,1]) - b = 41.4716627439 - self.equal_test(a, b) - # Note the next tests use axis=None as default, not axis=0 def test_2d_list(self): # Test a 2d list @@ -3741,12 +3726,6 @@ def test_2d_array(self): b = 52.8885199 self.equal_test(array(a), b) - def test_2d_ma(self): - # Test a 2d masked array - a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] - b = 52.8885199 - self.equal_test(np.ma.array(a), b) - def test_2d_axis0(self): # Test a 2d list with axis=0 a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] @@ -3755,11 +3734,11 @@ def test_2d_axis0(self): a = array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) desired = array([1, 2, 3, 4]) - self.equal_test(a, desired, axis=0, decimal=14) + self.equal_test(a, desired, axis=0, significant=14) a = array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) desired = stats.gmean(a, axis=0) - self.equal_test(a, desired, axis=0, decimal=14) + self.equal_test(a, desired, axis=0, significant=14) def test_2d_axis1(self): # Test a 2d list with axis=1 @@ -3770,7 +3749,7 @@ def test_2d_axis1(self): a = array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) v = power(1 * 2 * 3 * 4, 1. / 4.) desired = array([v, v, v]) - self.equal_test(a, desired, axis=1, decimal=14) + self.equal_test(a, desired, axis=1, significant=14) def test_2d_matrix_axis0(self): # Test a 2d list with axis=0 @@ -3778,15 +3757,28 @@ def test_2d_matrix_axis0(self): b = np.matrix([[35.56893304, 49.32424149, 61.3579244, 72.68482371]]) self.equal_test(np.matrix(a), b, axis=0) + a = array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) + desired = np.matrix([1, 2, 3, 4]) + self.equal_test(np.matrix(a), desired, axis=0, significant=14) + + a = array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) + desired = np.matrix(stats.gmean(a, axis=0)) + self.equal_test(np.matrix(a), desired, axis=0, significant=14) + def test_2d_matrix_axis1(self): # Test a 2d list with axis=1 a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] b = np.matrix([[22.13363839, 64.02171746, 104.40086817]]).T self.equal_test(np.matrix(a), b, axis=1) + a = array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) + v = power(1 * 2 * 3 * 4, 1. / 4.) 
+ desired = np.matrix([[v], [v], [v]]) + self.equal_test(np.matrix(a), desired, axis=1, significant=14) + def test_large_values(self): a = array([1e100, 1e200, 1e300]) - self.approx_test(a, 1e200, significant=13) + self.equal_test(a, 1e200, significant=13) def test_1d_list0(self): # Test a 1d list with zero element @@ -3808,26 +3800,6 @@ def test_1d_array0(self): finally: np.seterr(**olderr) - def test_1d_ma0(self): - # Test a 1d masked array with zero element - a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 0]) - b = 41.4716627439 - olderr = np.seterr(all='ignore') - try: - self.equal_test(a, b) - finally: - np.seterr(**olderr) - - def test_1d_ma_inf(self): - # Test a 1d masked array with negative element - a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, -1]) - b = 41.4716627439 - olderr = np.seterr(all='ignore') - try: - self.equal_test(a, b) - finally: - np.seterr(**olderr) - def test_binomtest(): # precision tests compared to R for ticket:986 From 823a5624f0a61684df77791101f2cf0ec8c461a3 Mon Sep 17 00:00:00 2001 From: Dowon Date: Tue, 20 Nov 2018 12:13:53 +0900 Subject: [PATCH 27/70] Update test_mstats_basic.py - class name is changed TestGeoMean - added some test cases from test_stats.py --- scipy/stats/tests/test_mstats_basic.py | 92 ++++++++++++++++++-------- 1 file changed, 66 insertions(+), 26 deletions(-) diff --git a/scipy/stats/tests/test_mstats_basic.py b/scipy/stats/tests/test_mstats_basic.py index 6ce2b192f183..4616f334b47e 100644 --- a/scipy/stats/tests/test_mstats_basic.py +++ b/scipy/stats/tests/test_mstats_basic.py @@ -42,48 +42,88 @@ def test_mquantiles_limit_keyword(self): assert_almost_equal(quants, desired) -class TestGMean(object): - def test_1D(self): - a = (1, 2, 3, 4) - actual = mstats.gmean(a) +class MStatsTestMethod(object): + def equal_test(self, array_like, desired, axis=None, dtype=None, significant=7): + # Note this doesn't test when axis is not specified + rtol = np.float_power(10, -1.0 * significant) + x = mstats.gmean(array_like, axis=axis, dtype=dtype) + assert_allclose(x, desired, rtol=rtol) + assert_equal(x.dtype, dtype) + + +class TestGeoMean(MStatsTestMethod): + def test_1d(self): + a = [1, 2, 3, 4] desired = np.power(1*2*3*4, 1./4.) - assert_almost_equal(actual, desired, decimal=14) + self.equal_test(a, desired, significant=14) desired1 = mstats.gmean(a, axis=-1) - assert_almost_equal(actual, desired1, decimal=14) + self.equal_test(a, desired1, significant=14) assert_(not isinstance(desired1, ma.MaskedArray)) - a = ma.array((1, 2, 3, 4), mask=(0, 0, 0, 1)) - actual = mstats.gmean(a) + def test_1d_ma(self): + # Test a 1d masked array + a = ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]) + b = 45.2872868812 + self.equal_test(a, b) + + a = ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1]) desired = np.power(1*2*3, 1./3.) 
- assert_almost_equal(actual, desired, decimal=14) + self.equal_test(a, desired, significant=14) desired1 = mstats.gmean(a, axis=-1) - assert_almost_equal(actual, desired1, decimal=14) + self.equal_test(a, desired1, significant=14) + + def test_1d_ma_value(self): + # Test a 1d masked array with a masked value + a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100], mask=[0, 0, 0, 0, 0, 0, 0, 0, 0, 1]) + b = 41.4716627439 + self.equal_test(a, b) + + def test_1d_ma0(self): + # Test a 1d masked array with zero element + a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 0]) + b = 41.4716627439 + olderr = np.seterr(all='ignore') + try: + self.equal_test(a, b) + finally: + np.seterr(**olderr) + + def test_1d_ma_inf(self): + # Test a 1d masked array with negative element + a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, -1]) + b = 41.4716627439 + olderr = np.seterr(all='ignore') + try: + self.equal_test(a, b) + finally: + np.seterr(**olderr) @pytest.mark.skipif(not hasattr(np, 'float96'), reason='cannot find float96 so skipping') - def test_1D_float96(self): - a = ma.array((1, 2, 3, 4), mask=(0, 0, 0, 1)) - actual_dt = mstats.gmean(a, dtype=np.float96) + def test_1d_float96(self): + a = ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1]) desired_dt = np.power(1*2*3, 1./3.).astype(np.float96) - assert_almost_equal(actual_dt, desired_dt, decimal=14) - assert_(actual_dt.dtype == desired_dt.dtype) + self.equal_test(a, desired_dt, dtype=np.float96, significant=14) - def test_2D(self): - a = ma.array(((1, 2, 3, 4), (1, 2, 3, 4), (1, 2, 3, 4)), - mask=((0, 0, 0, 0), (1, 0, 0, 1), (0, 1, 1, 0))) - actual = mstats.gmean(a) - desired = np.array((1, 2, 3, 4)) - assert_array_almost_equal(actual, desired, decimal=14) + def test_2d_ma(self): + a = ma.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]], + mask=[[0, 0, 0, 0], [1, 0, 0, 1], [0, 1, 1, 0]]) + desired = np.array([1, 2, 3, 4]) + self.equal_test(a, desired, axis=0, significant=14) desired1 = mstats.gmean(a, axis=0) - assert_array_almost_equal(actual, desired1, decimal=14) + self.equal_test(a, desired1, axis=0, significant=14) - actual = mstats.gmean(a, -1) - desired = ma.array((np.power(1*2*3*4, 1./4.), + desired = ma.array([np.power(1*2*3*4, 1./4.), np.power(2*3, 1./2.), - np.power(1*4, 1./2.))) - assert_array_almost_equal(actual, desired, decimal=14) + np.power(1*4, 1./2.)]) + self.equal_test(a, desired, axis=-1, significant=14) + + # Test a 2d masked array + a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] + b = 52.8885199 + self.equal_test(np.ma.array(a), b) class TestHMean(object): From ce45f07696d6b25877db2b06a228d15e5c4f8aee Mon Sep 17 00:00:00 2001 From: ilayn Date: Tue, 20 Nov 2018 12:02:43 +0100 Subject: [PATCH 28/70] DOC: Update rel notes for removed funcs/aliases [ci skip] --- doc/release/1.3.0-notes.rst | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/doc/release/1.3.0-notes.rst b/doc/release/1.3.0-notes.rst index 76aa585ac677..443bb0426184 100644 --- a/doc/release/1.3.0-notes.rst +++ b/doc/release/1.3.0-notes.rst @@ -37,6 +37,15 @@ Deprecated features Backwards incompatible changes ============================== +Functions from ``scipy.interpolate`` (``spleval``, ``spline``, ``splmake``, +and ``spltopp``) and functions from ``scipy.misc`` (``bytescale``, ``fromimage``, +``imfilter``, ``imread``, ``imresize``, ``imrotate``, ``imsave``, ``imshow``, +``toimage``) have been removed. The former set has been deprecated since v0.19.0 +and the latter has been deprecated since v1.0.0. 
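For the removed interpolate helpers, the deprecation messages earlier in this series
point at the modern replacement; a hedged migration sketch (the data below is
arbitrary, chosen only to make the snippet self-contained):

    import numpy as np
    from scipy.interpolate import make_interp_spline

    x = np.linspace(0, 10, 11)
    y = np.sin(x)
    xnew = np.linspace(0, 10, 101)

    # One call replaces the removed splmake/spleval/spline trio;
    # make_interp_spline returns a BSpline, which is evaluated directly.
    ynew = make_interp_spline(x, y, k=3)(xnew)

The removed `imread` from `scipy.misc`/`scipy.ndimage` is covered by
`matplotlib.pyplot.imread`, per the deprecation message in the deleted
`ndimage/io.py` above.
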
+Similarly, aliases from ``scipy.misc`` which have been deprecated since v1.0.0 are +removed. `SciPy documentation for v1.1.0 `__ +can be used to track the new import locations for the relocated functions. + Other changes ============= From e7ef8a263f405755b49bbca65d1b6fcf5cad78f4 Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Wed, 21 Nov 2018 02:14:49 +0300 Subject: [PATCH 29/70] DEP: interpolate: remove deprecated interpolate_wrapper Removed for Scipy 1.3.0, has been deprecated since 1.0.0. --- scipy/interpolate/interpolate_wrapper.py | 187 ------------ scipy/interpolate/setup.py | 5 - scipy/interpolate/src/_interpolate.cpp | 266 ------------------ scipy/interpolate/src/interpolate.h | 205 -------------- .../tests/test_interpolate_wrapper.py | 81 ------ 5 files changed, 744 deletions(-) delete mode 100644 scipy/interpolate/interpolate_wrapper.py delete mode 100644 scipy/interpolate/src/_interpolate.cpp delete mode 100644 scipy/interpolate/src/interpolate.h delete mode 100644 scipy/interpolate/tests/test_interpolate_wrapper.py diff --git a/scipy/interpolate/interpolate_wrapper.py b/scipy/interpolate/interpolate_wrapper.py deleted file mode 100644 index b07d54de6c27..000000000000 --- a/scipy/interpolate/interpolate_wrapper.py +++ /dev/null @@ -1,187 +0,0 @@ -""" helper_funcs.py. - scavenged from enthought,interpolate -""" -from __future__ import division, print_function, absolute_import - -import numpy as np -from . import _interpolate # C extension. Does all the real work. - - -def atleast_1d_and_contiguous(ary, dtype=np.float64): - return np.atleast_1d(np.ascontiguousarray(ary, dtype)) - - -@np.deprecate(message="'nearest' is deprecated in SciPy 1.0.0") -def nearest(x, y, new_x): - """ - Rounds each new x to nearest input x and returns corresponding input y. - - Parameters - ---------- - x : array_like - Independent values. - y : array_like - Dependent values. - new_x : array_like - The x values to return the interpolate y values. - - Returns - ------- - nearest : ndarray - Rounds each `new_x` to nearest `x` and returns the corresponding `y`. - - """ - shifted_x = np.concatenate((np.array([x[0]-1]), x[0:-1])) - - midpoints_of_x = atleast_1d_and_contiguous(.5*(x + shifted_x)) - new_x = atleast_1d_and_contiguous(new_x) - - TINY = 1e-10 - indices = np.searchsorted(midpoints_of_x, new_x+TINY)-1 - indices = np.atleast_1d(np.clip(indices, 0, np.Inf).astype(int)) - new_y = np.take(y, indices, axis=-1) - - return new_y - - -@np.deprecate(message="'linear' is deprecated in SciPy 1.0.0") -def linear(x, y, new_x): - """ - Linearly interpolates values in new_x based on the values in x and y - - Parameters - ---------- - x : array_like - Independent values - y : array_like - Dependent values - new_x : array_like - The x values to return the interpolated y values. - - """ - x = atleast_1d_and_contiguous(x, np.float64) - y = atleast_1d_and_contiguous(y, np.float64) - new_x = atleast_1d_and_contiguous(new_x, np.float64) - - if y.ndim > 2: - raise ValueError("`linear` only works with 1-D or 2-D arrays.") - if len(y.shape) == 2: - new_y = np.zeros((y.shape[0], len(new_x)), np.float64) - for i in range(len(new_y)): # for each row - _interpolate.linear_dddd(x, y[i], new_x, new_y[i]) - else: - new_y = np.zeros(len(new_x), np.float64) - _interpolate.linear_dddd(x, y, new_x, new_y) - - return new_y - - -@np.deprecate(message="'logarithmic' is deprecated in SciPy 1.0.0") -def logarithmic(x, y, new_x): - """ - Linearly interpolates values in new_x based in the log space of y. 
- - Parameters - ---------- - x : array_like - Independent values. - y : array_like - Dependent values. - new_x : array_like - The x values to return interpolated y values at. - - """ - x = atleast_1d_and_contiguous(x, np.float64) - y = atleast_1d_and_contiguous(y, np.float64) - new_x = atleast_1d_and_contiguous(new_x, np.float64) - - if y.ndim > 2: - raise ValueError("`linear` only works with 1-D or 2-D arrays.") - if len(y.shape) == 2: - new_y = np.zeros((y.shape[0], len(new_x)), np.float64) - for i in range(len(new_y)): - _interpolate.loginterp_dddd(x, y[i], new_x, new_y[i]) - else: - new_y = np.zeros(len(new_x), np.float64) - _interpolate.loginterp_dddd(x, y, new_x, new_y) - - return new_y - - -@np.deprecate(message="'block_average_above' is deprecated in SciPy 1.0.0") -def block_average_above(x, y, new_x): - """ - Linearly interpolates values in new_x based on the values in x and y. - - Parameters - ---------- - x : array_like - Independent values. - y : array_like - Dependent values. - new_x : array_like - The x values to interpolate y values. - - """ - bad_index = None - x = atleast_1d_and_contiguous(x, np.float64) - y = atleast_1d_and_contiguous(y, np.float64) - new_x = atleast_1d_and_contiguous(new_x, np.float64) - - if y.ndim > 2: - raise ValueError("`linear` only works with 1-D or 2-D arrays.") - if len(y.shape) == 2: - new_y = np.zeros((y.shape[0], len(new_x)), np.float64) - for i in range(len(new_y)): - bad_index = _interpolate.block_averave_above_dddd(x, y[i], - new_x, new_y[i]) - if bad_index is not None: - break - else: - new_y = np.zeros(len(new_x), np.float64) - bad_index = _interpolate.block_average_above_dddd(x, y, new_x, new_y) - - if bad_index is not None: - msg = "block_average_above cannot extrapolate and new_x[%d]=%f "\ - "is out of the x range (%f, %f)" % \ - (bad_index, new_x[bad_index], x[0], x[-1]) - raise ValueError(msg) - - return new_y - - -@np.deprecate(message="'block' is deprecated in SciPy 1.0.0") -def block(x, y, new_x): - """ - Essentially a step function. - - For each `new_x`, finds largest j such that``x[j] < new_x[j]`` and - returns ``y[j]``. - - Parameters - ---------- - x : array_like - Independent values. - y : array_like - Dependent values. - new_x : array_like - The x values used to calculate the interpolated y. - - Returns - ------- - block : ndarray - Return array, of same length as `x_new`. - - """ - # find index of values in x that precede values in x - # This code is a little strange -- we really want a routine that - # returns the index of values where x[j] < x[index] - TINY = 1e-10 - indices = np.searchsorted(x, new_x+TINY)-1 - - # If the value is at the front of the list, it'll have -1. - # In this case, we will use the first (0), element in the array. 
- # take requires the index array to be an Int - indices = np.atleast_1d(np.clip(indices, 0, np.Inf).astype(int)) - new_y = np.take(y, indices, axis=-1) - return new_y diff --git a/scipy/interpolate/setup.py b/scipy/interpolate/setup.py index b1076ed71445..4e4f41d66114 100644 --- a/scipy/interpolate/setup.py +++ b/scipy/interpolate/setup.py @@ -39,11 +39,6 @@ def configuration(parent_package='',top_path=None): depends=fitpack_src, ) - config.add_extension('_interpolate', - sources=['src/_interpolate.cpp'], - include_dirs=['src'], - depends=['src/interpolate.h']) - config.add_data_dir('tests') return config diff --git a/scipy/interpolate/src/_interpolate.cpp b/scipy/interpolate/src/_interpolate.cpp deleted file mode 100644 index c4c5e7436be2..000000000000 --- a/scipy/interpolate/src/_interpolate.cpp +++ /dev/null @@ -1,266 +0,0 @@ -#include "Python.h" -#include - -#include "interpolate.h" -#include "numpy/arrayobject.h" - -using namespace std; - -extern "C" { - -static PyObject* linear_method(PyObject*self, PyObject* args, PyObject* kywds) -{ - static char const *kwlist[] = {"x","y","new_x","new_y", NULL}; - PyObject *py_x, *py_y, *py_new_x, *py_new_y; - py_x = py_y = py_new_x = py_new_y = NULL; - PyObject *arr_x, *arr_y, *arr_new_x, *arr_new_y; - arr_x = arr_y = arr_new_x = arr_new_y = NULL; - - if(!PyArg_ParseTupleAndKeywords(args, kywds, "OOOO:linear_dddd", - const_cast(kwlist), &py_x, &py_y, - &py_new_x, &py_new_y)) - return NULL; - arr_x = PyArray_FROMANY(py_x, NPY_DOUBLE, 1, 1, NPY_IN_ARRAY); - if (!arr_x) { - PyErr_SetString(PyExc_ValueError, "x must be a 1-D array of floats"); - goto fail; - } - arr_y = PyArray_FROMANY(py_y, NPY_DOUBLE, 1, 1, NPY_IN_ARRAY); - if (!arr_y) { - PyErr_SetString(PyExc_ValueError, "y must be a 1-D array of floats"); - goto fail; - } - arr_new_x = PyArray_FROMANY(py_new_x, NPY_DOUBLE, 1, 1, NPY_IN_ARRAY); - if (!arr_new_x) { - PyErr_SetString(PyExc_ValueError, "new_x must be a 1-D array of floats"); - goto fail; - } - arr_new_y = PyArray_FROMANY(py_new_y, NPY_DOUBLE, 1, 1, NPY_INOUT_ARRAY); - if (!arr_new_y) { - PyErr_SetString(PyExc_ValueError, "new_y must be a 1-D array of floats"); - goto fail; - } - - linear((double*)PyArray_DATA(arr_x), (double*)PyArray_DATA(arr_y), - PyArray_DIM(arr_x,0), (double*)PyArray_DATA(arr_new_x), - (double*)PyArray_DATA(arr_new_y), PyArray_DIM(arr_new_x,0)); - - Py_DECREF(arr_x); - Py_DECREF(arr_y); - Py_DECREF(arr_new_x); - Py_DECREF(arr_new_y); - - Py_RETURN_NONE; - -fail: - Py_XDECREF(arr_x); - Py_XDECREF(arr_y); - Py_XDECREF(arr_new_x); - Py_XDECREF(arr_new_y); - return NULL; -} - -static PyObject* loginterp_method(PyObject*self, PyObject* args, PyObject* kywds) -{ - static char const *kwlist[] = {"x","y","new_x","new_y", NULL}; - PyObject *py_x, *py_y, *py_new_x, *py_new_y; - py_x = py_y = py_new_x = py_new_y = NULL; - PyObject *arr_x, *arr_y, *arr_new_x, *arr_new_y; - arr_x = arr_y = arr_new_x = arr_new_y = NULL; - - if(!PyArg_ParseTupleAndKeywords(args, kywds, "OOOO:loginterp_dddd", - const_cast(kwlist), &py_x, &py_y, - &py_new_x, &py_new_y)) - return NULL; - arr_x = PyArray_FROMANY(py_x, NPY_DOUBLE, 1, 1, NPY_IN_ARRAY); - if (!arr_x) { - PyErr_SetString(PyExc_ValueError, "x must be a 1-D array of floats"); - goto fail; - } - arr_y = PyArray_FROMANY(py_y, NPY_DOUBLE, 1, 1, NPY_IN_ARRAY); - if (!arr_y) { - PyErr_SetString(PyExc_ValueError, "y must be a 1-D array of floats"); - goto fail; - } - arr_new_x = PyArray_FROMANY(py_new_x, NPY_DOUBLE, 1, 1, NPY_IN_ARRAY); - if (!arr_new_x) { - PyErr_SetString(PyExc_ValueError, 
"new_x must be a 1-D array of floats"); - goto fail; - } - arr_new_y = PyArray_FROMANY(py_new_y, NPY_DOUBLE, 1, 1, NPY_INOUT_ARRAY); - if (!arr_new_y) { - PyErr_SetString(PyExc_ValueError, "new_y must be a 1-D array of floats"); - goto fail; - } - - loginterp((double*)PyArray_DATA(arr_x), (double*)PyArray_DATA(arr_y), - PyArray_DIM(arr_x,0), (double*)PyArray_DATA(arr_new_x), - (double*)PyArray_DATA(arr_new_y), PyArray_DIM(arr_new_x,0)); - - Py_DECREF(arr_x); - Py_DECREF(arr_y); - Py_DECREF(arr_new_x); - Py_DECREF(arr_new_y); - - Py_RETURN_NONE; - -fail: - Py_XDECREF(arr_x); - Py_XDECREF(arr_y); - Py_XDECREF(arr_new_x); - Py_XDECREF(arr_new_y); - return NULL; -} - -static PyObject* window_average_method(PyObject*self, PyObject* args, PyObject* kywds) -{ - static char const *kwlist[] = {"x","y","new_x","new_y", NULL}; - PyObject *py_x, *py_y, *py_new_x, *py_new_y; - py_x = py_y = py_new_x = py_new_y = NULL; - PyObject *arr_x, *arr_y, *arr_new_x, *arr_new_y; - arr_x = arr_y = arr_new_x = arr_new_y = NULL; - double width; - - if(!PyArg_ParseTupleAndKeywords(args, kywds, "OOOOd:loginterp_dddd", - const_cast(kwlist), &py_x, &py_y, - &py_new_x, &py_new_y, &width)) - return NULL; - arr_x = PyArray_FROMANY(py_x, NPY_DOUBLE, 1, 1, NPY_IN_ARRAY); - if (!arr_x) { - PyErr_SetString(PyExc_ValueError, "x must be a 1-D array of floats"); - goto fail; - } - arr_y = PyArray_FROMANY(py_y, NPY_DOUBLE, 1, 1, NPY_IN_ARRAY); - if (!arr_y) { - PyErr_SetString(PyExc_ValueError, "y must be a 1-D array of floats"); - goto fail; - } - arr_new_x = PyArray_FROMANY(py_new_x, NPY_DOUBLE, 1, 1, NPY_IN_ARRAY); - if (!arr_new_x) { - PyErr_SetString(PyExc_ValueError, "new_x must be a 1-D array of floats"); - goto fail; - } - arr_new_y = PyArray_FROMANY(py_new_y, NPY_DOUBLE, 1, 1, NPY_INOUT_ARRAY); - if (!arr_new_y) { - PyErr_SetString(PyExc_ValueError, "new_y must be a 1-D array of floats"); - goto fail; - } - - window_average((double*)PyArray_DATA(arr_x), (double*)PyArray_DATA(arr_y), - PyArray_DIM(arr_x,0), (double*)PyArray_DATA(arr_new_x), - (double*)PyArray_DATA(arr_new_y), PyArray_DIM(arr_new_x,0), width); - - Py_DECREF(arr_x); - Py_DECREF(arr_y); - Py_DECREF(arr_new_x); - Py_DECREF(arr_new_y); - - Py_RETURN_NONE; - -fail: - Py_XDECREF(arr_x); - Py_XDECREF(arr_y); - Py_XDECREF(arr_new_x); - Py_XDECREF(arr_new_y); - return NULL; -} - -static PyObject* block_average_above_method(PyObject*self, PyObject* args, PyObject* kywds) -{ - static char const *kwlist[] = {"x","y","new_x","new_y", NULL}; - PyObject *py_x, *py_y, *py_new_x, *py_new_y; - py_x = py_y = py_new_x = py_new_y = NULL; - PyObject *arr_x, *arr_y, *arr_new_x, *arr_new_y; - arr_x = arr_y = arr_new_x = arr_new_y = NULL; - - if(!PyArg_ParseTupleAndKeywords(args, kywds, "OOOO:loginterp_dddd", - const_cast(kwlist), &py_x, &py_y, - &py_new_x, &py_new_y)) - return NULL; - arr_x = PyArray_FROMANY(py_x, NPY_DOUBLE, 1, 1, NPY_IN_ARRAY); - if (!arr_x) { - PyErr_SetString(PyExc_ValueError, "x must be a 1-D array of floats"); - goto fail; - } - arr_y = PyArray_FROMANY(py_y, NPY_DOUBLE, 1, 1, NPY_IN_ARRAY); - if (!arr_y) { - PyErr_SetString(PyExc_ValueError, "y must be a 1-D array of floats"); - goto fail; - } - arr_new_x = PyArray_FROMANY(py_new_x, NPY_DOUBLE, 1, 1, NPY_IN_ARRAY); - if (!arr_new_x) { - PyErr_SetString(PyExc_ValueError, "new_x must be a 1-D array of floats"); - goto fail; - } - arr_new_y = PyArray_FROMANY(py_new_y, NPY_DOUBLE, 1, 1, NPY_INOUT_ARRAY); - if (!arr_new_y) { - PyErr_SetString(PyExc_ValueError, "new_y must be a 1-D array of floats"); - goto fail; 
- } - - block_average_above((double*)PyArray_DATA(arr_x), (double*)PyArray_DATA(arr_y), - PyArray_DIM(arr_x,0), (double*)PyArray_DATA(arr_new_x), - (double*)PyArray_DATA(arr_new_y), PyArray_DIM(arr_new_x,0)); - - Py_DECREF(arr_x); - Py_DECREF(arr_y); - Py_DECREF(arr_new_x); - Py_DECREF(arr_new_y); - - Py_RETURN_NONE; - -fail: - Py_XDECREF(arr_x); - Py_XDECREF(arr_y); - Py_XDECREF(arr_new_x); - Py_XDECREF(arr_new_y); - return NULL; -} - -static PyMethodDef interpolate_methods[] = { - {"linear_dddd", (PyCFunction)linear_method, METH_VARARGS|METH_KEYWORDS, - ""}, - {"loginterp_dddd", (PyCFunction)loginterp_method, METH_VARARGS|METH_KEYWORDS, - ""}, - {"window_average_ddddd", (PyCFunction)window_average_method, METH_VARARGS|METH_KEYWORDS, - ""}, - {"block_average_above_dddd", (PyCFunction)block_average_above_method, METH_VARARGS|METH_KEYWORDS, - ""}, - {NULL, NULL, 0, NULL} -}; - -#if PY_VERSION_HEX >= 0x03000000 -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "_interpolate", - NULL, - -1, - interpolate_methods, - NULL, - NULL, - NULL, - NULL -}; - -PyObject *PyInit__interpolate(void) -{ - PyObject *m; - - m = PyModule_Create(&moduledef); - import_array(); - - return m; -} -#else -PyMODINIT_FUNC init_interpolate(void) -{ - PyObject* m; - m = Py_InitModule3("_interpolate", interpolate_methods, - "A few interpolation routines.\n" - ); - if (m == NULL) - return; - import_array(); -} -#endif -} // extern "C" diff --git a/scipy/interpolate/src/interpolate.h b/scipy/interpolate/src/interpolate.h deleted file mode 100644 index 0de37d1b2786..000000000000 --- a/scipy/interpolate/src/interpolate.h +++ /dev/null @@ -1,205 +0,0 @@ -#include -#include -#include -#include - -template -void linear(T* x_vec, T* y_vec, int len, - T* new_x_vec, T* new_y_vec, int new_len) -{ - for (int i=0;i=x_vec[len-1]) - index = len-2; - else - { - T* which = std::lower_bound(x_vec, x_vec+len, new_x); - index = which - x_vec-1; - } - - if(new_x == x_vec[index]) - { - // exact value - new_y_vec[i] = y_vec[index]; - } - else - { - //interpolate - double x_lo = x_vec[index]; - double x_hi = x_vec[index+1]; - double y_lo = y_vec[index]; - double y_hi = y_vec[index+1]; - double slope = (y_hi-y_lo)/(x_hi-x_lo); - new_y_vec[i] = slope * (new_x-x_lo) + y_lo; - } - } -} - -template -void loginterp(T* x_vec, T* y_vec, int len, - T* new_x_vec, T* new_y_vec, int new_len) -{ - for (int i=0;i=x_vec[len-1]) - index = len-2; - else - { - T* which = std::lower_bound(x_vec, x_vec+len, new_x); - index = which - x_vec-1; - } - - if(new_x == x_vec[index]) - { - // exact value - new_y_vec[i] = y_vec[index]; - } - else - { - //interpolate - double x_lo = x_vec[index]; - double x_hi = x_vec[index+1]; - double y_lo = log10(y_vec[index]); - double y_hi = log10(y_vec[index+1]); - double slope = (y_hi-y_lo)/(x_hi-x_lo); - new_y_vec[i] = pow(10.0, (slope * (new_x-x_lo) + y_lo)); - } - } -} - -template -int block_average_above(T* x_vec, T* y_vec, int len, - T* new_x_vec, T* new_y_vec, int new_len) -{ - int bad_index = -1; - int start_index = 0; - T last_y = 0.0; - T thickness = 0.0; - - for(int i=0;i x_vec[len-1])) - { - bad_index = i; - break; - } - else if (new_x == x_vec[0]) - { - // for the first sample, just return the cooresponding y value - new_y_vec[i] = y_vec[0]; - } - else - { - T* which = std::lower_bound(x_vec, x_vec+len, new_x); - int index = which - x_vec-1; - - // calculate weighted average - - // Start off with "residue" from last interval in case last x - // was between to samples. 
- T weighted_y_sum = last_y * thickness; - T thickness_sum = thickness; - for(int j=start_index; j<=index; j++) - { - if (x_vec[j+1] < new_x) - thickness = x_vec[j+1] - x_vec[j]; - else - thickness = new_x -x_vec[j]; - weighted_y_sum += y_vec[j] * thickness; - thickness_sum += thickness; - } - new_y_vec[i] = weighted_y_sum/thickness_sum; - - // Store the thickness between the x value and the next sample - // to add to the next weighted average. - last_y = y_vec[index]; - thickness = x_vec[index+1] - new_x; - - // start next weighted average at next sample - start_index =index+1; - } - } - return bad_index; -} - -template -int window_average(T* x_vec, T* y_vec, int len, - T* new_x_vec, T* new_y_vec, int new_len, - T width) -{ - for(int i=0;i= len) - { - //top = x_vec[len-1]; - top_index = len-1; - } - //std::cout << std::endl; - //std::cout << bottom_index << " " << top_index << std::endl; - //std::cout << bottom << " " << top << std::endl; - // calculate weighted average - T thickness =0.0; - T thickness_sum =0.0; - T weighted_y_sum =0.0; - for(int j=bottom_index; j < top_index; j++) - { - thickness = x_vec[j+1] - bottom; - weighted_y_sum += y_vec[j] * thickness; - thickness_sum += thickness; - bottom = x_vec[j+1]; - /* - std::cout << "iter: " << j - bottom_index << " " << - "index: " << j << " " << - "bottom: " << bottom << " " << - "x+1: " << x_vec[j+1] << " " << - "x: " << x_vec[j] << " " << - "y: " << y_vec[j] << " " << - "weighted_sum: " << weighted_y_sum << - "thickness: " << thickness << " " << - "thickness_sum: " << thickness_sum << std::endl; - */ - //std::cout << x_vec[j] << " "; - //std::cout << thickness << " "; - } - - // last element - thickness = top - bottom; - weighted_y_sum += y_vec[top_index] * thickness; - thickness_sum += thickness; - /* - std::cout << "iter: last" << " " << - "index: " << top_index << " " << - "x: " << x_vec[top_index] << " " << - "y: " << y_vec[top_index] << " " << - "weighted_sum: " << weighted_y_sum << - "thickness: " << thickness << " " << - "thickness_sum: " << thickness_sum << std::endl; - */ - //std::cout << x_vec[top_index] << " " << thickness_sum << std::endl; - new_y_vec[i] = weighted_y_sum/thickness_sum; - } - return -1; -} diff --git a/scipy/interpolate/tests/test_interpolate_wrapper.py b/scipy/interpolate/tests/test_interpolate_wrapper.py deleted file mode 100644 index 6b98a958ae6e..000000000000 --- a/scipy/interpolate/tests/test_interpolate_wrapper.py +++ /dev/null @@ -1,81 +0,0 @@ -""" module to test interpolate_wrapper.py -""" -from __future__ import division, print_function, absolute_import - -from numpy import arange, allclose, ones, isnan -import numpy as np -from numpy.testing import (assert_, assert_allclose) -from scipy._lib._numpy_compat import suppress_warnings - -# functionality to be tested -from scipy.interpolate.interpolate_wrapper import (linear, logarithmic, - block_average_above, nearest) - - -class Test(object): - - def assertAllclose(self, x, y, rtol=1.0e-5): - for i, xi in enumerate(x): - assert_(allclose(xi, y[i], rtol) or (isnan(xi) and isnan(y[i]))) - - def test_nearest(self): - N = 5 - x = arange(N) - y = arange(N) - with suppress_warnings() as sup: - sup.filter(DeprecationWarning, "`nearest` is deprecated") - assert_allclose(y, nearest(x, y, x+.1)) - assert_allclose(y, nearest(x, y, x-.1)) - - def test_linear(self): - N = 3000. 
-        x = arange(N)
-        y = arange(N)
-        new_x = arange(N)+0.5
-        with suppress_warnings() as sup:
-            sup.filter(DeprecationWarning, "`linear` is deprecated")
-            new_y = linear(x, y, new_x)
-
-        assert_allclose(new_y[:5], [0.5, 1.5, 2.5, 3.5, 4.5])
-
-    def test_block_average_above(self):
-        N = 3000
-        x = arange(N, dtype=float)
-        y = arange(N, dtype=float)
-
-        new_x = arange(N // 2) * 2
-        with suppress_warnings() as sup:
-            sup.filter(DeprecationWarning, "`block_average_above` is deprecated")
-            new_y = block_average_above(x, y, new_x)
-        assert_allclose(new_y[:5], [0.0, 0.5, 2.5, 4.5, 6.5])
-
-    def test_linear2(self):
-        N = 3000
-        x = arange(N, dtype=float)
-        y = ones((100,N)) * arange(N)
-        new_x = arange(N) + 0.5
-        with suppress_warnings() as sup:
-            sup.filter(DeprecationWarning, "`linear` is deprecated")
-            new_y = linear(x, y, new_x)
-        assert_allclose(new_y[:5,:5],
-                        [[0.5, 1.5, 2.5, 3.5, 4.5],
-                         [0.5, 1.5, 2.5, 3.5, 4.5],
-                         [0.5, 1.5, 2.5, 3.5, 4.5],
-                         [0.5, 1.5, 2.5, 3.5, 4.5],
-                         [0.5, 1.5, 2.5, 3.5, 4.5]])
-
-    def test_logarithmic(self):
-        N = 4000.
-        x = arange(N)
-        y = arange(N)
-        new_x = arange(N)+0.5
-        with suppress_warnings() as sup:
-            sup.filter(DeprecationWarning, "`logarithmic` is deprecated")
-            new_y = logarithmic(x, y, new_x)
-        correct_y = [np.NaN, 1.41421356, 2.44948974, 3.46410162, 4.47213595]
-        assert_allclose(new_y[:5], correct_y)
-
-    def runTest(self):
-        test_list = [name for name in dir(self) if name.find('test_') == 0]
-        for test_name in test_list:
-            exec("self.%s()" % test_name)

From 0e02a68dd2283c37ee5d2aa3d09e68ebff27fe63 Mon Sep 17 00:00:00 2001
From: Ilhan Polat
Date: Wed, 21 Nov 2018 21:45:13 +0100
Subject: [PATCH 30/70] DOC: Add deprecated alias functions [ci skip]

---
 doc/release/1.3.0-notes.rst | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/doc/release/1.3.0-notes.rst b/doc/release/1.3.0-notes.rst
index 443bb0426184..f65a36e8e3c0 100644
--- a/doc/release/1.3.0-notes.rst
+++ b/doc/release/1.3.0-notes.rst
@@ -42,8 +42,10 @@ and ``spltopp``) and functions from ``scipy.misc`` (``bytescale``, ``fromimage``
 ``imfilter``, ``imread``, ``imresize``, ``imrotate``, ``imsave``, ``imshow``,
 ``toimage``) have been removed. The former set has been deprecated since
 v0.19.0 and the latter has been deprecated since v1.0.0.
-Similarly, aliases from ``scipy.misc`` which have been deprecated since v1.0.0 are
-removed. `SciPy documentation for v1.1.0 `__
+Similarly, aliases from ``scipy.misc`` (``comb``, ``factorial``, ``factorial2``,
+``factorialk``, ``logsumexp``, ``pade``, ``info``, ``source``, ``who``) which have
+been deprecated since v1.0.0 are removed. `SciPy documentation for
+v1.1.0 `__
 can be used to track the new import locations for the relocated functions.


From 7a68a8665ac022107ba604f524072a61e189bf14 Mon Sep 17 00:00:00 2001
From: Paul van Mulbregt
Date: Wed, 21 Nov 2018 18:52:29 -0500
Subject: [PATCH 31/70] DOC: Replace "Numpy" with "NumPy" for consistency in
 the documentation:

Change Numpy in .rst and .txt doc files, and in the docstrings of .py
files. Release notes untouched.
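For reviewers, a rough sketch of the kind of mechanical rename performed
here; this is an illustrative reconstruction, not the actual command
used, and the path filtering is an assumption:

```python
# Illustrative sketch only -- not the tool actually used for this patch.
# The pattern is case-sensitive, so code identifiers such as ``numpy``
# are left alone; doc/release is skipped since release notes are untouched.
import pathlib
import re

for path in pathlib.Path('doc').rglob('*.rst'):
    if 'release' in path.parts:
        continue  # release notes deliberately untouched
    text = path.read_text()
    path.write_text(re.sub(r'\bNumpy\b', 'NumPy', text))
```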
--- HACKING.rst.txt | 6 +++--- doc/source/building/windows.rst | 10 +++++----- doc/source/dev/distributing.rst | 20 ++++++++++---------- doc/source/dev/releasing.rst | 4 ++-- doc/source/tutorial/basic.rst | 20 ++++++++++---------- doc/source/tutorial/general.rst | 4 ++-- doc/source/tutorial/io.rst | 2 +- doc/source/tutorial/linalg.rst | 2 +- doc/source/tutorial/signal.rst | 8 ++++---- runtests.py | 4 ++-- scipy/ndimage/measurements.py | 6 +++--- scipy/signal/ltisys.py | 2 +- scipy/signal/signaltools.py | 2 +- scipy/sparse/dok.py | 2 +- scipy/sparse/sparsetools.py | 2 +- scipy/special/_generate_pyx.py | 2 +- scipy/special/_logsumexp.py | 2 +- scipy/stats/_discrete_distns.py | 2 +- scipy/stats/stats.py | 2 +- setup.py | 4 ++-- 20 files changed, 53 insertions(+), 53 deletions(-) diff --git a/HACKING.rst.txt b/HACKING.rst.txt index 9dcee793babc..d2a9c6149d19 100644 --- a/HACKING.rst.txt +++ b/HACKING.rst.txt @@ -261,7 +261,7 @@ following, a *SciPy module* is defined as a Python package, say for instance ``yyy/_somemodule.py``. * User-visible functions should have good documentation following - the Numpy documentation style, see `how to document`_ + the NumPy documentation style, see `how to document`_ * The ``__init__.py`` of the module should contain the main reference documentation in its docstring. This is connected to the Sphinx @@ -277,7 +277,7 @@ following, a *SciPy module* is defined as a Python package, say See the existing Scipy submodules for guidance. -For further details on Numpy distutils, see: +For further details on NumPy distutils, see: https://github.com/numpy/numpy/blob/master/doc/DISTUTILS.rst.txt @@ -356,7 +356,7 @@ command ``source scipy-dev/bin/activate``, and ``deactivate`` to exit from the virtual environment and back to your previous shell. With scipy-dev activated, install first Scipy's dependencies:: - $ pip install Numpy pytest Cython + $ pip install NumPy pytest Cython After that, you can install a development version of Scipy, for example via:: diff --git a/doc/source/building/windows.rst b/doc/source/building/windows.rst index 7787c81fa2c5..ceb64b6ec85a 100644 --- a/doc/source/building/windows.rst +++ b/doc/source/building/windows.rst @@ -229,7 +229,7 @@ not be used. Attempting to build with the MSYS2 Python will not work correctly.* Please note that this is a simpler procedure than what is used for the official binaries. **Your binaries will only work with the latest NumPy (v1.14.0dev and higher)**. For -building against older Numpy versions, see `Building Against an Older Numpy Version`_. +building against older NumPy versions, see `Building Against an Older NumPy Version`_. Make sure that you are in the same directory where ``setup.py`` is (you should be if you have not changed directories): @@ -257,13 +257,13 @@ Congratulatations, you've built SciPy! .. _`pre-built zip files`: https://3f23b170c54c2533c070-1c8a9b3114517dc5fe17b7c3f8c63a43.ssl.cf2.rackcdn.com/ .. _WindowsCompilers: https://wiki.python.org/moin/WindowsCompilers -Building Against an Older Numpy Version +Building Against an Older NumPy Version --------------------------------------- If you want to build SciPy to work with an older numpy version, then you will need -to replace the Numpy "distutils" folder with the folder from the latest numpy. -The following powershell snippet can upgrade Numpy distutils while retaining an older -Numpy ABI_. +to replace the NumPy "distutils" folder with the folder from the latest numpy. 
+The following powershell snippet can upgrade NumPy distutils while retaining an older +NumPy ABI_. .. code:: shell diff --git a/doc/source/dev/distributing.rst b/doc/source/dev/distributing.rst index a8e288fc20eb..07995c02018d 100644 --- a/doc/source/dev/distributing.rst +++ b/doc/source/dev/distributing.rst @@ -64,9 +64,9 @@ There are some serious issues with how Python packaging tools handle dependencies reported by projects. Because SciPy gets regular bug reports about this, we go in a bit of detail here. -SciPy only reports its dependency on Numpy via ``install_requires`` if Numpy +SciPy only reports its dependency on NumPy via ``install_requires`` if NumPy isn't installed at all on a system. This will only change when there are -either 32-bit and 64-bit Windows wheels for Numpy on PyPI or when +either 32-bit and 64-bit Windows wheels for NumPy on PyPI or when ``pip upgrade`` becomes available (with sane behavior, unlike ``pip install -U``, see `this PR `_). For more details, see @@ -82,7 +82,7 @@ as "wontfix"). .. _supported-py-numpy-versions: -Supported Python and Numpy versions +Supported Python and NumPy versions ----------------------------------- The Python_ versions that SciPy supports are listed in the list of PyPI classifiers in ``setup.py``, and mentioned in the release notes for each @@ -90,16 +90,16 @@ release. All newly released Python versions will be supported as soon as possible. The general policy on dropping support for a Python version is that (a) usage of that version has to be quite low (say <5% of users) and (b) the version isn't included in an active long-term support release of one of the -main Linux distributions anymore. SciPy typically follows Numpy, which has a +main Linux distributions anymore. SciPy typically follows NumPy, which has a similar policy. The final decision on dropping support is always taken on the scipy-dev mailing list. The lowest supported Numpy_ version for a SciPy version is mentioned in the release notes and is encoded in ``scipy/__init__.py`` and the ``install_requires`` field of ``setup.py``. Typically the latest SciPy release -supports 3 or 4 minor versions of Numpy. That may become more if the frequency -of Numpy releases increases (it's about 1x/year at the time of writing). -Support for a particular Numpy version is typically dropped if (a) that Numpy +supports 3 or 4 minor versions of NumPy. That may become more if the frequency +of NumPy releases increases (it's about 1x/year at the time of writing). +Support for a particular NumPy version is typically dropped if (a) that NumPy version is several years old, and (b) the maintenance cost of keeping support is starting to outweigh the benefits. The final decision on dropping support is always taken on the scipy-dev mailing list. @@ -125,8 +125,8 @@ and distributing them on PyPI or elsewhere. - A binary is specific for a single Python version (because different Python versions aren't ABI-compatible, at least up to Python 3.4). -- Build against the lowest Numpy version that you need to support, then it will - work for all Numpy versions with the same major version number (Numpy does +- Build against the lowest NumPy version that you need to support, then it will + work for all NumPy versions with the same major version number (NumPy does maintain backwards ABI compatibility). **Windows** @@ -135,7 +135,7 @@ and distributing them on PyPI or elsewhere. Python.org compatible binaries for SciPy is installing MSVC (see https://wiki.python.org/moin/WindowsCompilers) and mingw64-gfortran. 
Support for this configuration requires numpy.distutils from - Numpy >= 1.14.dev and a gcc/gfortran-compiled static ``openblas.a``. + NumPy >= 1.14.dev and a gcc/gfortran-compiled static ``openblas.a``. This configuration is currently used in the Appveyor configuration for https://github.com/MacPython/scipy-wheels - For 64-bit Windows installers built with a free toolchain, use the method diff --git a/doc/source/dev/releasing.rst b/doc/source/dev/releasing.rst index fd056181f1c5..d9418b7c9428 100644 --- a/doc/source/dev/releasing.rst +++ b/doc/source/dev/releasing.rst @@ -117,8 +117,8 @@ commit (not the tag, see section above) to the scipy repo. To build wheels, push a commit to the master branch of https://github.com/MacPython/scipy-wheels . This triggers builds for all needed Python versions on TravisCI. Update and check the ``.travis.yml`` and ``appveyor.yml`` -config files what commit to build, and what Python and Numpy are used for the -builds (it needs to be the lowest supported Numpy version for each Python +config files what commit to build, and what Python and NumPy are used for the +builds (it needs to be the lowest supported NumPy version for each Python version). See the README file in the scipy-wheels repo for more details. The TravisCI and Appveyor builds run the tests from the built wheels and if they pass, diff --git a/doc/source/tutorial/basic.rst b/doc/source/tutorial/basic.rst index 2ca452732ba8..2cd68b2e0ea4 100644 --- a/doc/source/tutorial/basic.rst +++ b/doc/source/tutorial/basic.rst @@ -7,17 +7,17 @@ Basic functions .. contents:: -Interaction with Numpy +Interaction with NumPy ---------------------- -SciPy builds on Numpy, and for all basic array handling needs you can -use Numpy functions: +SciPy builds on NumPy, and for all basic array handling needs you can +use NumPy functions: >>> import numpy as np >>> np.some_function() Rather than giving a detailed description of each of these functions -(which is available in the Numpy Reference Guide or by using the +(which is available in the NumPy Reference Guide or by using the :func:`help`, :func:`info` and :func:`source` commands), this tutorial will discuss some of the more useful commands which require a little introduction to use to their full potential. @@ -99,7 +99,7 @@ usage: Having meshed arrays like this is sometimes very useful. However, it is not always needed just to evaluate some N-dimensional function over -a grid due to the array-broadcasting rules of Numpy and SciPy. If this +a grid due to the array-broadcasting rules of NumPy and SciPy. If this is the only purpose for generating a meshgrid, you should instead use the function :obj:`ogrid` which generates an "open" grid using :obj:`newaxis` judiciously to create N, N-d arrays where only one dimension in each @@ -123,7 +123,7 @@ Polynomials ^^^^^^^^^^^ There are two (interchangeable) ways to deal with 1-d polynomials in -SciPy. The first is to use the :class:`poly1d` class from Numpy. This +SciPy. The first is to use the :class:`poly1d` class from NumPy. This class accepts coefficients or polynomial roots to initialize a polynomial. The polynomial object can then be manipulated in algebraic expressions, integrated, differentiated, and evaluated. 
It even prints @@ -158,7 +158,7 @@ Vectorizing functions (vectorize) One of the features that NumPy provides is a class :obj:`vectorize` to convert an ordinary Python function which accepts scalars and returns scalars into a "vectorized-function" with the same broadcasting rules -as other Numpy functions (*i.e.* the Universal functions, or +as other NumPy functions (*i.e.* the Universal functions, or ufuncs). For example, suppose you have a Python function named :obj:`addsubtract` defined as: @@ -198,7 +198,7 @@ complex number. While complex numbers and arrays have attributes that return those values, if one is not sure whether or not the object will be complex-valued, it is better to use the functional forms :func:`np.real` and :func:`np.imag` . These functions succeed for anything -that can be turned into a Numpy array. Consider also the function +that can be turned into a NumPy array. Consider also the function :func:`np.real_if_close` which transforms a complex-valued number with tiny imaginary part into a real number. @@ -207,7 +207,7 @@ Occasionally the need to check whether or not a number is a scalar occurs in coding. This functionality is provided in the convenient function :func:`np.isscalar` which returns a 1 or a 0. -Finally, ensuring that objects are a certain Numpy type occurs often +Finally, ensuring that objects are a certain NumPy type occurs often enough that it has been given a convenient interface in SciPy through the use of the :obj:`np.cast` dictionary. The dictionary is keyed by the type it is desired to cast to and the dictionary stores functions to @@ -227,7 +227,7 @@ mentioned. For doing phase processing, the functions :func:`angle`, and :obj:`unwrap` are useful. Also, the :obj:`linspace` and :obj:`logspace` functions return equally spaced samples in a linear or log scale. Finally, it's useful to be aware of the indexing -capabilities of Numpy. Mention should be made of the function +capabilities of NumPy. Mention should be made of the function :obj:`select` which extends the functionality of :obj:`where` to include multiple conditions and multiple choices. The calling convention is ``select(condlist,choicelist,default=0).`` :obj:`select` diff --git a/doc/source/tutorial/general.rst b/doc/source/tutorial/general.rst index 2c1a91820674..14ed4cac6a20 100644 --- a/doc/source/tutorial/general.rst +++ b/doc/source/tutorial/general.rst @@ -5,7 +5,7 @@ Introduction .. contents:: SciPy is a collection of mathematical algorithms and convenience -functions built on the Numpy extension of Python. It adds +functions built on the NumPy extension of Python. It adds significant power to the interactive Python session by providing the user with high-level commands and classes for manipulating and visualizing data. With SciPy an interactive Python session @@ -26,7 +26,7 @@ This tutorial will acquaint the first-time user of SciPy with some of its most important features. It assumes that the user has already installed the SciPy package. Some general Python facility is also assumed, such as could be acquired by working through the Python distribution's Tutorial. For further -introductory help the user is directed to the Numpy documentation. +introductory help the user is directed to the NumPy documentation. 
For brevity and convenience, we will often assume that the main packages (numpy, scipy, and matplotlib) have been imported as:: diff --git a/doc/source/tutorial/io.rst b/doc/source/tutorial/io.rst index 018a511c7a71..c78552f6c803 100644 --- a/doc/source/tutorial/io.rst +++ b/doc/source/tutorial/io.rst @@ -45,7 +45,7 @@ How do I start? ``````````````` You may have a ``.mat`` file that you want to read into SciPy. Or, you -want to pass some variables from SciPy / Numpy into MATLAB. +want to pass some variables from SciPy / NumPy into MATLAB. To save us using a MATLAB license, let's start in Octave_. Octave has MATLAB-compatible save and load functions. Start Octave (``octave`` at diff --git a/doc/source/tutorial/linalg.rst b/doc/source/tutorial/linalg.rst index 03723cf9c94a..0f7f838e315f 100644 --- a/doc/source/tutorial/linalg.rst +++ b/doc/source/tutorial/linalg.rst @@ -111,7 +111,7 @@ The inverse of a matrix :math:`\mathbf{A}` is the matrix :math:`\mathbf{I}` is the identity matrix consisting of ones down the main diagonal. Usually :math:`\mathbf{B}` is denoted :math:`\mathbf{B}=\mathbf{A}^{-1}` . In SciPy, the matrix inverse of -the Numpy array, A, is obtained using :obj:`linalg.inv` ``(A)`` , or +the NumPy array, A, is obtained using :obj:`linalg.inv` ``(A)`` , or using ``A.I`` if ``A`` is a Matrix. For example, let .. math:: diff --git a/doc/source/tutorial/signal.rst b/doc/source/tutorial/signal.rst index b2ecbe7820c3..92b87877c2ed 100644 --- a/doc/source/tutorial/signal.rst +++ b/doc/source/tutorial/signal.rst @@ -151,17 +151,17 @@ Filtering --------- Filtering is a generic name for any system that modifies an input -signal in some way. In SciPy a signal can be thought of as a Numpy +signal in some way. In SciPy a signal can be thought of as a NumPy array. There are different kinds of filters for different kinds of operations. There are two broad kinds of filtering operations: linear and non-linear. Linear filters can always be reduced to multiplication -of the flattened Numpy array by an appropriate matrix resulting in -another flattened Numpy array. Of course, this is not usually the best +of the flattened NumPy array by an appropriate matrix resulting in +another flattened NumPy array. Of course, this is not usually the best way to compute the filter as the matrices and vectors involved may be huge. For example filtering a :math:`512 \times 512` image with this method would require multiplication of a :math:`512^2 \times 512^2` matrix with a :math:`512^2` vector. Just trying to store the -:math:`512^2 \times 512^2` matrix using a standard Numpy array would +:math:`512^2 \times 512^2` matrix using a standard NumPy array would require :math:`68,719,476,736` elements. At 4 bytes per element this would require :math:`256\textrm{GB}` of memory. In most applications most of the elements of this matrix are zero and a different method diff --git a/runtests.py b/runtests.py index 2272942edd33..1d01f0cb409e 100755 --- a/runtests.py +++ b/runtests.py @@ -26,7 +26,7 @@ """ # -# This is a generic test runner script for projects using Numpy's test +# This is a generic test runner script for projects using NumPy's test # framework. 
Change the following values to adapt to your project: # @@ -106,7 +106,7 @@ def main(argv): help="Debug build") parser.add_argument("--parallel", "-j", type=int, default=1, help="Number of parallel jobs during build (requires " - "Numpy 1.10 or greater).") + "NumPy 1.10 or greater).") parser.add_argument("--show-build-log", action="store_true", help="Show build output rather than using a log file") parser.add_argument("--bench", action="store_true", diff --git a/scipy/ndimage/measurements.py b/scipy/ndimage/measurements.py index 38fbe8c0e8eb..e07fdd8283ba 100644 --- a/scipy/ndimage/measurements.py +++ b/scipy/ndimage/measurements.py @@ -888,7 +888,7 @@ def minimum(input, labels=None, index=None): Notes ----- - The function returns a Python list and not a Numpy array, use + The function returns a Python list and not a NumPy array, use `np.array` to convert the list to an array. Examples @@ -950,7 +950,7 @@ def maximum(input, labels=None, index=None): Notes ----- - The function returns a Python list and not a Numpy array, use + The function returns a Python list and not a NumPy array, use `np.array` to convert the list to an array. Examples @@ -1028,7 +1028,7 @@ def median(input, labels=None, index=None): Notes ----- - The function returns a Python list and not a Numpy array, use + The function returns a Python list and not a NumPy array, use `np.array` to convert the list to an array. Examples diff --git a/scipy/signal/ltisys.py b/scipy/signal/ltisys.py index d6485b090272..dbb42707ed7f 100644 --- a/scipy/signal/ltisys.py +++ b/scipy/signal/ltisys.py @@ -1294,7 +1294,7 @@ class StateSpace(LinearTimeInvariant): """ - # Override Numpy binary operations and ufuncs + # Override NumPy binary operations and ufuncs __array_priority__ = 100.0 __array_ufunc__ = None diff --git a/scipy/signal/signaltools.py b/scipy/signal/signaltools.py index cce171fdbd37..2408c174f488 100644 --- a/scipy/signal/signaltools.py +++ b/scipy/signal/signaltools.py @@ -520,7 +520,7 @@ def _reverse_and_conj(x): def _np_conv_ok(volume, kernel, mode): """ See if numpy supports convolution of `volume` and `kernel` (i.e. both are - 1D ndarrays and of the appropriate shape). Numpy's 'same' mode uses the + 1D ndarrays and of the appropriate shape). NumPy's 'same' mode uses the size of the larger input, while SciPy's uses the size of the first input. Invalid mode strings will return False and be caught by the calling func. 
diff --git a/scipy/sparse/dok.py b/scipy/sparse/dok.py index 9dfcd29d2c97..6f627f9ed205 100644 --- a/scipy/sparse/dok.py +++ b/scipy/sparse/dok.py @@ -235,7 +235,7 @@ def __getitem__(self, index): return newdok def _getitem_ranges(self, i_indices, j_indices, shape): - # performance golf: we don't want Numpy scalars here, they are slow + # performance golf: we don't want NumPy scalars here, they are slow i_start, i_stop, i_stride = map(int, i_indices) j_start, j_stop, j_stride = map(int, j_indices) diff --git a/scipy/sparse/sparsetools.py b/scipy/sparse/sparsetools.py index c7f190ee86cd..639ec8851cd1 100644 --- a/scipy/sparse/sparsetools.py +++ b/scipy/sparse/sparsetools.py @@ -21,7 +21,7 @@ def _deprecated(): _deprecated() except DeprecationWarning as e: # don't fail import if DeprecationWarnings raise error -- works around - # the situation with Numpy's test framework + # the situation with NumPy's test framework pass from ._sparsetools import * diff --git a/scipy/special/_generate_pyx.py b/scipy/special/_generate_pyx.py index 603739d36ef8..ad4fe69c4502 100644 --- a/scipy/special/_generate_pyx.py +++ b/scipy/special/_generate_pyx.py @@ -490,7 +490,7 @@ def iter_variants(inputs, outputs): # can lead to incorrect dtype selection if the integer arguments are # arrays, but float arguments are scalars. # For instance sph_harm(0,[0],0,0).dtype == complex64 - # This may be a Numpy bug, but we need to work around it. + # This may be a NumPy bug, but we need to work around it. # cf. gh-4895, https://github.com/numpy/numpy/issues/5895 maps = maps + [(a + 'dD', b + 'fF') for a, b in maps] diff --git a/scipy/special/_logsumexp.py b/scipy/special/_logsumexp.py index 2e4bbc6ce7ff..f31669577d82 100644 --- a/scipy/special/_logsumexp.py +++ b/scipy/special/_logsumexp.py @@ -54,7 +54,7 @@ def logsumexp(a, axis=None, b=None, keepdims=False, return_sign=False): Notes ----- - Numpy has a logaddexp function which is very similar to `logsumexp`, but + NumPy has a logaddexp function which is very similar to `logsumexp`, but only handles two arguments. `logaddexp.reduce` is similar to this function, but may be less stable. diff --git a/scipy/stats/_discrete_distns.py b/scipy/stats/_discrete_distns.py index 4aa38d79f380..dc06dd0039dc 100644 --- a/scipy/stats/_discrete_distns.py +++ b/scipy/stats/_discrete_distns.py @@ -708,7 +708,7 @@ def _stats(self, low, high): def _rvs(self, low, high): """An array of *size* random integers >= ``low`` and < ``high``.""" if self._size is not None: - # Numpy's RandomState.randint() doesn't broadcast its arguments. + # NumPy's RandomState.randint() doesn't broadcast its arguments. # Use `broadcast_to()` to extend the shapes of low and high # up to self._size. Then we can use the numpy.vectorize'd # randint without needing to pass it a `size` argument. diff --git a/scipy/stats/stats.py b/scipy/stats/stats.py index 10007d231775..db18618db519 100644 --- a/scipy/stats/stats.py +++ b/scipy/stats/stats.py @@ -1664,7 +1664,7 @@ def scoreatpercentile(a, per, limit=(), interpolation_method='fraction', Notes ----- This function will become obsolete in the future. - For Numpy 1.9 and higher, `numpy.percentile` provides all the functionality + For NumPy 1.9 and higher, `numpy.percentile` provides all the functionality that `scoreatpercentile` provides. And it's significantly faster. Therefore it's recommended to use `numpy.percentile` for users that have numpy >= 1.9. 
diff --git a/setup.py b/setup.py index 67661baad8b0..940ba20f7109 100755 --- a/setup.py +++ b/setup.py @@ -478,8 +478,8 @@ def setup_package(): metadata['configuration'] = configuration else: # Don't import numpy here - non-build actions are required to succeed - # without Numpy for example when pip is used to install Scipy when - # Numpy is not yet present in the system. + # without NumPy for example when pip is used to install Scipy when + # NumPy is not yet present in the system. # Version number is added to metadata inside configuration() if build # is run. From 09fb16dbfa5be0633cfe29fa3df3e32ed663a368 Mon Sep 17 00:00:00 2001 From: Paul van Mulbregt Date: Wed, 21 Nov 2018 18:58:11 -0500 Subject: [PATCH 32/70] DOC: Replace "Numpy" with "NumPy" for consistency in the doc (2 more files) --- THANKS.txt | 2 +- scipy/linalg/_solvers.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/THANKS.txt b/THANKS.txt index d35cc75dd004..ee92222570af 100644 --- a/THANKS.txt +++ b/THANKS.txt @@ -80,7 +80,7 @@ Martin Teichmann for improving scipy.special.ellipk & agm accuracy, and for linalg.qr_multiply. Jeff Armstrong for discrete state-space and linear time-invariant functionality in scipy.signal, and sylvester/riccati/lyapunov solvers in scipy.linalg. -Mark Wiebe for fixing type casting after changes in Numpy. +Mark Wiebe for fixing type casting after changes in NumPy. Andrey Smirnov for improvements to FIR filter design. Anthony Scopatz for help with code review and merging. Lars Buitinck for improvements to scipy.sparse and various other modules. diff --git a/scipy/linalg/_solvers.py b/scipy/linalg/_solvers.py index f5c5d3a4a51e..e8129b3aeeae 100644 --- a/scipy/linalg/_solvers.py +++ b/scipy/linalg/_solvers.py @@ -784,7 +784,7 @@ def _are_validate_args(a, b, q, r, e, s, eq_type='care'): q = np.atleast_2d(_asarray_validated(q, check_finite=True)) r = np.atleast_2d(_asarray_validated(r, check_finite=True)) - # Get the correct data types otherwise Numpy complains + # Get the correct data types otherwise NumPy complains # about pushing complex numbers into real arrays. r_or_c = complex if np.iscomplexobj(b) else float From 6379cc8366e7665f87bd8a10878590942c6996a3 Mon Sep 17 00:00:00 2001 From: Tyler Reddy Date: Wed, 21 Nov 2018 16:19:48 -0800 Subject: [PATCH 33/70] TST: test_random_sampling 32-bit handling * test_random_sampling now marked as xfail for NumPy versions prior to 1.11.0 when used in a 32-bit Python interpreter, based on observed wheel build failures --- scipy/sparse/tests/test_construct.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/scipy/sparse/tests/test_construct.py b/scipy/sparse/tests/test_construct.py index 84c80ae299ad..e721f6cb39ec 100644 --- a/scipy/sparse/tests/test_construct.py +++ b/scipy/sparse/tests/test_construct.py @@ -2,6 +2,7 @@ from __future__ import division, print_function, absolute_import +import sys import numpy as np from numpy import array, matrix from numpy.testing import (assert_equal, assert_, @@ -9,6 +10,7 @@ import pytest from pytest import raises as assert_raises from scipy._lib._testutils import check_free_memory +from scipy._lib._version import NumpyVersion from scipy.sparse import csr_matrix, coo_matrix @@ -420,6 +422,9 @@ def test_block_diag_1(self): assert_equal(construct.block_diag([1]).todense(), matrix([[1]])) + @pytest.mark.xfail((NumpyVersion(np.__version__) < "1.11.0" and + sys.maxsize <= 2**32), + reason="randint not compatible") def test_random_sampling(self): # Simple sanity checks for sparse random sampling. 
for f in sprand, _sprandn: From 6d9046696481bbf8cb3f79708946c1bf4e275ac4 Mon Sep 17 00:00:00 2001 From: Dowon Date: Thu, 22 Nov 2018 15:35:26 +0900 Subject: [PATCH 34/70] clean up for harmonic mean test --- scipy/stats/tests/test_stats.py | 171 +++++++++++--------------------- 1 file changed, 57 insertions(+), 114 deletions(-) diff --git a/scipy/stats/tests/test_stats.py b/scipy/stats/tests/test_stats.py index b94c26890e18..8a312ab3b1d7 100644 --- a/scipy/stats/tests/test_stats.py +++ b/scipy/stats/tests/test_stats.py @@ -1231,47 +1231,6 @@ def test_relfreq(): assert_array_almost_equal(relfreqs, relfreqs2) -class TestHMean(object): - def test_1D_list(self): - a = (1,2,3,4) - actual = stats.hmean(a) - desired = 4. / (1./1 + 1./2 + 1./3 + 1./4) - assert_almost_equal(actual, desired, decimal=14) - - desired1 = stats.hmean(array(a),axis=-1) - assert_almost_equal(actual, desired1, decimal=14) - - def test_1D_array(self): - a = array((1,2,3,4), float64) - actual = stats.hmean(a) - desired = 4. / (1./1 + 1./2 + 1./3 + 1./4) - assert_almost_equal(actual, desired, decimal=14) - - desired1 = stats.hmean(a,axis=-1) - assert_almost_equal(actual, desired1, decimal=14) - - def test_2D_array_default(self): - a = array(((1,2,3,4), - (1,2,3,4), - (1,2,3,4))) - actual = stats.hmean(a) - desired = array((1.,2.,3.,4.)) - assert_array_almost_equal(actual, desired, decimal=14) - - actual1 = stats.hmean(a,axis=0) - assert_array_almost_equal(actual1, desired, decimal=14) - - def test_2D_array_dim1(self): - a = array(((1,2,3,4), - (1,2,3,4), - (1,2,3,4))) - - v = 4. / (1./1 + 1./2 + 1./3 + 1./4) - desired1 = array((v,v,v)) - actual1 = stats.hmean(a, axis=1) - assert_array_almost_equal(actual1, desired1, decimal=14) - - class TestScoreatpercentile(object): def setup_method(self): self.a1 = [3, 4, 5, 10, -3, -5, 6] @@ -3600,90 +3559,74 @@ def test_obrientransform(): assert_array_almost_equal(result[0], expected, decimal=4) -class HarMeanTestCase: - def test_1dlist(self): +class StatsTestMethod(object): + def equal_gmean_test(self, array_like, desired, axis=None, dtype=None, significant=7): + # Note this doesn't test when axis is not specified + rtol = np.float_power(10, -1.0 * significant) + x = stats.gmean(array_like, axis=axis, dtype=dtype) + assert_allclose(x, desired, rtol=rtol) + assert_equal(x.dtype, dtype) + + def equal_hmean_test(self, array_like, desired, axis=None, dtype=None, significant=7): + rtol = np.float_power(10, -1.0 * significant) + x = stats.hmean(array_like, axis=axis, dtype=dtype) + assert_allclose(x, desired, rtol=rtol) + assert_equal(x.dtype, dtype) + + +class TestHarMean(StatsTestMethod): + def test_1d_list(self): # Test a 1d list a = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100] b = 34.1417152147 - self.do(a, b) + self.equal_hmean_test(a, b) + + a = [1, 2, 3, 4] + b = 4. / (1. / 1 + 1. / 2 + 1. / 3 + 1. 
/ 4) + self.equal_hmean_test(a, b) - def test_1darray(self): + def test_1d_array(self): # Test a 1d array a = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]) b = 34.1417152147 - self.do(a, b) - - def test_1dma(self): - # Test a 1d masked array - a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]) - b = 34.1417152147 - self.do(a, b) - - def test_1dmavalue(self): - # Test a 1d masked array with a masked value - a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100], - mask=[0,0,0,0,0,0,0,0,0,1]) - b = 31.8137186141 - self.do(a, b) + self.equal_hmean_test(a, b) # Note the next tests use axis=None as default, not axis=0 - def test_2dlist(self): + def test_2d_list(self): # Test a 2d list a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] b = 38.6696271841 - self.do(a, b) + self.equal_hmean_test(a, b) - def test_2darray(self): + def test_2d_array(self): # Test a 2d array a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] b = 38.6696271841 - self.do(np.array(a), b) - - def test_2dma(self): - # Test a 2d masked array - a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] - b = 38.6696271841 - self.do(np.ma.array(a), b) + self.equal_hmean_test(np.array(a), b) - def test_2daxis0(self): + def test_2d_axis0(self): # Test a 2d list with axis=0 a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] b = np.array([22.88135593, 39.13043478, 52.90076336, 65.45454545]) - self.do(a, b, axis=0) + self.equal_hmean_test(a, b, axis=0) - def test_2daxis1(self): + def test_2d_axis1(self): # Test a 2d list with axis=1 a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] b = np.array([19.2, 63.03939962, 103.80078637]) - self.do(a, b, axis=1) + self.equal_hmean_test(a, b, axis=1) - def test_2dmatrixdaxis0(self): + def test_2d_matrix_axis0(self): # Test a 2d list with axis=0 a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] b = np.matrix([[22.88135593, 39.13043478, 52.90076336, 65.45454545]]) - self.do(np.matrix(a), b, axis=0) + self.equal_hmean_test(np.matrix(a), b, axis=0) - def test_2dmatrixaxis1(self): + def test_2d_matrix_axis1(self): # Test a 2d list with axis=1 a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] b = np.matrix([[19.2, 63.03939962, 103.80078637]]).T - self.do(np.matrix(a), b, axis=1) - - -class TestHarMean(HarMeanTestCase): - def do(self, a, b, axis=None, dtype=None): - x = stats.hmean(a, axis=axis, dtype=dtype) - assert_almost_equal(b, x) - assert_equal(x.dtype, dtype) - - -class StatsTestMethod(object): - def equal_test(self, array_like, desired, axis=None, dtype=None, significant=7): - # Note this doesn't test when axis is not specified - rtol = np.float_power(10, -1.0 * significant) - x = stats.gmean(array_like, axis=axis, dtype=dtype) - assert_allclose(x, desired, rtol=rtol) - assert_equal(x.dtype, dtype) + self.equal_hmean_test(np.matrix(a), b, axis=1) class TestGeoMean(StatsTestMethod): @@ -3691,94 +3634,94 @@ def test_1d_list(self): # Test a 1d list a = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100] b = 45.2872868812 - self.equal_test(a, b) + self.equal_gmean_test(a, b) a = [1, 2, 3, 4] desired = power(1 * 2 * 3 * 4, 1. / 4.) 
- self.equal_test(a, desired, significant=14) + self.equal_gmean_test(a, desired, significant=14) desired1 = stats.gmean(a, axis=-1) - self.equal_test(a, desired1, axis=-1) + self.equal_gmean_test(a, desired1, axis=-1) def test_1d_array(self): # Test a 1d array a = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]) b = 45.2872868812 - self.equal_test(a, b) + self.equal_gmean_test(a, b) a = array([1, 2, 3, 4], float32) desired = power(1 * 2 * 3 * 4, 1. / 4.) - self.equal_test(a, desired, dtype=float32) + self.equal_gmean_test(a, desired, dtype=float32) desired1 = stats.gmean(a, axis=-1) - self.equal_test(a, desired1, axis=-1, dtype=float32) + self.equal_gmean_test(a, desired1, axis=-1, dtype=float32) # Note the next tests use axis=None as default, not axis=0 def test_2d_list(self): # Test a 2d list a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] b = 52.8885199 - self.equal_test(a, b) + self.equal_gmean_test(a, b) def test_2d_array(self): # Test a 2d array a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] b = 52.8885199 - self.equal_test(array(a), b) + self.equal_gmean_test(array(a), b) def test_2d_axis0(self): # Test a 2d list with axis=0 a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] b = np.array([35.56893304, 49.32424149, 61.3579244, 72.68482371]) - self.equal_test(a, b, axis=0) + self.equal_gmean_test(a, b, axis=0) a = array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) desired = array([1, 2, 3, 4]) - self.equal_test(a, desired, axis=0, significant=14) + self.equal_gmean_test(a, desired, axis=0, significant=14) a = array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) desired = stats.gmean(a, axis=0) - self.equal_test(a, desired, axis=0, significant=14) + self.equal_gmean_test(a, desired, axis=0, significant=14) def test_2d_axis1(self): # Test a 2d list with axis=1 a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] b = np.array([22.13363839, 64.02171746, 104.40086817]) - self.equal_test(a, b, axis=1) + self.equal_gmean_test(a, b, axis=1) a = array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) v = power(1 * 2 * 3 * 4, 1. / 4.) desired = array([v, v, v]) - self.equal_test(a, desired, axis=1, significant=14) + self.equal_gmean_test(a, desired, axis=1, significant=14) def test_2d_matrix_axis0(self): # Test a 2d list with axis=0 a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] b = np.matrix([[35.56893304, 49.32424149, 61.3579244, 72.68482371]]) - self.equal_test(np.matrix(a), b, axis=0) + self.equal_gmean_test(np.matrix(a), b, axis=0) a = array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) desired = np.matrix([1, 2, 3, 4]) - self.equal_test(np.matrix(a), desired, axis=0, significant=14) + self.equal_gmean_test(np.matrix(a), desired, axis=0, significant=14) a = array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) desired = np.matrix(stats.gmean(a, axis=0)) - self.equal_test(np.matrix(a), desired, axis=0, significant=14) + self.equal_gmean_test(np.matrix(a), desired, axis=0, significant=14) def test_2d_matrix_axis1(self): # Test a 2d list with axis=1 a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] b = np.matrix([[22.13363839, 64.02171746, 104.40086817]]).T - self.equal_test(np.matrix(a), b, axis=1) + self.equal_gmean_test(np.matrix(a), b, axis=1) a = array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) v = power(1 * 2 * 3 * 4, 1. / 4.) 
desired = np.matrix([[v], [v], [v]]) - self.equal_test(np.matrix(a), desired, axis=1, significant=14) + self.equal_gmean_test(np.matrix(a), desired, axis=1, significant=14) def test_large_values(self): a = array([1e100, 1e200, 1e300]) - self.equal_test(a, 1e200, significant=13) + self.equal_gmean_test(a, 1e200, significant=13) def test_1d_list0(self): # Test a 1d list with zero element @@ -3786,7 +3729,7 @@ def test_1d_list0(self): b = 0.0 # due to exp(-inf)=0 olderr = np.seterr(all='ignore') try: - self.equal_test(a, b) + self.equal_gmean_test(a, b) finally: np.seterr(**olderr) @@ -3796,7 +3739,7 @@ def test_1d_array0(self): b = 0.0 # due to exp(-inf)=0 olderr = np.seterr(all='ignore') try: - self.equal_test(a, b) + self.equal_gmean_test(a, b) finally: np.seterr(**olderr) From 442180678a89570f67f57a60751903e9486c57c9 Mon Sep 17 00:00:00 2001 From: Dowon Date: Thu, 22 Nov 2018 15:36:28 +0900 Subject: [PATCH 35/70] clean up for harmonic mean test of masked array --- scipy/stats/tests/test_mstats_basic.py | 97 +++++++++++++------------- 1 file changed, 50 insertions(+), 47 deletions(-) diff --git a/scipy/stats/tests/test_mstats_basic.py b/scipy/stats/tests/test_mstats_basic.py index 4616f334b47e..a5a815ae6930 100644 --- a/scipy/stats/tests/test_mstats_basic.py +++ b/scipy/stats/tests/test_mstats_basic.py @@ -43,42 +43,48 @@ def test_mquantiles_limit_keyword(self): class MStatsTestMethod(object): - def equal_test(self, array_like, desired, axis=None, dtype=None, significant=7): + def equal_gmean_test(self, array_like, desired, axis=None, dtype=None, significant=7): # Note this doesn't test when axis is not specified rtol = np.float_power(10, -1.0 * significant) x = mstats.gmean(array_like, axis=axis, dtype=dtype) assert_allclose(x, desired, rtol=rtol) assert_equal(x.dtype, dtype) + def equal_hmean_test(self, array_like, desired, axis=None, dtype=None, significant=7): + rtol = np.float_power(10, -1.0 * significant) + x = stats.hmean(array_like, axis=axis, dtype=dtype) + assert_allclose(x, desired, rtol=rtol) + assert_equal(x.dtype, dtype) + class TestGeoMean(MStatsTestMethod): def test_1d(self): a = [1, 2, 3, 4] desired = np.power(1*2*3*4, 1./4.) - self.equal_test(a, desired, significant=14) + self.equal_gmean_test(a, desired, significant=14) desired1 = mstats.gmean(a, axis=-1) - self.equal_test(a, desired1, significant=14) + self.equal_gmean_test(a, desired1, significant=14) assert_(not isinstance(desired1, ma.MaskedArray)) def test_1d_ma(self): # Test a 1d masked array a = ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]) b = 45.2872868812 - self.equal_test(a, b) + self.equal_gmean_test(a, b) a = ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1]) desired = np.power(1*2*3, 1./3.) 
- self.equal_test(a, desired, significant=14) + self.equal_gmean_test(a, desired, significant=14) desired1 = mstats.gmean(a, axis=-1) - self.equal_test(a, desired1, significant=14) + self.equal_gmean_test(a, desired1, significant=14) def test_1d_ma_value(self): # Test a 1d masked array with a masked value a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100], mask=[0, 0, 0, 0, 0, 0, 0, 0, 0, 1]) b = 41.4716627439 - self.equal_test(a, b) + self.equal_gmean_test(a, b) def test_1d_ma0(self): # Test a 1d masked array with zero element @@ -86,7 +92,7 @@ def test_1d_ma0(self): b = 41.4716627439 olderr = np.seterr(all='ignore') try: - self.equal_test(a, b) + self.equal_gmean_test(a, b) finally: np.seterr(**olderr) @@ -96,7 +102,7 @@ def test_1d_ma_inf(self): b = 41.4716627439 olderr = np.seterr(all='ignore') try: - self.equal_test(a, b) + self.equal_gmean_test(a, b) finally: np.seterr(**olderr) @@ -104,66 +110,63 @@ def test_1d_ma_inf(self): def test_1d_float96(self): a = ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1]) desired_dt = np.power(1*2*3, 1./3.).astype(np.float96) - self.equal_test(a, desired_dt, dtype=np.float96, significant=14) + self.equal_gmean_test(a, desired_dt, dtype=np.float96, significant=14) def test_2d_ma(self): a = ma.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]], mask=[[0, 0, 0, 0], [1, 0, 0, 1], [0, 1, 1, 0]]) desired = np.array([1, 2, 3, 4]) - self.equal_test(a, desired, axis=0, significant=14) + self.equal_gmean_test(a, desired, axis=0, significant=14) desired1 = mstats.gmean(a, axis=0) - self.equal_test(a, desired1, axis=0, significant=14) + self.equal_gmean_test(a, desired1, axis=0, significant=14) desired = ma.array([np.power(1*2*3*4, 1./4.), np.power(2*3, 1./2.), np.power(1*4, 1./2.)]) - self.equal_test(a, desired, axis=-1, significant=14) + self.equal_gmean_test(a, desired, axis=-1, significant=14) # Test a 2d masked array a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] b = 52.8885199 - self.equal_test(np.ma.array(a), b) - + self.equal_gmean_test(np.ma.array(a), b) -class TestHMean(object): - def test_1D(self): - a = (1, 2, 3, 4) - actual = mstats.hmean(a) - desired = 4. / (1./1 + 1./2 + 1./3 + 1./4) - assert_almost_equal(actual, desired, decimal=14) - desired1 = mstats.hmean(ma.array(a), axis=-1) - assert_almost_equal(actual, desired1, decimal=14) - a = ma.array((1, 2, 3, 4), mask=(0, 0, 0, 1)) - actual = mstats.hmean(a) +class TestHarMean(MStatsTestMethod): + def test_1d(self): + a = ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1]) desired = 3. / (1./1 + 1./2 + 1./3) - assert_almost_equal(actual, desired, decimal=14) + self.equal_hmean_test(a, desired, significant=14) desired1 = mstats.hmean(a, axis=-1) - assert_almost_equal(actual, desired1, decimal=14) + self.equal_hmean_test(a, desired1, axis=-1, significant=14) + + a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]) + b = 34.1417152147 + self.equal_hmean_test(a, b) + + a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100], + mask=[0, 0, 0, 0, 0, 0, 0, 0, 0, 1]) + b = 31.8137186141 + self.equal_hmean_test(a, b) @pytest.mark.skipif(not hasattr(np, 'float96'), reason='cannot find float96 so skipping') - def test_1D_float96(self): - a = ma.array((1, 2, 3, 4), mask=(0, 0, 0, 1)) - actual_dt = mstats.hmean(a, dtype=np.float96) - desired_dt = np.asarray(3. 
/ (1./1 + 1./2 + 1./3),
-                                dtype=np.float96)
-        assert_almost_equal(actual_dt, desired_dt, decimal=14)
-        assert_(actual_dt.dtype == desired_dt.dtype)
+    def test_1d_float96(self):
+        a = ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1])
+        desired_dt = np.asarray(3. / (1./1 + 1./2 + 1./3), dtype=np.float96)
+        self.equal_hmean_test(a, desired_dt, dtype=np.float96)
 
-    def test_2D(self):
-        a = ma.array(((1,2,3,4),(1,2,3,4),(1,2,3,4)),
-                     mask=((0,0,0,0),(1,0,0,1),(0,1,1,0)))
-        actual = mstats.hmean(a)
-        desired = ma.array((1,2,3,4))
-        assert_array_almost_equal(actual, desired, decimal=14)
-
-        actual1 = mstats.hmean(a,axis=-1)
-        desired = (4./(1/1.+1/2.+1/3.+1/4.),
-                   2./(1/2.+1/3.),
-                   2./(1/1.+1/4.)
-                   )
-        assert_array_almost_equal(actual1, desired, decimal=14)
+    def test_2d(self):
+        a = ma.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]],
+                     mask=[[0, 0, 0, 0], [1, 0, 0, 1], [0, 1, 1, 0]])
+        desired = ma.array([1, 2, 3, 4])
+        self.equal_hmean_test(a, desired, axis=0, significant=14)
+
+        desired = [4./(1/1.+1/2.+1/3.+1/4.), 2./(1/2.+1/3.), 2./(1/1.+1/4.)]
+        self.equal_hmean_test(a, desired, axis=-1, significant=14)
+
+        a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
+        b = 38.6696271841
+        self.equal_hmean_test(np.ma.array(a), b)
 
 
 class TestRanking(object):

From 3bb385f1db852a9de856c785896155a88d44d87f Mon Sep 17 00:00:00 2001
From: Piotr Figiel
Date: Thu, 22 Nov 2018 14:30:36 +0100
Subject: [PATCH 36/70] BUG: improve error handling in stats.iqr

Add the missing check for NaNs in rng that is expected by the
TestIQR.test_rng test.

Closes #9516.
---
 scipy/stats/stats.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/scipy/stats/stats.py b/scipy/stats/stats.py
index 10007d231775..1064b350f4e8 100644
--- a/scipy/stats/stats.py
+++ b/scipy/stats/stats.py
@@ -2483,6 +2483,9 @@ def iqr(x, axis=None, rng=(25, 75), scale='raw', nan_policy='propagate',
     if len(rng) != 2:
         raise TypeError("quantile range must be two element sequence")
 
+    if np.isnan(rng).any():
+        raise ValueError("range must not contain NaNs")
+
     rng = sorted(rng)
     pct = percentile_func(x, rng, axis=axis, interpolation=interpolation,
                           keepdims=keepdims, contains_nan=contains_nan)

From f2f64a742cc04a98a28b9b24a0ae5a65b73e884b Mon Sep 17 00:00:00 2001
From: Piotr Figiel
Date: Fri, 23 Nov 2018 10:26:26 +0100
Subject: [PATCH 37/70] TST: relax precision requirements in signal.correlate
 tests

When used with the FFT method, correlate doesn't benefit from 128-bit
float types, because the FFT implementation downcasts the input arrays
to doubles. For this reason, reduce the expected precision to the same
level as is expected for the double type.

Closes #9520.
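For illustration, a minimal sketch of the effect (an assumption-laden
example, not taken from the test suite: it assumes a platform where
np.longdouble is genuinely wider than double; on builds where longdouble
is an alias for double there is nothing to observe, and exact error
magnitudes vary by platform):

```python
import numpy as np
from scipy.signal import correlate

np.random.seed(1234)
a = np.random.randn(1000).astype(np.longdouble)
b = np.random.randn(1000).astype(np.longdouble)

# The FFT path computes in double precision internally, so the result
# keeps the longdouble dtype while carrying only double accuracy.
y_fft = correlate(a, b, method='fft')
y_direct = correlate(a, b, method='direct')

print(y_fft.dtype, y_direct.dtype)       # both longdouble
# The two methods agree only to roughly double precision (~1e-13 here),
# which is why the expected decimal tolerance is relaxed for longdouble.
print(np.max(np.abs(y_fft - y_direct)))
```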
---
 scipy/signal/tests/test_signaltools.py | 17 +++++++++++++--
 1 file changed, 15 insertions(+), 2 deletions(-)

diff --git a/scipy/signal/tests/test_signaltools.py b/scipy/signal/tests/test_signaltools.py
index 5748cfda5dbf..d40fc959a1d5 100644
--- a/scipy/signal/tests/test_signaltools.py
+++ b/scipy/signal/tests/test_signaltools.py
@@ -1517,6 +1517,14 @@ def equal_tolerance(self, res_dt):
                 pass
         return decimal
 
+    def equal_tolerance_fft(self, res_dt):
+        # FFT implementations convert longdouble arguments down to
+        # double, so don't expect better precision (see gh-9520).
+        if res_dt == np.longdouble:
+            return self.equal_tolerance(np.double)
+        else:
+            return self.equal_tolerance(res_dt)
+
     def test_method(self, dt):
         if dt == Decimal:
             method = choose_conv_method([Decimal(4)], [Decimal(3)])
@@ -1526,8 +1534,8 @@ def test_method(self, dt):
             y_fft = correlate(a, b, method='fft')
             y_direct = correlate(a, b, method='direct')
 
-            assert_array_almost_equal(y_r, y_fft, decimal=self.equal_tolerance(y_fft.dtype))
-            assert_array_almost_equal(y_r, y_direct, decimal=self.equal_tolerance(y_fft.dtype))
+            assert_array_almost_equal(y_r, y_fft, decimal=self.equal_tolerance_fft(y_fft.dtype))
+            assert_array_almost_equal(y_r, y_direct, decimal=self.equal_tolerance(y_direct.dtype))
             assert_equal(y_fft.dtype, dt)
             assert_equal(y_direct.dtype, dt)
 
@@ -1658,8 +1666,13 @@ class TestCorrelateComplex(object):
     # The decimal precision to be used for comparing results.
     # This value will be passed as the 'decimal' keyword argument of
     # assert_array_almost_equal().
+    # Since correlate may choose to use the FFT method, which converts
+    # longdoubles to doubles internally, don't expect better precision
+    # for longdouble than for double (see gh-9520).
 
     def decimal(self, dt):
+        if dt == np.clongdouble:
+            dt = np.cdouble
         return int(2 * np.finfo(dt).precision / 3)
 
     def _setup_rank1(self, dt, mode):

From 5c025ab0ea5ae26fa3b548fcdfc4a50241e6c85a Mon Sep 17 00:00:00 2001
From: Ralf Gommers
Date: Sat, 24 Nov 2018 09:39:22 -0800
Subject: [PATCH 38/70] DOC: fix missing random seed in optimize.newton example

This was giving occasional CI failures in the refguide check that looked
like:

```
scipy.optimize.newton
---------------------
/home/travis/build/scipy/scipy/build/testenv/lib/python3.6/site-packages/scipy/optimize/zeros.py:439: RuntimeWarning: some failed to converge after 50 iterations
  warnings.warn(msg, RuntimeWarning)
File "build/testenv/lib/python3.6/site-packages/scipy/optimize/zeros.py", line 242, in newton
Failed example:
    np.allclose(vec_res, loop_res)
Expected:
    True
Got:
    False
```
---
 scipy/optimize/zeros.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/scipy/optimize/zeros.py b/scipy/optimize/zeros.py
index 94f9ec0746c2..583ce7bd79a6 100644
--- a/scipy/optimize/zeros.py
+++ b/scipy/optimize/zeros.py
@@ -230,6 +230,7 @@ class of similar problems can be solved together.
>>> f = lambda x, a: x**3 - a >>> fder = lambda x, a: 3 * x**2 + >>> np.random.seed(4321) >>> x = np.random.randn(100) >>> a = np.arange(-50, 50) >>> vec_res = optimize.newton(f, x, fprime=fder, args=(a, )) From fba1ab0f6e8ed6ebf90d2d4b0e4dddeda5b8bc55 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Mon, 26 Nov 2018 17:59:30 -0800 Subject: [PATCH 39/70] BUG: fix SyntaxError due to non-ascii character on Python 2.7 Closes gh-9540 --- scipy/spatial/transform/rotation.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scipy/spatial/transform/rotation.py b/scipy/spatial/transform/rotation.py index 18fc86929e54..d3df924b33ce 100644 --- a/scipy/spatial/transform/rotation.py +++ b/scipy/spatial/transform/rotation.py @@ -1571,8 +1571,8 @@ def match_vectors(cls, a, b, weights=None, normalized=False): References ---------- .. [1] F. Landis Markley, - “Attitude determination using vector observations: a fast - optimal matrix algorithm”, Journal of Astronautical Sciences, + "Attitude determination using vector observations: a fast + optimal matrix algorithm", Journal of Astronautical Sciences, Vol. 41, No.2, 1993, pp. 261-280. .. [2] F. Landis Markley, "Attitude determination using vector observations and the From e305fe43cb22c1b7dd6d8407f7cb07505f0b990b Mon Sep 17 00:00:00 2001 From: Sylvain Gubian Date: Wed, 28 Nov 2018 09:17:32 +0100 Subject: [PATCH 40/70] BUG-Fix-Align-dual_annealing-interface --- scipy/optimize/_dual_annealing.py | 15 ++-- scipy/optimize/tests/test__dual_annealing.py | 72 ++++++++++---------- 2 files changed, 44 insertions(+), 43 deletions(-) diff --git a/scipy/optimize/_dual_annealing.py b/scipy/optimize/_dual_annealing.py index 3f8310022135..a47cd1910dbd 100644 --- a/scipy/optimize/_dual_annealing.py +++ b/scipy/optimize/_dual_annealing.py @@ -414,11 +414,11 @@ def local_search(self, x, e): return e, x_tmp -def dual_annealing(func, x0, bounds, args=(), maxiter=1000, +def dual_annealing(func, bounds, args=(), maxiter=1000, local_search_options={}, initial_temp=5230., restart_temp_ratio=2.e-5, visit=2.62, accept=-5.0, maxfun=1e7, seed=None, no_local_search=False, - callback=None): + callback=None, x0=None): """ Find the global minimum of a function using Dual Annealing. @@ -429,10 +429,6 @@ def dual_annealing(func, x0, bounds, args=(), maxiter=1000, ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array and ``args`` is a tuple of any additional fixed parameters needed to completely specify the function. - x0 : ndarray, shape(n,) - A single initial starting point coordinates. If ``None`` is provided, - initial coordinates are automatically generated (using the ``reset`` - method from the internal ``EnergyState`` class). bounds : sequence, shape (n, 2) Bounds for variables. ``(min, max)`` pairs for each element in ``x``, defining bounds for the objective function parameter. @@ -495,6 +491,8 @@ def dual_annealing(func, x0, bounds, args=(), maxiter=1000, - 2: detection done in the dual annealing process. If the callback implementation returns True, the algorithm will stop. + x0 : ndarray, shape(n,), optional + A single initial starting point coordinates. 
Returns ------- @@ -605,7 +603,10 @@ def dual_annealing(func, x0, bounds, args=(), maxiter=1000, raise ValueError('Some bounds values are inf values or nan values') # Checking that bounds are consistent if not np.all(lower < upper): - raise ValueError('Bounds are note consistent min < max') + raise ValueError('Bounds are not consistent min < max') + # Checking that bounds are the same length + if not len(lower) == len(upper): + raise ValueError('Bounds do not have the same dimensions') # Wrapper for the objective function func_wrapper = ObjectiveFunWrapper(func, maxfun, *args) diff --git a/scipy/optimize/tests/test__dual_annealing.py b/scipy/optimize/tests/test__dual_annealing.py index 10a9fed58f25..1e7b645042d3 100644 --- a/scipy/optimize/tests/test__dual_annealing.py +++ b/scipy/optimize/tests/test__dual_annealing.py @@ -101,41 +101,41 @@ def test_reset(self): def test_low_dim(self): ret = dual_annealing( - self.func, None, self.ld_bounds, seed=self.seed) + self.func, self.ld_bounds, seed=self.seed) assert_allclose(ret.fun, 0., atol=1e-12) def test_high_dim(self): - ret = dual_annealing(self.func, None, self.hd_bounds) + ret = dual_annealing(self.func, self.hd_bounds) assert_allclose(ret.fun, 0., atol=1e-12) def test_low_dim_no_ls(self): - ret = dual_annealing(self.func, None, self.ld_bounds, + ret = dual_annealing(self.func, self.ld_bounds, no_local_search=True) assert_allclose(ret.fun, 0., atol=1e-4) def test_high_dim_no_ls(self): - ret = dual_annealing(self.func, None, self.hd_bounds, + ret = dual_annealing(self.func, self.hd_bounds, no_local_search=True) assert_allclose(ret.fun, 0., atol=1e-4) def test_nb_fun_call(self): - ret = dual_annealing(self.func, None, self.ld_bounds) + ret = dual_annealing(self.func, self.ld_bounds) assert_equal(self.nb_fun_call, ret.nfev) def test_nb_fun_call_no_ls(self): - ret = dual_annealing(self.func, None, self.ld_bounds, - no_local_search=True) + ret = dual_annealing(self.func, self.ld_bounds, + no_local_search=True) assert_equal(self.nb_fun_call, ret.nfev) def test_max_reinit(self): - assert_raises(ValueError, dual_annealing, self.weirdfunc, None, - self.ld_bounds) + assert_raises(ValueError, dual_annealing, self.weirdfunc, + self.ld_bounds) def test_reproduce(self): seed = 1234 - res1 = dual_annealing(self.func, None, self.ld_bounds, seed=seed) - res2 = dual_annealing(self.func, None, self.ld_bounds, seed=seed) - res3 = dual_annealing(self.func, None, self.ld_bounds, seed=seed) + res1 = dual_annealing(self.func, self.ld_bounds, seed=seed) + res2 = dual_annealing(self.func, self.ld_bounds, seed=seed) + res3 = dual_annealing(self.func, self.ld_bounds, seed=seed) # If we have reproducible results, x components found has to # be exactly the same, which is not the case with no seeding assert_equal(res1.x, res2.x) @@ -143,22 +143,22 @@ def test_reproduce(self): def test_bounds_integrity(self): wrong_bounds = [(-5.12, 5.12), (1, 0), (5.12, 5.12)] - assert_raises(ValueError, dual_annealing, self.func, None, - wrong_bounds) + assert_raises(ValueError, dual_annealing, self.func, + wrong_bounds) def test_bound_validity(self): invalid_bounds = [(-5, 5), (-np.inf, 0), (-5, 5)] - assert_raises(ValueError, dual_annealing, self.func, None, - invalid_bounds) + assert_raises(ValueError, dual_annealing, self.func, + invalid_bounds) invalid_bounds = [(-5, 5), (0, np.inf), (-5, 5)] - assert_raises(ValueError, dual_annealing, self.func, None, - invalid_bounds) + assert_raises(ValueError, dual_annealing, self.func, + invalid_bounds) invalid_bounds = [(-5, 5), (0, np.nan), 
(-5, 5)] - assert_raises(ValueError, dual_annealing, self.func, None, - invalid_bounds) + assert_raises(ValueError, dual_annealing, self.func, + invalid_bounds) def test_max_fun_ls(self): - ret = dual_annealing(self.func, None, self.ld_bounds, maxfun=100) + ret = dual_annealing(self.func, self.ld_bounds, maxfun=100) ls_max_iter = min(max( len(self.ld_bounds) * LocalSearchWrapper.LS_MAXITER_RATIO, @@ -167,30 +167,30 @@ def test_max_fun_ls(self): assert ret.nfev <= 100 + ls_max_iter def test_max_fun_no_ls(self): - ret = dual_annealing(self.func, None, self.ld_bounds, + ret = dual_annealing(self.func, self.ld_bounds, no_local_search=True, maxfun=500) assert ret.nfev <= 500 def test_maxiter(self): - ret = dual_annealing(self.func, None, self.ld_bounds, maxiter=700) + ret = dual_annealing(self.func, self.ld_bounds, maxiter=700) assert ret.nit <= 700 # Testing that args are passed correctly for dual_annealing def test_fun_args_ls(self): - ret = dual_annealing(self.func, None, self.ld_bounds, + ret = dual_annealing(self.func, self.ld_bounds, args=((3.14159, ))) assert_allclose(ret.fun, 3.14159, atol=1e-6) # Testing that args are passed correctly for pure simulated annealing def test_fun_args_no_ls(self): - ret = dual_annealing(self.func, None, self.ld_bounds, + ret = dual_annealing(self.func, self.ld_bounds, args=((3.14159, )), no_local_search=True) assert_allclose(ret.fun, 3.14159, atol=1e-4) def test_callback_stop(self): # Testing that callback make the algorithm stop for # fun value <= 1.0 (see callback method) - ret = dual_annealing(self.func, None, self.ld_bounds, + ret = dual_annealing(self.func, self.ld_bounds, callback=self.callback) assert ret.fun <= 1.0 assert 'stop early' in ret.message[0] @@ -199,7 +199,7 @@ def test_neldermed_ls_minimizer(self): minimizer_opts = { 'method': 'Nelder-Mead', } - ret = dual_annealing(self.func, None, self.ld_bounds, + ret = dual_annealing(self.func, self.ld_bounds, local_search_options=minimizer_opts) assert_allclose(ret.fun, 0., atol=1e-6) @@ -207,7 +207,7 @@ def test_powell_ls_minimizer(self): minimizer_opts = { 'method': 'Powell', } - ret = dual_annealing(self.func, None, self.ld_bounds, + ret = dual_annealing(self.func, self.ld_bounds, local_search_options=minimizer_opts) assert_allclose(ret.fun, 0., atol=1e-8) @@ -215,7 +215,7 @@ def test_cg_ls_minimizer(self): minimizer_opts = { 'method': 'CG', } - ret = dual_annealing(self.func, None, self.ld_bounds, + ret = dual_annealing(self.func, self.ld_bounds, local_search_options=minimizer_opts) assert_allclose(ret.fun, 0., atol=1e-8) @@ -223,7 +223,7 @@ def test_bfgs_ls_minimizer(self): minimizer_opts = { 'method': 'BFGS', } - ret = dual_annealing(self.func, None, self.ld_bounds, + ret = dual_annealing(self.func, self.ld_bounds, local_search_options=minimizer_opts) assert_allclose(ret.fun, 0., atol=1e-8) @@ -231,7 +231,7 @@ def test_tnc_ls_minimizer(self): minimizer_opts = { 'method': 'TNC', } - ret = dual_annealing(self.func, None, self.ld_bounds, + ret = dual_annealing(self.func, self.ld_bounds, local_search_options=minimizer_opts) assert_allclose(ret.fun, 0., atol=1e-8) @@ -239,7 +239,7 @@ def test_colyba_ls_minimizer(self): minimizer_opts = { 'method': 'COBYLA', } - ret = dual_annealing(self.func, None, self.ld_bounds, + ret = dual_annealing(self.func, self.ld_bounds, local_search_options=minimizer_opts) assert_allclose(ret.fun, 0., atol=1e-5) @@ -247,20 +247,20 @@ def test_slsqp_ls_minimizer(self): minimizer_opts = { 'method': 'SLSQP', } - ret = dual_annealing(self.func, None, self.ld_bounds, + ret = 
dual_annealing(self.func, self.ld_bounds,
                              local_search_options=minimizer_opts)
         assert_allclose(ret.fun, 0., atol=1e-7)
 
     def test_wrong_restart_temp(self):
-        assert_raises(ValueError, dual_annealing, self.func, None,
+        assert_raises(ValueError, dual_annealing, self.func,
                       self.ld_bounds, restart_temp_ratio=1)
-        assert_raises(ValueError, dual_annealing, self.func, None,
+        assert_raises(ValueError, dual_annealing, self.func,
                       self.ld_bounds, restart_temp_ratio=0)
 
     def test_gradient_gnev(self):
         minimizer_opts = {
             'jac': self.rosen_der_wrapper,
         }
-        ret = dual_annealing(rosen, None, self.ld_bounds,
+        ret = dual_annealing(rosen, self.ld_bounds,
                              local_search_options=minimizer_opts)
         assert ret.njev == self.ngev

From 0664b2b7e4b819eea17a26f6f0a983de0c869335 Mon Sep 17 00:00:00 2001
From: Joscha Reimer
Date: Wed, 28 Nov 2018 10:14:23 +0100
Subject: [PATCH 41/70] BUG: scipy/_lib/_numpy_compat: get_randint now chooses
 correct low and high for old numpy versions

---
 scipy/_lib/_numpy_compat.py | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/scipy/_lib/_numpy_compat.py b/scipy/_lib/_numpy_compat.py
index c4875a22d1b6..30d4092584c1 100644
--- a/scipy/_lib/_numpy_compat.py
+++ b/scipy/_lib/_numpy_compat.py
@@ -95,8 +95,8 @@ def get_randint(random_state):
     # method of RandomState does only work with int32 values.
     def get_randint(random_state):
         def randint_patched(low, high, size, dtype=np.int32):
-            low = max(low, np.iinfo(dtype).min)
-            high = min(high, np.iinfo(dtype).max)
+            low = max(low, np.iinfo(dtype).min, np.iinfo(np.int32).min)
+            high = min(high, np.iinfo(dtype).max, np.iinfo(np.int32).max)
             integers = random_state.randint(low, high=high, size=size)
             return integers.astype(dtype, copy=False)
         return randint_patched
@@ -779,6 +779,3 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
     c = dot(X, X_T.conj())
     c *= 1. / np.float64(fact)
     return c.squeeze()
-
-
-

From ccee3ea38463c623477e8eafe9c629f9214c8592 Mon Sep 17 00:00:00 2001
From: Sylvain Gubian
Date: Wed, 28 Nov 2018 10:40:02 +0100
Subject: [PATCH 42/70] Adjusting doc with the new interface

---
 scipy/optimize/_dual_annealing.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scipy/optimize/_dual_annealing.py b/scipy/optimize/_dual_annealing.py
index a47cd1910dbd..fe7f77e50803 100644
--- a/scipy/optimize/_dual_annealing.py
+++ b/scipy/optimize/_dual_annealing.py
@@ -580,7 +580,7 @@ def dual_annealing(func, bounds, args=(), maxiter=1000,
     >>> func = lambda x: np.sum(x*x - 10*np.cos(2*np.pi*x)) + 10*np.size(x)
     >>> lw = [-5.12] * 10
     >>> up = [5.12] * 10
-    >>> ret = dual_annealing(func, None, bounds=list(zip(lw, up)), seed=1234)
+    >>> ret = dual_annealing(func, bounds=list(zip(lw, up)), seed=1234)
     >>> print("global minimum: xmin = {0}, f(xmin) = {1:.6f}".format(
     ...       ret.x, ret.fun))
     global minimum: xmin = [-4.26437714e-09 -3.91699361e-09 -1.86149218e-09 -3.97165720e-09

From ca74b29c84eea349c54532f2d4697023cb526629 Mon Sep 17 00:00:00 2001
From: Sylvain Gubian
Date: Wed, 28 Nov 2018 21:28:48 +0100
Subject: [PATCH 43/70] Doc adjustment, wrong plural

---
 scipy/optimize/_dual_annealing.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scipy/optimize/_dual_annealing.py b/scipy/optimize/_dual_annealing.py
index fe7f77e50803..ca9e940b4c6b 100644
--- a/scipy/optimize/_dual_annealing.py
+++ b/scipy/optimize/_dual_annealing.py
@@ -492,7 +492,7 @@ def dual_annealing(func, bounds, args=(), maxiter=1000,
         If the callback implementation returns True, the algorithm will
         stop.
x0 : ndarray, shape(n,), optional - A single initial starting point coordinates. + Coordinates of a single n-dimensional starting point. Returns ------- From 26d13f795f8bda8d7c3721e3df64bed628bb8686 Mon Sep 17 00:00:00 2001 From: Joscha Reimer Date: Thu, 29 Nov 2018 09:25:05 +0100 Subject: [PATCH 44/70] Revert "TST: test_random_sampling 32-bit handling" This reverts commit 6379cc8366e7665f87bd8a10878590942c6996a3. --- scipy/sparse/tests/test_construct.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/scipy/sparse/tests/test_construct.py b/scipy/sparse/tests/test_construct.py index e721f6cb39ec..84c80ae299ad 100644 --- a/scipy/sparse/tests/test_construct.py +++ b/scipy/sparse/tests/test_construct.py @@ -2,7 +2,6 @@ from __future__ import division, print_function, absolute_import -import sys import numpy as np from numpy import array, matrix from numpy.testing import (assert_equal, assert_, @@ -10,7 +9,6 @@ import pytest from pytest import raises as assert_raises from scipy._lib._testutils import check_free_memory -from scipy._lib._version import NumpyVersion from scipy.sparse import csr_matrix, coo_matrix @@ -422,9 +420,6 @@ def test_block_diag_1(self): assert_equal(construct.block_diag([1]).todense(), matrix([[1]])) - @pytest.mark.xfail((NumpyVersion(np.__version__) < "1.11.0" and - sys.maxsize <= 2**32), - reason="randint not compatible") def test_random_sampling(self): # Simple sanity checks for sparse random sampling. for f in sprand, _sprandn: From f7b9c45213750c6151c7f34cba44dc69cf39cb86 Mon Sep 17 00:00:00 2001 From: Dowon Date: Fri, 30 Nov 2018 15:18:09 +0900 Subject: [PATCH 45/70] clean up variables and argument changes --- scipy/stats/tests/test_stats.py | 107 ++++++++++++++------------------ 1 file changed, 48 insertions(+), 59 deletions(-) diff --git a/scipy/stats/tests/test_stats.py b/scipy/stats/tests/test_stats.py index 8a312ab3b1d7..e66ca50a7527 100644 --- a/scipy/stats/tests/test_stats.py +++ b/scipy/stats/tests/test_stats.py @@ -3560,15 +3560,13 @@ def test_obrientransform(): class StatsTestMethod(object): - def equal_gmean_test(self, array_like, desired, axis=None, dtype=None, significant=7): + def equal_gmean_test(self, array_like, desired, axis=None, dtype=None, rtol=1e-7): # Note this doesn't test when axis is not specified - rtol = np.float_power(10, -1.0 * significant) x = stats.gmean(array_like, axis=axis, dtype=dtype) assert_allclose(x, desired, rtol=rtol) assert_equal(x.dtype, dtype) - def equal_hmean_test(self, array_like, desired, axis=None, dtype=None, significant=7): - rtol = np.float_power(10, -1.0 * significant) + def equal_hmean_test(self, array_like, desired, axis=None, dtype=None, rtol=1e-7): x = stats.hmean(array_like, axis=axis, dtype=dtype) assert_allclose(x, desired, rtol=rtol) assert_equal(x.dtype, dtype) @@ -3578,168 +3576,159 @@ class TestHarMean(StatsTestMethod): def test_1d_list(self): # Test a 1d list a = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100] - b = 34.1417152147 - self.equal_hmean_test(a, b) + desired = 34.1417152147 + self.equal_hmean_test(a, desired) a = [1, 2, 3, 4] - b = 4. / (1. / 1 + 1. / 2 + 1. / 3 + 1. / 4) - self.equal_hmean_test(a, b) + desired = 4. / (1. / 1 + 1. / 2 + 1. / 3 + 1. 
/ 4) + self.equal_hmean_test(a, desired) def test_1d_array(self): # Test a 1d array a = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]) - b = 34.1417152147 - self.equal_hmean_test(a, b) + desired = 34.1417152147 + self.equal_hmean_test(a, desired) # Note the next tests use axis=None as default, not axis=0 def test_2d_list(self): # Test a 2d list a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] - b = 38.6696271841 - self.equal_hmean_test(a, b) + desired = 38.6696271841 + self.equal_hmean_test(a, desired) def test_2d_array(self): # Test a 2d array a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] - b = 38.6696271841 - self.equal_hmean_test(np.array(a), b) + desired = 38.6696271841 + self.equal_hmean_test(np.array(a), desired) def test_2d_axis0(self): # Test a 2d list with axis=0 a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] - b = np.array([22.88135593, 39.13043478, 52.90076336, 65.45454545]) - self.equal_hmean_test(a, b, axis=0) + desired = np.array([22.88135593, 39.13043478, 52.90076336, 65.45454545]) + self.equal_hmean_test(a, desired, axis=0) def test_2d_axis1(self): # Test a 2d list with axis=1 a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] - b = np.array([19.2, 63.03939962, 103.80078637]) - self.equal_hmean_test(a, b, axis=1) + desired = np.array([19.2, 63.03939962, 103.80078637]) + self.equal_hmean_test(a, desired, axis=1) def test_2d_matrix_axis0(self): # Test a 2d list with axis=0 a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] - b = np.matrix([[22.88135593, 39.13043478, 52.90076336, 65.45454545]]) - self.equal_hmean_test(np.matrix(a), b, axis=0) + desired = np.matrix([[22.88135593, 39.13043478, 52.90076336, 65.45454545]]) + self.equal_hmean_test(np.matrix(a), desired, axis=0) def test_2d_matrix_axis1(self): # Test a 2d list with axis=1 a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] - b = np.matrix([[19.2, 63.03939962, 103.80078637]]).T - self.equal_hmean_test(np.matrix(a), b, axis=1) + desired = np.matrix([[19.2, 63.03939962, 103.80078637]]).T + self.equal_hmean_test(np.matrix(a), desired, axis=1) class TestGeoMean(StatsTestMethod): def test_1d_list(self): # Test a 1d list a = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100] - b = 45.2872868812 - self.equal_gmean_test(a, b) + desired = 45.2872868812 + self.equal_gmean_test(a, desired) a = [1, 2, 3, 4] desired = power(1 * 2 * 3 * 4, 1. / 4.) - self.equal_gmean_test(a, desired, significant=14) - - desired1 = stats.gmean(a, axis=-1) - self.equal_gmean_test(a, desired1, axis=-1) + self.equal_gmean_test(a, desired, rtol=1e-14) def test_1d_array(self): # Test a 1d array a = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]) - b = 45.2872868812 - self.equal_gmean_test(a, b) + desired = 45.2872868812 + self.equal_gmean_test(a, desired) a = array([1, 2, 3, 4], float32) desired = power(1 * 2 * 3 * 4, 1. / 4.) 
self.equal_gmean_test(a, desired, dtype=float32) - desired1 = stats.gmean(a, axis=-1) - self.equal_gmean_test(a, desired1, axis=-1, dtype=float32) - # Note the next tests use axis=None as default, not axis=0 def test_2d_list(self): # Test a 2d list a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] - b = 52.8885199 - self.equal_gmean_test(a, b) + desired = 52.8885199 + self.equal_gmean_test(a, desired) def test_2d_array(self): # Test a 2d array a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] - b = 52.8885199 - self.equal_gmean_test(array(a), b) + desired = 52.8885199 + self.equal_gmean_test(array(a), desired) def test_2d_axis0(self): # Test a 2d list with axis=0 a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] - b = np.array([35.56893304, 49.32424149, 61.3579244, 72.68482371]) - self.equal_gmean_test(a, b, axis=0) + desired = np.array([35.56893304, 49.32424149, 61.3579244, 72.68482371]) + self.equal_gmean_test(a, desired, axis=0) a = array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) desired = array([1, 2, 3, 4]) - self.equal_gmean_test(a, desired, axis=0, significant=14) - - a = array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) - desired = stats.gmean(a, axis=0) - self.equal_gmean_test(a, desired, axis=0, significant=14) + self.equal_gmean_test(a, desired, axis=0, rtol=1e-14) def test_2d_axis1(self): # Test a 2d list with axis=1 a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] - b = np.array([22.13363839, 64.02171746, 104.40086817]) - self.equal_gmean_test(a, b, axis=1) + desired = np.array([22.13363839, 64.02171746, 104.40086817]) + self.equal_gmean_test(a, desired, axis=1) a = array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) v = power(1 * 2 * 3 * 4, 1. / 4.) desired = array([v, v, v]) - self.equal_gmean_test(a, desired, axis=1, significant=14) + self.equal_gmean_test(a, desired, axis=1, rtol=1e-14) def test_2d_matrix_axis0(self): # Test a 2d list with axis=0 a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] - b = np.matrix([[35.56893304, 49.32424149, 61.3579244, 72.68482371]]) - self.equal_gmean_test(np.matrix(a), b, axis=0) + desired = np.matrix([[35.56893304, 49.32424149, 61.3579244, 72.68482371]]) + self.equal_gmean_test(np.matrix(a), desired, axis=0) a = array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) desired = np.matrix([1, 2, 3, 4]) - self.equal_gmean_test(np.matrix(a), desired, axis=0, significant=14) + self.equal_gmean_test(np.matrix(a), desired, axis=0, rtol=1e-14) a = array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) desired = np.matrix(stats.gmean(a, axis=0)) - self.equal_gmean_test(np.matrix(a), desired, axis=0, significant=14) + self.equal_gmean_test(np.matrix(a), desired, axis=0, rtol=1e-14) def test_2d_matrix_axis1(self): # Test a 2d list with axis=1 a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] - b = np.matrix([[22.13363839, 64.02171746, 104.40086817]]).T - self.equal_gmean_test(np.matrix(a), b, axis=1) + desired = np.matrix([[22.13363839, 64.02171746, 104.40086817]]).T + self.equal_gmean_test(np.matrix(a), desired, axis=1) a = array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) v = power(1 * 2 * 3 * 4, 1. / 4.) 
desired = np.matrix([[v], [v], [v]]) - self.equal_gmean_test(np.matrix(a), desired, axis=1, significant=14) + self.equal_gmean_test(np.matrix(a), desired, axis=1, rtol=1e-14) def test_large_values(self): a = array([1e100, 1e200, 1e300]) - self.equal_gmean_test(a, 1e200, significant=13) + desired = 1e200 + self.equal_gmean_test(a, desired, rtol=1e-13) def test_1d_list0(self): # Test a 1d list with zero element a = [10, 20, 30, 40, 50, 60, 70, 80, 90, 0] - b = 0.0 # due to exp(-inf)=0 + desired = 0.0 # due to exp(-inf)=0 olderr = np.seterr(all='ignore') try: - self.equal_gmean_test(a, b) + self.equal_gmean_test(a, desired) finally: np.seterr(**olderr) def test_1d_array0(self): # Test a 1d array with zero element a = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 0]) - b = 0.0 # due to exp(-inf)=0 + desired = 0.0 # due to exp(-inf)=0 olderr = np.seterr(all='ignore') try: - self.equal_gmean_test(a, b) + self.equal_gmean_test(a, desired) finally: np.seterr(**olderr) From 875ec794d490666ddbb8decc8166f8cd3fa96f3b Mon Sep 17 00:00:00 2001 From: Dowon Date: Fri, 30 Nov 2018 15:19:18 +0900 Subject: [PATCH 46/70] clean up variables and argument changes --- scipy/stats/tests/test_mstats_basic.py | 66 ++++++++++---------------- 1 file changed, 26 insertions(+), 40 deletions(-) diff --git a/scipy/stats/tests/test_mstats_basic.py b/scipy/stats/tests/test_mstats_basic.py index a5a815ae6930..b416931f3788 100644 --- a/scipy/stats/tests/test_mstats_basic.py +++ b/scipy/stats/tests/test_mstats_basic.py @@ -43,15 +43,13 @@ def test_mquantiles_limit_keyword(self): class MStatsTestMethod(object): - def equal_gmean_test(self, array_like, desired, axis=None, dtype=None, significant=7): + def equal_gmean_test(self, array_like, desired, axis=None, dtype=None, rtol=1e-7): # Note this doesn't test when axis is not specified - rtol = np.float_power(10, -1.0 * significant) x = mstats.gmean(array_like, axis=axis, dtype=dtype) assert_allclose(x, desired, rtol=rtol) assert_equal(x.dtype, dtype) - def equal_hmean_test(self, array_like, desired, axis=None, dtype=None, significant=7): - rtol = np.float_power(10, -1.0 * significant) + def equal_hmean_test(self, array_like, desired, axis=None, dtype=None, rtol=1e-7): x = stats.hmean(array_like, axis=axis, dtype=dtype) assert_allclose(x, desired, rtol=rtol) assert_equal(x.dtype, dtype) @@ -61,48 +59,41 @@ class TestGeoMean(MStatsTestMethod): def test_1d(self): a = [1, 2, 3, 4] desired = np.power(1*2*3*4, 1./4.) - self.equal_gmean_test(a, desired, significant=14) - - desired1 = mstats.gmean(a, axis=-1) - self.equal_gmean_test(a, desired1, significant=14) - assert_(not isinstance(desired1, ma.MaskedArray)) + self.equal_gmean_test(a, desired, rtol=1e-14) def test_1d_ma(self): # Test a 1d masked array a = ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]) - b = 45.2872868812 - self.equal_gmean_test(a, b) + desired = 45.2872868812 + self.equal_gmean_test(a, desired) a = ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1]) desired = np.power(1*2*3, 1./3.) 
- self.equal_gmean_test(a, desired, significant=14) - - desired1 = mstats.gmean(a, axis=-1) - self.equal_gmean_test(a, desired1, significant=14) + self.equal_gmean_test(a, desired, rtol=1e-14) def test_1d_ma_value(self): # Test a 1d masked array with a masked value a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100], mask=[0, 0, 0, 0, 0, 0, 0, 0, 0, 1]) - b = 41.4716627439 - self.equal_gmean_test(a, b) + desired = 41.4716627439 + self.equal_gmean_test(a, desired) def test_1d_ma0(self): # Test a 1d masked array with zero element a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 0]) - b = 41.4716627439 + desired = 41.4716627439 olderr = np.seterr(all='ignore') try: - self.equal_gmean_test(a, b) + self.equal_gmean_test(a, desired) finally: np.seterr(**olderr) def test_1d_ma_inf(self): # Test a 1d masked array with negative element a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, -1]) - b = 41.4716627439 + desired = 41.4716627439 olderr = np.seterr(all='ignore') try: - self.equal_gmean_test(a, b) + self.equal_gmean_test(a, desired) finally: np.seterr(**olderr) @@ -110,44 +101,39 @@ def test_1d_ma_inf(self): def test_1d_float96(self): a = ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1]) desired_dt = np.power(1*2*3, 1./3.).astype(np.float96) - self.equal_gmean_test(a, desired_dt, dtype=np.float96, significant=14) + self.equal_gmean_test(a, desired_dt, dtype=np.float96, rtol=1e-14) def test_2d_ma(self): a = ma.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]], mask=[[0, 0, 0, 0], [1, 0, 0, 1], [0, 1, 1, 0]]) desired = np.array([1, 2, 3, 4]) - self.equal_gmean_test(a, desired, axis=0, significant=14) - - desired1 = mstats.gmean(a, axis=0) - self.equal_gmean_test(a, desired1, axis=0, significant=14) + self.equal_gmean_test(a, desired, axis=0, rtol=1e-14) desired = ma.array([np.power(1*2*3*4, 1./4.), np.power(2*3, 1./2.), np.power(1*4, 1./2.)]) - self.equal_gmean_test(a, desired, axis=-1, significant=14) + self.equal_gmean_test(a, desired, axis=-1, rtol=1e-14) # Test a 2d masked array a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] - b = 52.8885199 - self.equal_gmean_test(np.ma.array(a), b) + desired = 52.8885199 + self.equal_gmean_test(np.ma.array(a), desired) class TestHarMean(MStatsTestMethod): def test_1d(self): a = ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1]) desired = 3. 
/ (1./1 + 1./2 + 1./3) - self.equal_hmean_test(a, desired, significant=14) - desired1 = mstats.hmean(a, axis=-1) - self.equal_hmean_test(a, desired1, axis=-1, significant=14) + self.equal_hmean_test(a, desired, rtol=1e-14) a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]) - b = 34.1417152147 - self.equal_hmean_test(a, b) + desired = 34.1417152147 + self.equal_hmean_test(a, desired) a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100], mask=[0, 0, 0, 0, 0, 0, 0, 0, 0, 1]) - b = 31.8137186141 - self.equal_hmean_test(a, b) + desired = 31.8137186141 + self.equal_hmean_test(a, desired) @pytest.mark.skipif(not hasattr(np, 'float96'), reason='cannot find float96 so skipping') def test_1d_float96(self): @@ -159,14 +145,14 @@ def test_2d(self): a = ma.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]], mask=[[0, 0, 0, 0], [1, 0, 0, 1], [0, 1, 1, 0]]) desired = ma.array([1, 2, 3, 4]) - self.equal_hmean_test(a, desired, axis=0, significant=14) + self.equal_hmean_test(a, desired, axis=0, rtol=1e-14) desired = [4./(1/1.+1/2.+1/3.+1/4.), 2./(1/2.+1/3.), 2./(1/1.+1/4.)] - self.equal_hmean_test(a, desired, axis=-1, significant=14) + self.equal_hmean_test(a, desired, axis=-1, rtol=1e-14) a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] - b = 38.6696271841 - self.equal_hmean_test(np.ma.array(a), b) + desired = 38.6696271841 + self.equal_hmean_test(np.ma.array(a), desired) class TestRanking(object): From 98e45a23067e762d6dc56fe2048240f0c2b59cb3 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Thu, 29 Nov 2018 22:21:29 -0800 Subject: [PATCH 47/70] CI: run PyPy tests on CircleCI with single-threaded OpenBLAS. Closes gh-9530. Closes gh-9508. Skip TravisCI tests: [ci skip] --- .circleci/config.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.circleci/config.yml b/.circleci/config.yml index 40f660575b6e..04ebe1435621 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -137,6 +137,9 @@ jobs: command: | # CircleCI has 4G memory limit, play it safe export SCIPY_AVAILABLE_MEM=1G + # Limit OpenBLAS to 1 thread, PyPy crashes otherwise (at least for + # OpenBLAS v0.3.0), see gh-9530. + export OPENBLAS_NUM_THREADS=1 pypy3 runtests.py -- -rfEX -n 3 --durations=30 From 517fb558887b68bd158cd90f5cc6306ba5a6f9e9 Mon Sep 17 00:00:00 2001 From: Christoph Deil Date: Fri, 30 Nov 2018 18:33:41 +0100 Subject: [PATCH 48/70] Tiny docstring format fix in scipy.optimize.shgo --- scipy/optimize/_shgo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scipy/optimize/_shgo.py b/scipy/optimize/_shgo.py index 35b7ee5f1af2..afa3bd86a7b8 100644 --- a/scipy/optimize/_shgo.py +++ b/scipy/optimize/_shgo.py @@ -315,7 +315,7 @@ def shgo(func, bounds, args=(), constraints=None, n=100, iters=1, callback=None, (array([ 512. , 404.23180542]), -959.64066272085051) `shgo` also has a return for any other local minima that was found, these - can be called using: + can be called using: >>> result.xl array([[ 512. 
, 404.23180542],

From ef3d4dfa6ead2d558461e8df0ae47f735cec3f14 Mon Sep 17 00:00:00 2001
From: Andrew Nelson
Date: Sun, 2 Dec 2018 20:56:33 +1100
Subject: [PATCH 49/70] MAINT: brute-test with instance method

---
 scipy/optimize/tests/test_optimize.py | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)

diff --git a/scipy/optimize/tests/test_optimize.py b/scipy/optimize/tests/test_optimize.py
index b96faec95d6c..7259e462bbca 100644
--- a/scipy/optimize/tests/test_optimize.py
+++ b/scipy/optimize/tests/test_optimize.py
@@ -1262,6 +1262,10 @@ def setup_method(self):
         self.rranges = (slice(-4, 4, 0.25), slice(-4, 4, 0.25))
         self.solution = np.array([-1.05665192, 1.80834843])
 
+    def brute_func(self, z, *params):
+        # an instance method wrapping the module-level brute_func
+        return brute_func(z, *params)
+
     def test_brute(self):
         # test fmin
         resbrute = optimize.brute(brute_func, self.rranges, args=self.params,
@@ -1278,6 +1282,13 @@ def test_brute(self):
         assert_allclose(resbrute[1], brute_func(self.solution, *self.params),
                         atol=1e-3)
 
+        # test that brute can optimize an instance method (the other tests
+        # use a non-class-based function)
+        resbrute = optimize.brute(self.brute_func, self.rranges,
+                                  args=self.params, full_output=True,
+                                  finish=optimize.minimize)
+        assert_allclose(resbrute[0], self.solution, atol=1e-3)
+
     def test_1D(self):
         # test that for a 1D problem the test function is passed an array,
         # not a scalar.
@@ -1296,8 +1307,8 @@ def test_workers(self):
         resbrute1 = optimize.brute(brute_func, self.rranges,
                                    args=self.params, full_output=True,
                                    finish=None, workers=2)
-        assert_equal(resbrute1[-1], resbrute[-1])
-        assert_equal(resbrute1[0], resbrute[0])
+        assert_allclose(resbrute1[-1], resbrute[-1])
+        assert_allclose(resbrute1[0], resbrute[0])
 
 
 class TestIterationLimits(object):

From 9cdcaffd962d0fdd6aca607a7bcd455472c65a8a Mon Sep 17 00:00:00 2001
From: mattip
Date: Sat, 1 Dec 2018 17:24:28 -0800
Subject: [PATCH 50/70] ENH: remove noprefix.h, change code appropriately

---
 scipy/signal/S_bspline_util.c   | 15 +++---
 scipy/signal/correlate_nd.c.src | 11 ++---
 scipy/signal/firfilter.c        | 74 ++++++++++++++--------------
 scipy/signal/lfilter.c.src      | 78 +++++++++++++++---------------
 scipy/signal/medianfilter.c     | 10 ++--
 scipy/signal/sigtools.h         | 11 +++--
 scipy/signal/sigtoolsmodule.c   | 85 ++++++++++++++++-----------------
 7 files changed, 144 insertions(+), 140 deletions(-)

diff --git a/scipy/signal/S_bspline_util.c b/scipy/signal/S_bspline_util.c
index 97fed7d0e784..1d8962342d47 100644
--- a/scipy/signal/S_bspline_util.c
+++ b/scipy/signal/S_bspline_util.c
@@ -5,7 +5,7 @@
 #include
 #include
 #define NO_IMPORT_ARRAY
-#include "numpy/noprefix.h"
+#include "numpy/ndarrayobject.h"
 
 void compute_root_from_lambda(double, double *, double *);
 
@@ -21,10 +21,11 @@ void S_IIR_order2(float,float,float,float*,float*,int,int,int);
 void S_IIR_order2_cascade(float,float,float,float,float*,float*,int,int,int);
 int S_IIR_forback1(float,float,float*,float*,int,int,int,float);
 void S_FIR_mirror_symmetric(float*,float*,int,float*,int,int,int);
-int S_separable_2Dconvolve_mirror(float*,float*,int,int,float*,float*,int,int,intp*,intp*);
+int S_separable_2Dconvolve_mirror(float*,float*,int,int,float*,float*,int,int,
+                                  npy_intp*,npy_intp*);
 int S_IIR_forback2(double,double,float*,float*,int,int,int,float);
-int S_cubic_spline2D(float*,float*,int,int,double,intp*,intp*,float);
-int S_quadratic_spline2D(float*,float*,int,int,double,intp*,intp*,float);
+int S_cubic_spline2D(float*,float*,int,int,double,npy_intp*,npy_intp*,float);
+int
S_quadratic_spline2D(float*,float*,int,int,double,npy_intp*,npy_intp*,float); /* Implement the following difference equation */ /* y[n] = a1 * x[n] + a2 * y[n-1] */ @@ -235,7 +236,7 @@ S_FIR_mirror_symmetric (float *in, float *out, int N, float *h, int Nh, int S_separable_2Dconvolve_mirror(float *in, float *out, int M, int N, float *hr, float *hc, int Nhr, - int Nhc, intp *instrides, intp *outstrides) { + int Nhc, npy_intp *instrides, npy_intp *outstrides) { int m, n; float *tmpmem; float *inptr=NULL, *outptr=NULL; @@ -458,7 +459,7 @@ S_IIR_forback2 (double r, double omega, float *x, float *y, int S_cubic_spline2D(float *image, float *coeffs, int M, int N, double lambda, - intp *strides, intp *cstrides, float precision) { + npy_intp *strides, npy_intp *cstrides, float precision) { double r, omega; float *inptr; float *coptr; @@ -545,7 +546,7 @@ S_cubic_spline2D(float *image, float *coeffs, int M, int N, double lambda, int S_quadratic_spline2D(float *image, float *coeffs, int M, int N, double lambda, - intp *strides, intp *cstrides, float precision) { + npy_intp *strides, npy_intp *cstrides, float precision) { double r; float *inptr; float *coptr; diff --git a/scipy/signal/correlate_nd.c.src b/scipy/signal/correlate_nd.c.src index 5eeb30ab3b0a..1d3c3c17cb69 100644 --- a/scipy/signal/correlate_nd.c.src +++ b/scipy/signal/correlate_nd.c.src @@ -5,8 +5,7 @@ #include #define PY_ARRAY_UNIQUE_SYMBOL _scipy_signal_ARRAY_API #define NO_IMPORT_ARRAY -#include - +#include "numpy/ndarrayobject.h" #include "sigtools.h" enum { @@ -107,10 +106,10 @@ clean_ax: */ /**begin repeat - * #fsuf = ubyte, byte, ushort, short, uint, int, ulong, long, ulonglong, - * longlong, float, double, longdouble# - * #type = ubyte, byte, ushort, short, uint, int, ulong, long, ulonglong, - * longlong, float, double, npy_longdouble# + * #fsuf = ubyte, byte, ushort, short, uint, int, ulong, + * long, ulonglong, longlong, float, double, longdouble# + * #type = npy_ubyte, npy_byte, npy_ushort, short, npy_uint, int, npy_ulong, + * long, npy_ulonglong, npy_longlong, float, double, npy_longdouble# */ static int _imp_correlate_nd_@fsuf@(PyArrayNeighborhoodIterObject *curx, diff --git a/scipy/signal/firfilter.c b/scipy/signal/firfilter.c index 21d40607ea36..0545331cbae2 100644 --- a/scipy/signal/firfilter.c +++ b/scipy/signal/firfilter.c @@ -1,31 +1,32 @@ #define NO_IMPORT_ARRAY +#include "numpy/ndarrayobject.h" #include "sigtools.h" -static int elsizes[] = {sizeof(Bool), - sizeof(byte), - sizeof(ubyte), - sizeof(short), - sizeof(ushort), +static int elsizes[] = {sizeof(npy_bool), + sizeof(npy_byte), + sizeof(npy_ubyte), + sizeof(npy_short), + sizeof(npy_ushort), sizeof(int), - sizeof(uint), - sizeof(long), - sizeof(ulong), - sizeof(longlong), - sizeof(ulonglong), + sizeof(npy_uint), + sizeof(long), + sizeof(npy_ulong), + sizeof(npy_longlong), + sizeof(npy_ulonglong), sizeof(float), sizeof(double), - sizeof(longdouble), - sizeof(cfloat), - sizeof(cdouble), - sizeof(clongdouble), + sizeof(npy_longdouble), + sizeof(npy_cfloat), + sizeof(npy_cdouble), + sizeof(npy_clongdouble), sizeof(void *), 0,0,0,0}; -typedef void (OneMultAddFunction) (char *, char *, intp, char **, intp); +typedef void (OneMultAddFunction) (char *, char *, npy_intp, char **, npy_intp); #define MAKE_ONEMULTADD(fname, type) \ -static void fname ## _onemultadd(char *sum, char *term1, intp str, char **pvals, intp n) { \ - intp k; \ +static void fname ## _onemultadd(char *sum, char *term1, npy_intp str, char **pvals, npy_intp n) { \ + npy_intp k; \ type dsum = *(type*)sum; 
\ for (k=0; k < n; k++) { \ type tmp = *(type*)(term1 + k * str); \ @@ -34,21 +35,21 @@ static void fname ## _onemultadd(char *sum, char *term1, intp str, char **pvals, *(type*)(sum) = dsum; \ } -MAKE_ONEMULTADD(UBYTE, ubyte) -MAKE_ONEMULTADD(USHORT, ushort) -MAKE_ONEMULTADD(UINT, uint) -MAKE_ONEMULTADD(ULONG, ulong) -MAKE_ONEMULTADD(ULONGLONG, ulonglong) +MAKE_ONEMULTADD(UBYTE, npy_ubyte) +MAKE_ONEMULTADD(USHORT, npy_ushort) +MAKE_ONEMULTADD(UINT, npy_uint) +MAKE_ONEMULTADD(ULONG, npy_ulong) +MAKE_ONEMULTADD(ULONGLONG, npy_ulonglong) -MAKE_ONEMULTADD(BYTE, byte) +MAKE_ONEMULTADD(BYTE, npy_byte) MAKE_ONEMULTADD(SHORT, short) MAKE_ONEMULTADD(INT, int) MAKE_ONEMULTADD(LONG, long) -MAKE_ONEMULTADD(LONGLONG, longlong) +MAKE_ONEMULTADD(LONGLONG, npy_longlong) MAKE_ONEMULTADD(FLOAT, float) MAKE_ONEMULTADD(DOUBLE, double) -MAKE_ONEMULTADD(LONGDOUBLE, longdouble) +MAKE_ONEMULTADD(LONGDOUBLE, npy_longdouble) #ifdef __GNUC__ MAKE_ONEMULTADD(CFLOAT, __complex__ float) @@ -64,18 +65,19 @@ static void fname ## _onemultadd2(char *sum, char *term1, char *term2) { \ return; } #define MAKE_C_ONEMULTADD2(fname, type) \ -static void fname ## _onemultadd(char *sum, char *term1, intp str, char **pvals, intp n) { \ - intp k; \ +static void fname ## _onemultadd(char *sum, char *term1, npy_intp str, + char **pvals, npy_intp n) { \ + npy_intp k; \ for (k=0; k < n; k++) { \ fname ## _onemultadd2(sum, term1 + k * str, pvals[k]); \ } \ } MAKE_C_ONEMULTADD(CFLOAT, float) MAKE_C_ONEMULTADD(CDOUBLE, double) -MAKE_C_ONEMULTADD(CLONGDOUBLE, longdouble) +MAKE_C_ONEMULTADD(CLONGDOUBLE, npy_longdouble) MAKE_C_ONEMULTADD2(CFLOAT, float) MAKE_C_ONEMULTADD2(CDOUBLE, double) -MAKE_C_ONEMULTADD2(CLONGDOUBLE, longdouble) +MAKE_C_ONEMULTADD2(CLONGDOUBLE, npy_longdouble) #endif /* __GNUC__ */ static OneMultAddFunction *OneMultAdd[]={NULL, @@ -102,13 +104,13 @@ static OneMultAddFunction *OneMultAdd[]={NULL, int pylab_convolve_2d (char *in, /* Input data Ns[0] x Ns[1] */ - intp *instr, /* Input strides */ + npy_intp *instr, /* Input strides */ char *out, /* Output data */ - intp *outstr, /* Output strides */ + npy_intp *outstr, /* Output strides */ char *hvals, /* coefficients in filter */ - intp *hstr, /* coefficients strides */ - intp *Nwin, /* Size of kernel Nwin[0] x Nwin[1] */ - intp *Ns, /* Size of image Ns[0] x Ns[1] */ + npy_intp *hstr, /* coefficients strides */ + npy_intp *Nwin, /* Size of kernel Nwin[0] x Nwin[1] */ + npy_intp *Ns, /* Size of image Ns[0] x Ns[1] */ int flag, /* convolution parameters */ char *fillvalue) /* fill value */ { @@ -180,13 +182,13 @@ int pylab_convolve_2d (char *in, /* Input data Ns[0] x Ns[1] */ if (!bounds_pad_flag) ind0_memory = ind0*instr[0]; if (bounds_pad_flag) { - intp k; + npy_intp k; for (k=0; k < Nwin[1]; k++) { indices[k] = fillvalue; } } else { - intp k; + npy_intp k; for (k=0; k < Nwin[1]; k++) { ind1 = convolve ? 
(new_n-k) : (new_n+k); if (ind1 < 0) { diff --git a/scipy/signal/lfilter.c.src b/scipy/signal/lfilter.c.src index 9a9986462348..def607b8d855 100644 --- a/scipy/signal/lfilter.c.src +++ b/scipy/signal/lfilter.c.src @@ -5,7 +5,7 @@ #include #define PY_ARRAY_UNIQUE_SYMBOL _scipy_signal_ARRAY_API #define NO_IMPORT_ARRAY -#include +#include #include #if PY_VERSION_HEX >= 0x03000000 @@ -15,28 +15,29 @@ #include "sigtools.h" static void FLOAT_filt(char *b, char *a, char *x, char *y, char *Z, - intp len_b, uintp len_x, intp stride_X, - intp stride_Y); + npy_intp len_b, npy_uintp len_x, npy_intp stride_X, + npy_intp stride_Y); static void DOUBLE_filt(char *b, char *a, char *x, char *y, char *Z, - intp len_b, uintp len_x, intp stride_X, - intp stride_Y); + npy_intp len_b, npy_uintp len_x, npy_intp stride_X, + npy_intp stride_Y); static void EXTENDED_filt(char *b, char *a, char *x, char *y, char *Z, - intp len_b, uintp len_x, intp stride_X, - intp stride_Y); + npy_intp len_b, npy_uintp len_x, npy_intp stride_X, + npy_intp stride_Y); static void CFLOAT_filt(char *b, char *a, char *x, char *y, char *Z, - intp len_b, uintp len_x, intp stride_X, - intp stride_Y); + npy_intp len_b, npy_uintp len_x, npy_intp stride_X, + npy_intp stride_Y); static void CDOUBLE_filt(char *b, char *a, char *x, char *y, char *Z, - intp len_b, uintp len_x, intp stride_X, - intp stride_Y); + npy_intp len_b, npy_uintp len_x, npy_intp stride_X, + npy_intp stride_Y); static void CEXTENDED_filt(char *b, char *a, char *x, char *y, char *Z, - intp len_b, uintp len_x, intp stride_X, - intp stride_Y); + npy_intp len_b, npy_uintp len_x, npy_intp stride_X, + npy_intp stride_Y); static void OBJECT_filt(char *b, char *a, char *x, char *y, char *Z, - intp len_b, uintp len_x, intp stride_X, - intp stride_Y); + npy_intp len_b, npy_uintp len_x, npy_intp stride_X, + npy_intp stride_Y); -typedef void (BasicFilterFunction) (char *, char *, char *, char *, char *, intp, uintp, intp, intp); +typedef void (BasicFilterFunction) (char *, char *, char *, char *, char *, + npy_intp, npy_uintp, npy_intp, npy_intp); static BasicFilterFunction *BasicFilterFunctions[256]; @@ -65,9 +66,10 @@ RawFilter(const PyArrayObject * b, const PyArrayObject * a, BasicFilterFunction * filter_func); PyObject* -convert_shape_to_errmsg(intp ndim, intp *Xshape, intp *Vishape, intp theaxis, intp val) +convert_shape_to_errmsg(npy_intp ndim, npy_intp *Xshape, npy_intp *Vishape, + npy_intp theaxis, npy_intp val) { - intp j, expect_size; + npy_intp j, expect_size; PyObject *msg, *tmp, *msg1, *tmp1; if (ndim == 1) { @@ -133,8 +135,8 @@ scipy_signal_sigtools_linear_filter(PyObject * NPY_UNUSED(dummy), PyObject * arg PyArrayObject *arY, *arb, *ara, *arX, *arVi, *arVf; int axis, typenum, theaxis, st, Vi_needs_broadcasted = 0; char *ara_ptr, input_flag = 0, *azero; - intp na, nb, nal, zi_size; - intp zf_shape[NPY_MAXDIMS]; + npy_intp na, nb, nal, zi_size; + npy_intp zf_shape[NPY_MAXDIMS]; BasicFilterFunction *basic_filter; axis = -1; @@ -229,7 +231,7 @@ scipy_signal_sigtools_linear_filter(PyObject * NPY_UNUSED(dummy), PyObject * arg nb = PyArray_SIZE(arb); zi_size = (na > nb ? 
na : nb) - 1; if (input_flag) { - intp k, Vik, Xk; + npy_intp k, Vik, Xk; for (k = 0; k < PyArray_NDIM(arX); ++k) { Vik = PyArray_DIM(arVi, k); Xk = PyArray_DIM(arX, k); @@ -345,10 +347,10 @@ scipy_signal_sigtools_linear_filter(PyObject * NPY_UNUSED(dummy), PyObject * arg * 0s */ static int -zfill(const PyArrayObject * x, intp nx, char *xzfilled, intp nxzfilled) +zfill(const PyArrayObject * x, npy_intp nx, char *xzfilled, npy_intp nxzfilled) { char *xzero; - intp i, nxl; + npy_intp i, nxl; PyArray_CopySwapFunc *copyswap = PyArray_DESCR((PyArrayObject *)x)->f->copyswap; nxl = PyArray_ITEMSIZE(x); @@ -389,9 +391,9 @@ RawFilter(const PyArrayObject * b, const PyArrayObject * a, BasicFilterFunction * filter_func) { PyArrayIterObject *itx, *ity, *itzi = NULL, *itzf = NULL; - intp nitx, i, nxl, nzfl, j; - intp na, nb, nal, nbl; - intp nfilt; + npy_intp nitx, i, nxl, nzfl, j; + npy_intp na, nb, nal, nbl; + npy_intp nfilt; char *azfilled, *bzfilled, *zfzfilled, *yoyo; PyArray_CopySwapFunc *copyswap = PyArray_DESCR((PyArrayObject *)x)->f->copyswap; @@ -552,8 +554,8 @@ fail: * #NAME = FLOAT, DOUBLE, EXTENDED# */ static void @NAME@_filt(char *b, char *a, char *x, char *y, char *Z, - intp len_b, uintp len_x, intp stride_X, - intp stride_Y) + npy_intp len_b, npy_uintp len_x, npy_intp stride_X, + npy_intp stride_Y) { char *ptr_x = x, *ptr_y = y; @type@ *ptr_Z; @@ -561,8 +563,8 @@ static void @NAME@_filt(char *b, char *a, char *x, char *y, char *Z, @type@ *ptr_a = (@type@*)a; @type@ *xn, *yn; const @type@ a0 = *((@type@ *) a); - intp n; - uintp k; + npy_intp n; + npy_uintp k; /* normalize the filter coefs only once. */ for (n = 0; n < len_b; ++n) { @@ -600,8 +602,8 @@ static void @NAME@_filt(char *b, char *a, char *x, char *y, char *Z, } static void C@NAME@_filt(char *b, char *a, char *x, char *y, char *Z, - intp len_b, uintp len_x, intp stride_X, - intp stride_Y) + npy_intp len_b, npy_uintp len_x, npy_intp stride_X, + npy_intp stride_Y) { char *ptr_x = x, *ptr_y = y; @type@ *ptr_Z, *ptr_b; @@ -610,8 +612,8 @@ static void C@NAME@_filt(char *b, char *a, char *x, char *y, char *Z, @type@ a0r = ((@type@ *) a)[0]; @type@ a0i = ((@type@ *) a)[1]; @type@ a0_mag, tmpr, tmpi; - intp n; - uintp k; + npy_intp n; + npy_uintp k; a0_mag = a0r * a0r + a0i * a0i; for (k = 0; k < len_x; k++) { @@ -669,8 +671,8 @@ static void C@NAME@_filt(char *b, char *a, char *x, char *y, char *Z, /**end repeat**/ static void OBJECT_filt(char *b, char *a, char *x, char *y, char *Z, - intp len_b, uintp len_x, intp stride_X, - intp stride_Y) + npy_intp len_b, npy_uintp len_x, npy_intp stride_X, + npy_intp stride_Y) { char *ptr_x = x, *ptr_y = y; PyObject **ptr_Z, **ptr_b; @@ -678,8 +680,8 @@ static void OBJECT_filt(char *b, char *a, char *x, char *y, char *Z, PyObject **xn, **yn; PyObject **a0 = (PyObject **) a; PyObject *tmp1, *tmp2, *tmp3; - intp n; - uintp k; + npy_intp n; + npy_uintp k; /* My reference counting might not be right */ for (k = 0; k < len_x; k++) { diff --git a/scipy/signal/medianfilter.c b/scipy/signal/medianfilter.c index ad90ca9533dc..664c1fb08a24 100644 --- a/scipy/signal/medianfilter.c +++ b/scipy/signal/medianfilter.c @@ -3,13 +3,13 @@ #include "Python.h" #define NO_IMPORT_ARRAY -#include "numpy/noprefix.h" +#include "numpy/ndarrayobject.h" /* defined below */ -void f_medfilt2(float*,float*,intp*,intp*); -void d_medfilt2(double*,double*,intp*,intp*); -void b_medfilt2(unsigned char*,unsigned char*,intp*,intp*); +void f_medfilt2(float*,float*,npy_intp*,npy_intp*); +void 
d_medfilt2(double*,double*,npy_intp*,npy_intp*); +void b_medfilt2(unsigned char*,unsigned char*,npy_intp*,npy_intp*); extern char *check_malloc (int); @@ -72,7 +72,7 @@ TYPE NAME(TYPE arr[], int n) \ /* 2-D median filter with zero-padding on edges. */ #define MEDIAN_FILTER_2D(NAME, TYPE, SELECT) \ -void NAME(TYPE* in, TYPE* out, intp* Nwin, intp* Ns) \ +void NAME(TYPE* in, TYPE* out, npy_intp* Nwin, npy_intp* Ns) \ { \ int nx, ny, hN[2]; \ int pre_x, pre_y, pos_x, pos_y; \ diff --git a/scipy/signal/sigtools.h b/scipy/signal/sigtools.h index 96ea1f9ca6fb..1b9cf7f7ed6b 100644 --- a/scipy/signal/sigtools.h +++ b/scipy/signal/sigtools.h @@ -2,7 +2,6 @@ #define _SCIPY_PRIVATE_SIGNAL_SIGTOOLS_H_ #include "Python.h" -#include "numpy/noprefix.h" #define BOUNDARY_MASK 12 #define OUTSIZE_MASK 3 @@ -33,7 +32,7 @@ typedef struct { typedef struct { char *data; - intp numels; + npy_intp numels; int elsize; char *zero; /* Pointer to Representation of zero */ } Generic_Vector; @@ -41,13 +40,15 @@ typedef struct { typedef struct { char *data; int nd; - intp *dimensions; + npy_intp *dimensions; int elsize; - intp *strides; + npy_intp *strides; char *zero; /* Pointer to Representation of zero */ } Generic_Array; -typedef void (MultAddFunction) (char *, intp, char *, intp, char *, intp *, intp *, int, intp, int, intp *, intp *, uintp *); +typedef void (MultAddFunction) (char *, npy_intp, char *, npy_intp, char *, + npy_intp *, npy_intp *, int, npy_intp, int, + npy_intp *, npy_intp *, npy_uintp *); PyObject* scipy_signal_sigtools_linear_filter(PyObject * NPY_UNUSED(dummy), PyObject * args); diff --git a/scipy/signal/sigtoolsmodule.c b/scipy/signal/sigtoolsmodule.c index 0f3cd60ea313..ea55cca7f3a1 100644 --- a/scipy/signal/sigtoolsmodule.c +++ b/scipy/signal/sigtoolsmodule.c @@ -5,9 +5,8 @@ Permission to use, copy, modify, and distribute this software without fee is granted under the SciPy License. */ #include - #define PY_ARRAY_UNIQUE_SYMBOL _scipy_signal_ARRAY_API -#include +#include "numpy/ndarrayobject.h" #include "sigtools.h" #include @@ -39,7 +38,7 @@ order filtering, however uses python-specific constructs in its guts and is therefore Python dependent. 
This could be changed in a straightforward way but I haven't done it for lack of time.*/ -static int index_out_of_bounds(intp *indices, intp *max_indices, int ndims) { +static int index_out_of_bounds(npy_intp *indices, npy_intp *max_indices, int ndims) { int bad_index = 0, k = 0; while (!bad_index && (k++ < ndims)) { @@ -56,11 +55,11 @@ static int index_out_of_bounds(intp *indices, intp *max_indices, int ndims) { * but probably with dim1 being the size of the "original, unsliced" array */ -static intp compute_offsets (uintp *offsets, intp *offsets2, intp *dim1, - intp *dim2, intp *dim3, intp *mode_dep, +static npy_intp compute_offsets (npy_uintp *offsets, npy_intp *offsets2, npy_intp *dim1, + npy_intp *dim2, npy_intp *dim3, npy_intp *mode_dep, int nd) { int k,i; - intp init_offset = 0; + npy_intp init_offset = 0; for (k = 0; k < nd - 1; k++) { @@ -93,7 +92,7 @@ static intp compute_offsets (uintp *offsets, intp *offsets2, intp *dim1, /* increment by 1 the index into an N-D array, doing the necessary carrying when the index reaches the dimension along that axis */ -static int increment(intp *ret_ind, int nd, intp *max_ind) { +static int increment(npy_intp *ret_ind, int nd, npy_intp *max_ind) { int k, incr = 1; k = nd - 1; @@ -768,13 +767,13 @@ static int pre_remez(double *h2, int numtaps, int numbands, double *bands, static void fill_buffer(char *ip1, PyArrayObject *ap1, PyArrayObject *ap2, char *sort_buffer, int nels2, int check, - intp *loop_ind, intp *temp_ind, uintp *offset){ + npy_intp *loop_ind, npy_intp *temp_ind, npy_uintp *offset){ int i, k, incr = 1; int ndims = PyArray_NDIM(ap1); - intp *dims2 = PyArray_DIMS(ap2); - intp *dims1 = PyArray_DIMS(ap1); - intp is1 = PyArray_ITEMSIZE(ap1); - intp is2 = PyArray_ITEMSIZE(ap2); + npy_intp *dims2 = PyArray_DIMS(ap2); + npy_intp *dims1 = PyArray_DIMS(ap1); + npy_intp is1 = PyArray_ITEMSIZE(ap1); + npy_intp is2 = PyArray_ITEMSIZE(ap2); char *ip2 = PyArray_DATA(ap2); int elsize = PyArray_ITEMSIZE(ap1); char *ptr; @@ -811,17 +810,17 @@ int fname(type *ip1, type *ip2) { return *ip1 < *ip2 ? -1 : *ip1 == *ip2 ? 
0 : 1 COMPARE(DOUBLE_compare, double) COMPARE(FLOAT_compare, float) -COMPARE(LONGDOUBLE_compare, longdouble) -COMPARE(BYTE_compare, byte) +COMPARE(LONGDOUBLE_compare, npy_longdouble) +COMPARE(BYTE_compare, npy_byte) COMPARE(SHORT_compare, short) COMPARE(INT_compare, int) COMPARE(LONG_compare, long) -COMPARE(LONGLONG_compare, longlong) -COMPARE(UBYTE_compare, ubyte) -COMPARE(USHORT_compare, ushort) -COMPARE(UINT_compare, uint) -COMPARE(ULONG_compare, ulong) -COMPARE(ULONGLONG_compare, ulonglong) +COMPARE(LONGLONG_compare, npy_longlong) +COMPARE(UBYTE_compare, npy_ubyte) +COMPARE(USHORT_compare, npy_ushort) +COMPARE(UINT_compare, npy_uint) +COMPARE(ULONG_compare, npy_ulong) +COMPARE(ULONGLONG_compare, npy_ulonglong) int OBJECT_compare(PyObject **ip1, PyObject **ip2) { @@ -843,14 +842,14 @@ CompareFunction compare_functions[] = \ PyObject *PyArray_OrderFilterND(PyObject *op1, PyObject *op2, int order) { PyArrayObject *ap1=NULL, *ap2=NULL, *ret=NULL; - intp *a_ind, *b_ind, *temp_ind, *mode_dep, *check_ind; - uintp *offsets, offset1; - intp *offsets2; + npy_intp *a_ind, *b_ind, *temp_ind, *mode_dep, *check_ind; + npy_uintp *offsets, offset1; + npy_intp *offsets2; int i, n2, n2_nonzero, k, check, incr = 1; int typenum, bytes_in_array; int is1, os; char *op, *ap1_ptr, *ap2_ptr, *sort_buffer; - intp *ret_ind; + npy_intp *ret_ind; CompareFunction compare_func; char *zptr=NULL; PyArray_CopySwapFunc *copyswap; @@ -911,21 +910,21 @@ PyObject *PyArray_OrderFilterND(PyObject *op1, PyObject *op2, int order) { copyswap = PyArray_DESCR(ret)->f->copyswap; - bytes_in_array = PyArray_NDIM(ap1)*sizeof(intp); + bytes_in_array = PyArray_NDIM(ap1)*sizeof(npy_intp); mode_dep = malloc(bytes_in_array); for (k = 0; k < PyArray_NDIM(ap1); k++) { mode_dep[k] = -((PyArray_DIMS(ap2)[k]-1) >> 1); } - b_ind = (intp *)malloc(bytes_in_array); /* loop variables */ + b_ind = (npy_intp *)malloc(bytes_in_array); /* loop variables */ memset(b_ind,0,bytes_in_array); - a_ind = (intp *)malloc(bytes_in_array); - ret_ind = (intp *)malloc(bytes_in_array); + a_ind = (npy_intp *)malloc(bytes_in_array); + ret_ind = (npy_intp *)malloc(bytes_in_array); memset(ret_ind,0,bytes_in_array); - temp_ind = (intp *)malloc(bytes_in_array); - check_ind = (intp*)malloc(bytes_in_array); - offsets = (uintp *)malloc(PyArray_NDIM(ap1)*sizeof(uintp)); - offsets2 = (intp *)malloc(PyArray_NDIM(ap1)*sizeof(intp)); + temp_ind = (npy_intp *)malloc(bytes_in_array); + check_ind = (npy_intp*)malloc(bytes_in_array); + offsets = (npy_uintp *)malloc(PyArray_NDIM(ap1)*sizeof(npy_uintp)); + offsets2 = (npy_intp *)malloc(PyArray_NDIM(ap1)*sizeof(npy_intp)); offset1 = compute_offsets(offsets, offsets2, PyArray_DIMS(ap1), PyArray_DIMS(ap2), PyArray_DIMS(ret), mode_dep, PyArray_NDIM(ap1)); @@ -1027,14 +1026,14 @@ static char doc_correlateND[] = "out = _correlateND(a,kernel,mode) \n\n mode = static char doc_convolve2d[] = "out = _convolve2d(in1, in2, flip, mode, boundary, fillvalue)"; -extern int pylab_convolve_2d(char*, intp*, char*, intp*, char*, intp*, intp*, - intp*, int, char*); +extern int pylab_convolve_2d(char*, npy_intp*, char*, npy_intp*, char*, + npy_intp*, npy_intp*, npy_intp*, int, char*); static PyObject *sigtools_convolve2d(PyObject *NPY_UNUSED(dummy), PyObject *args) { PyObject *in1=NULL, *in2=NULL, *fill_value=NULL; int mode=2, boundary=0, typenum, flag, flip=1, ret; - intp *aout_dimens=NULL; + npy_intp *aout_dimens=NULL; int i; PyArrayObject *ain1=NULL, *ain2=NULL, *aout=NULL; PyArrayObject *afill=NULL; @@ -1094,7 +1093,7 @@ static PyObject 
*sigtools_convolve2d(PyObject *NPY_UNUSED(dummy), PyObject *args if (afill == NULL) goto fail; } - aout_dimens = malloc(PyArray_NDIM(ain1)*sizeof(intp)); + aout_dimens = malloc(PyArray_NDIM(ain1)*sizeof(npy_intp)); switch(mode & OUTSIZE_MASK) { case VALID: for (i = 0; i < PyArray_NDIM(ain1); i++) { @@ -1206,7 +1205,7 @@ static PyObject *sigtools_remez(PyObject *NPY_UNUSED(dummy), PyObject *args) int k, numtaps, numbands, type = BANDPASS, err; PyArrayObject *a_bands=NULL, *a_des=NULL, *a_weight=NULL; PyArrayObject *h=NULL; - intp ret_dimens; int maxiter = 25, grid_density = 16; + npy_intp ret_dimens; int maxiter = 25, grid_density = 16; double oldvalue, *dptr, fs = 1.0; char mystr[255]; int niter = -1; @@ -1303,9 +1302,9 @@ static PyObject *sigtools_remez(PyObject *NPY_UNUSED(dummy), PyObject *args) static char doc_median2d[] = "filt = _median2d(data, size)"; -extern void f_medfilt2(float*,float*,intp*,intp*); -extern void d_medfilt2(double*,double*,intp*,intp*); -extern void b_medfilt2(unsigned char*,unsigned char*,intp*,intp*); +extern void f_medfilt2(float*,float*,npy_intp*,npy_intp*); +extern void d_medfilt2(double*,double*,npy_intp*,npy_intp*); +extern void b_medfilt2(unsigned char*,unsigned char*,npy_intp*,npy_intp*); static PyObject *sigtools_median2d(PyObject *NPY_UNUSED(dummy), PyObject *args) { @@ -1313,7 +1312,7 @@ static PyObject *sigtools_median2d(PyObject *NPY_UNUSED(dummy), PyObject *args) int typenum; PyArrayObject *a_image=NULL, *a_size=NULL; PyArrayObject *a_out=NULL; - intp Nwin[2] = {3,3}; + npy_intp Nwin[2] = {3,3}; if (!PyArg_ParseTuple(args, "O|O", &image, &size)) return NULL; @@ -1326,8 +1325,8 @@ static PyObject *sigtools_median2d(PyObject *NPY_UNUSED(dummy), PyObject *args) if (a_size == NULL) goto fail; if ((PyArray_NDIM(a_size) != 1) || (PyArray_DIMS(a_size)[0] < 2)) PYERR("Size must be a length two sequence"); - Nwin[0] = ((intp *)PyArray_DATA(a_size))[0]; - Nwin[1] = ((intp *)PyArray_DATA(a_size))[1]; + Nwin[0] = ((npy_intp *)PyArray_DATA(a_size))[0]; + Nwin[1] = ((npy_intp *)PyArray_DATA(a_size))[1]; } a_out = (PyArrayObject *)PyArray_SimpleNew(2, PyArray_DIMS(a_image), typenum); From 280524810d0e7ac16e2e77507f3e756ce87e4de1 Mon Sep 17 00:00:00 2001 From: mattip Date: Sun, 2 Dec 2018 21:22:42 -0800 Subject: [PATCH 51/70] BUG: fix for non-gcc --- scipy/signal/firfilter.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scipy/signal/firfilter.c b/scipy/signal/firfilter.c index 0545331cbae2..b5ec6133e16c 100644 --- a/scipy/signal/firfilter.c +++ b/scipy/signal/firfilter.c @@ -65,7 +65,7 @@ static void fname ## _onemultadd2(char *sum, char *term1, char *term2) { \ return; } #define MAKE_C_ONEMULTADD2(fname, type) \ -static void fname ## _onemultadd(char *sum, char *term1, npy_intp str, +static void fname ## _onemultadd(char *sum, char *term1, npy_intp str, \ char **pvals, npy_intp n) { \ npy_intp k; \ for (k=0; k < n; k++) { \ From 0f4238228bb5e2c03454afb0cc4c92a3d16f300c Mon Sep 17 00:00:00 2001 From: Kai Striega Date: Mon, 3 Dec 2018 15:01:10 +0800 Subject: [PATCH 52/70] TST: Add test case with example callback --- scipy/optimize/tests/test_linprog.py | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/scipy/optimize/tests/test_linprog.py b/scipy/optimize/tests/test_linprog.py index 674128b02766..f9c6be992d78 100644 --- a/scipy/optimize/tests/test_linprog.py +++ b/scipy/optimize/tests/test_linprog.py @@ -1165,12 +1165,20 @@ def cb(res): last_cb['slack'] = res['slack'] last_cb['con'] = res['con'] - c = 
np.array([-3, -2]) - A_ub = [[2, 1], [1, 1], [1, 0]] - b_ub = [10, 8, 4] - res = linprog(c, A_ub=A_ub, b_ub=b_ub, callback=cb, method=self.method) + c = [2.8, 6.3, 10.8, -2.8, -6.3, -10.8] + A_eq = [[-1, -1, -1, 0, 0, 0], + [0, 0, 0, 1, 1, 1], + [1, 0, 0, 1, 0, 0], + [0, 1, 0, 0, 1, 0], + [0, 0, 1, 0, 0, 1]] + b_eq = [-0.5, 0.4, 0.3, 0.3, 0.3] + + desired_fun = -1.77 + desired_x = [0.3, 0.2, 0, 0, 0.1, 0.3] + with pytest.warns(OptimizeWarning): + res = linprog(c, A_eq=A_eq, b_eq=b_eq, callback=cb, method=self.method) - _assert_success(res, desired_fun=-18.0, desired_x=[2, 6]) + _assert_success(res, desired_fun=-1.77, desired_x=desired_x) assert_allclose(last_cb['fun'], res['fun']) assert_allclose(last_cb['x'], res['x']) assert_allclose(last_cb['con'], res['con']) From da83a4e8421dcebf0faf44bdd7fda528e70a21ab Mon Sep 17 00:00:00 2001 From: Kai Striega Date: Mon, 3 Dec 2018 15:58:38 +0800 Subject: [PATCH 53/70] BUG: Zero solution vector after each iteration. --- scipy/optimize/_linprog_simplex.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/scipy/optimize/_linprog_simplex.py b/scipy/optimize/_linprog_simplex.py index 41e3b3f7f3ef..cd8227a0733b 100644 --- a/scipy/optimize/_linprog_simplex.py +++ b/scipy/optimize/_linprog_simplex.py @@ -324,9 +324,9 @@ def _solve_simplex(T, n, basis, maxiter=1000, phase=2, status=0, message='', complete = False if phase == 1: - m = T.shape[0]-2 + m = T.shape[1]-2 elif phase == 2: - m = T.shape[0]-1 + m = T.shape[1]-1 else: raise ValueError("Argument 'phase' to _solve_simplex must be 1 or 2") @@ -370,6 +370,7 @@ def _solve_simplex(T, n, basis, maxiter=1000, phase=2, status=0, message='', complete = True if callback is not None: + solution[:] = 0 solution[basis[:n]] = T[:n, -1] x = solution[:m] c, A_ub, b_ub, A_eq, b_eq, bounds, undo = _T_o From edc4ef4bf763287f2632ff17fdc02608386db819 Mon Sep 17 00:00:00 2001 From: Dowon Date: Tue, 4 Dec 2018 10:11:45 +0900 Subject: [PATCH 54/70] changes the name of a check method --- scipy/stats/tests/test_stats.py | 58 ++++++++++++++++----------------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/scipy/stats/tests/test_stats.py b/scipy/stats/tests/test_stats.py index e66ca50a7527..18221d996dff 100644 --- a/scipy/stats/tests/test_stats.py +++ b/scipy/stats/tests/test_stats.py @@ -3560,13 +3560,13 @@ def test_obrientransform(): class StatsTestMethod(object): - def equal_gmean_test(self, array_like, desired, axis=None, dtype=None, rtol=1e-7): + def check_equal_gmean(self, array_like, desired, axis=None, dtype=None, rtol=1e-7): # Note this doesn't test when axis is not specified x = stats.gmean(array_like, axis=axis, dtype=dtype) assert_allclose(x, desired, rtol=rtol) assert_equal(x.dtype, dtype) - def equal_hmean_test(self, array_like, desired, axis=None, dtype=None, rtol=1e-7): + def check_equal_hmean(self, array_like, desired, axis=None, dtype=None, rtol=1e-7): x = stats.hmean(array_like, axis=axis, dtype=dtype) assert_allclose(x, desired, rtol=rtol) assert_equal(x.dtype, dtype) @@ -3577,54 +3577,54 @@ def test_1d_list(self): # Test a 1d list a = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100] desired = 34.1417152147 - self.equal_hmean_test(a, desired) + self.check_equal_hmean(a, desired) a = [1, 2, 3, 4] desired = 4. / (1. / 1 + 1. / 2 + 1. / 3 + 1. 
/ 4) - self.equal_hmean_test(a, desired) + self.check_equal_hmean(a, desired) def test_1d_array(self): # Test a 1d array a = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]) desired = 34.1417152147 - self.equal_hmean_test(a, desired) + self.check_equal_hmean(a, desired) # Note the next tests use axis=None as default, not axis=0 def test_2d_list(self): # Test a 2d list a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] desired = 38.6696271841 - self.equal_hmean_test(a, desired) + self.check_equal_hmean(a, desired) def test_2d_array(self): # Test a 2d array a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] desired = 38.6696271841 - self.equal_hmean_test(np.array(a), desired) + self.check_equal_hmean(np.array(a), desired) def test_2d_axis0(self): # Test a 2d list with axis=0 a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] desired = np.array([22.88135593, 39.13043478, 52.90076336, 65.45454545]) - self.equal_hmean_test(a, desired, axis=0) + self.check_equal_hmean(a, desired, axis=0) def test_2d_axis1(self): # Test a 2d list with axis=1 a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] desired = np.array([19.2, 63.03939962, 103.80078637]) - self.equal_hmean_test(a, desired, axis=1) + self.check_equal_hmean(a, desired, axis=1) def test_2d_matrix_axis0(self): # Test a 2d list with axis=0 a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] desired = np.matrix([[22.88135593, 39.13043478, 52.90076336, 65.45454545]]) - self.equal_hmean_test(np.matrix(a), desired, axis=0) + self.check_equal_hmean(np.matrix(a), desired, axis=0) def test_2d_matrix_axis1(self): # Test a 2d list with axis=1 a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] desired = np.matrix([[19.2, 63.03939962, 103.80078637]]).T - self.equal_hmean_test(np.matrix(a), desired, axis=1) + self.check_equal_hmean(np.matrix(a), desired, axis=1) class TestGeoMean(StatsTestMethod): @@ -3632,85 +3632,85 @@ def test_1d_list(self): # Test a 1d list a = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100] desired = 45.2872868812 - self.equal_gmean_test(a, desired) + self.check_equal_gmean(a, desired) a = [1, 2, 3, 4] desired = power(1 * 2 * 3 * 4, 1. / 4.) - self.equal_gmean_test(a, desired, rtol=1e-14) + self.check_equal_gmean(a, desired, rtol=1e-14) def test_1d_array(self): # Test a 1d array a = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]) desired = 45.2872868812 - self.equal_gmean_test(a, desired) + self.check_equal_gmean(a, desired) a = array([1, 2, 3, 4], float32) desired = power(1 * 2 * 3 * 4, 1. / 4.) 
- self.equal_gmean_test(a, desired, dtype=float32) + self.check_equal_gmean(a, desired, dtype=float32) # Note the next tests use axis=None as default, not axis=0 def test_2d_list(self): # Test a 2d list a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] desired = 52.8885199 - self.equal_gmean_test(a, desired) + self.check_equal_gmean(a, desired) def test_2d_array(self): # Test a 2d array a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] desired = 52.8885199 - self.equal_gmean_test(array(a), desired) + self.check_equal_gmean(array(a), desired) def test_2d_axis0(self): # Test a 2d list with axis=0 a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] desired = np.array([35.56893304, 49.32424149, 61.3579244, 72.68482371]) - self.equal_gmean_test(a, desired, axis=0) + self.check_equal_gmean(a, desired, axis=0) a = array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) desired = array([1, 2, 3, 4]) - self.equal_gmean_test(a, desired, axis=0, rtol=1e-14) + self.check_equal_gmean(a, desired, axis=0, rtol=1e-14) def test_2d_axis1(self): # Test a 2d list with axis=1 a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] desired = np.array([22.13363839, 64.02171746, 104.40086817]) - self.equal_gmean_test(a, desired, axis=1) + self.check_equal_gmean(a, desired, axis=1) a = array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) v = power(1 * 2 * 3 * 4, 1. / 4.) desired = array([v, v, v]) - self.equal_gmean_test(a, desired, axis=1, rtol=1e-14) + self.check_equal_gmean(a, desired, axis=1, rtol=1e-14) def test_2d_matrix_axis0(self): # Test a 2d list with axis=0 a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] desired = np.matrix([[35.56893304, 49.32424149, 61.3579244, 72.68482371]]) - self.equal_gmean_test(np.matrix(a), desired, axis=0) + self.check_equal_gmean(np.matrix(a), desired, axis=0) a = array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) desired = np.matrix([1, 2, 3, 4]) - self.equal_gmean_test(np.matrix(a), desired, axis=0, rtol=1e-14) + self.check_equal_gmean(np.matrix(a), desired, axis=0, rtol=1e-14) a = array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) desired = np.matrix(stats.gmean(a, axis=0)) - self.equal_gmean_test(np.matrix(a), desired, axis=0, rtol=1e-14) + self.check_equal_gmean(np.matrix(a), desired, axis=0, rtol=1e-14) def test_2d_matrix_axis1(self): # Test a 2d list with axis=1 a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] desired = np.matrix([[22.13363839, 64.02171746, 104.40086817]]).T - self.equal_gmean_test(np.matrix(a), desired, axis=1) + self.check_equal_gmean(np.matrix(a), desired, axis=1) a = array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) v = power(1 * 2 * 3 * 4, 1. / 4.) 
desired = np.matrix([[v], [v], [v]]) - self.equal_gmean_test(np.matrix(a), desired, axis=1, rtol=1e-14) + self.check_equal_gmean(np.matrix(a), desired, axis=1, rtol=1e-14) def test_large_values(self): a = array([1e100, 1e200, 1e300]) desired = 1e200 - self.equal_gmean_test(a, desired, rtol=1e-13) + self.check_equal_gmean(a, desired, rtol=1e-13) def test_1d_list0(self): # Test a 1d list with zero element @@ -3718,7 +3718,7 @@ def test_1d_list0(self): desired = 0.0 # due to exp(-inf)=0 olderr = np.seterr(all='ignore') try: - self.equal_gmean_test(a, desired) + self.check_equal_gmean(a, desired) finally: np.seterr(**olderr) @@ -3728,7 +3728,7 @@ def test_1d_array0(self): desired = 0.0 # due to exp(-inf)=0 olderr = np.seterr(all='ignore') try: - self.equal_gmean_test(a, desired) + self.check_equal_gmean(a, desired) finally: np.seterr(**olderr) From 32170e92eb8eda1f640f0edc9325d3e3d78dbe14 Mon Sep 17 00:00:00 2001 From: Dowon Date: Tue, 4 Dec 2018 10:12:45 +0900 Subject: [PATCH 55/70] changes the name of check method --- scipy/stats/tests/test_mstats_basic.py | 38 +++++++++++++------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/scipy/stats/tests/test_mstats_basic.py b/scipy/stats/tests/test_mstats_basic.py index b416931f3788..29abd8ef2d71 100644 --- a/scipy/stats/tests/test_mstats_basic.py +++ b/scipy/stats/tests/test_mstats_basic.py @@ -43,13 +43,13 @@ def test_mquantiles_limit_keyword(self): class MStatsTestMethod(object): - def equal_gmean_test(self, array_like, desired, axis=None, dtype=None, rtol=1e-7): + def check_equal_gmean(self, array_like, desired, axis=None, dtype=None, rtol=1e-7): # Note this doesn't test when axis is not specified x = mstats.gmean(array_like, axis=axis, dtype=dtype) assert_allclose(x, desired, rtol=rtol) assert_equal(x.dtype, dtype) - def equal_hmean_test(self, array_like, desired, axis=None, dtype=None, rtol=1e-7): + def check_equal_hmean(self, array_like, desired, axis=None, dtype=None, rtol=1e-7): x = stats.hmean(array_like, axis=axis, dtype=dtype) assert_allclose(x, desired, rtol=rtol) assert_equal(x.dtype, dtype) @@ -59,23 +59,23 @@ class TestGeoMean(MStatsTestMethod): def test_1d(self): a = [1, 2, 3, 4] desired = np.power(1*2*3*4, 1./4.) - self.equal_gmean_test(a, desired, rtol=1e-14) + self.check_equal_gmean(a, desired, rtol=1e-14) def test_1d_ma(self): # Test a 1d masked array a = ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]) desired = 45.2872868812 - self.equal_gmean_test(a, desired) + self.check_equal_gmean(a, desired) a = ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1]) desired = np.power(1*2*3, 1./3.) 
- self.equal_gmean_test(a, desired, rtol=1e-14) + self.check_equal_gmean(a, desired, rtol=1e-14) def test_1d_ma_value(self): # Test a 1d masked array with a masked value a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100], mask=[0, 0, 0, 0, 0, 0, 0, 0, 0, 1]) desired = 41.4716627439 - self.equal_gmean_test(a, desired) + self.check_equal_gmean(a, desired) def test_1d_ma0(self): # Test a 1d masked array with zero element @@ -83,7 +83,7 @@ def test_1d_ma0(self): desired = 41.4716627439 olderr = np.seterr(all='ignore') try: - self.equal_gmean_test(a, desired) + self.check_equal_gmean(a, desired) finally: np.seterr(**olderr) @@ -93,7 +93,7 @@ def test_1d_ma_inf(self): desired = 41.4716627439 olderr = np.seterr(all='ignore') try: - self.equal_gmean_test(a, desired) + self.check_equal_gmean(a, desired) finally: np.seterr(**olderr) @@ -101,58 +101,58 @@ def test_1d_ma_inf(self): def test_1d_float96(self): a = ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1]) desired_dt = np.power(1*2*3, 1./3.).astype(np.float96) - self.equal_gmean_test(a, desired_dt, dtype=np.float96, rtol=1e-14) + self.check_equal_gmean(a, desired_dt, dtype=np.float96, rtol=1e-14) def test_2d_ma(self): a = ma.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]], mask=[[0, 0, 0, 0], [1, 0, 0, 1], [0, 1, 1, 0]]) desired = np.array([1, 2, 3, 4]) - self.equal_gmean_test(a, desired, axis=0, rtol=1e-14) + self.check_equal_gmean(a, desired, axis=0, rtol=1e-14) desired = ma.array([np.power(1*2*3*4, 1./4.), np.power(2*3, 1./2.), np.power(1*4, 1./2.)]) - self.equal_gmean_test(a, desired, axis=-1, rtol=1e-14) + self.check_equal_gmean(a, desired, axis=-1, rtol=1e-14) # Test a 2d masked array a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] desired = 52.8885199 - self.equal_gmean_test(np.ma.array(a), desired) + self.check_equal_gmean(np.ma.array(a), desired) class TestHarMean(MStatsTestMethod): def test_1d(self): a = ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1]) desired = 3. / (1./1 + 1./2 + 1./3) - self.equal_hmean_test(a, desired, rtol=1e-14) + self.check_equal_hmean(a, desired, rtol=1e-14) a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]) desired = 34.1417152147 - self.equal_hmean_test(a, desired) + self.check_equal_hmean(a, desired) a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100], mask=[0, 0, 0, 0, 0, 0, 0, 0, 0, 1]) desired = 31.8137186141 - self.equal_hmean_test(a, desired) + self.check_equal_hmean(a, desired) @pytest.mark.skipif(not hasattr(np, 'float96'), reason='cannot find float96 so skipping') def test_1d_float96(self): a = ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1]) desired_dt = np.asarray(3. 
/ (1./1 + 1./2 + 1./3), dtype=np.float96) - self.equal_hmean_test(a, desired_dt, dtype=np.float96) + self.check_equal_hmean(a, desired_dt, dtype=np.float96) def test_2d(self): a = ma.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]], mask=[[0, 0, 0, 0], [1, 0, 0, 1], [0, 1, 1, 0]]) desired = ma.array([1, 2, 3, 4]) - self.equal_hmean_test(a, desired, axis=0, rtol=1e-14) + self.check_equal_hmean(a, desired, axis=0, rtol=1e-14) desired = [4./(1/1.+1/2.+1/3.+1/4.), 2./(1/2.+1/3.), 2./(1/1.+1/4.)] - self.equal_hmean_test(a, desired, axis=-1, rtol=1e-14) + self.check_equal_hmean(a, desired, axis=-1, rtol=1e-14) a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] desired = 38.6696271841 - self.equal_hmean_test(np.ma.array(a), desired) + self.check_equal_hmean(np.ma.array(a), desired) class TestRanking(object): From b04562fe01f872f805b034415d2ba7556d9d9de0 Mon Sep 17 00:00:00 2001 From: Dowon Date: Tue, 4 Dec 2018 14:02:00 +0900 Subject: [PATCH 56/70] change class method to function method --- scipy/stats/tests/test_stats.py | 77 ++++++++++++++++----------------- 1 file changed, 38 insertions(+), 39 deletions(-) diff --git a/scipy/stats/tests/test_stats.py b/scipy/stats/tests/test_stats.py index 18221d996dff..d158ecfefdbe 100644 --- a/scipy/stats/tests/test_stats.py +++ b/scipy/stats/tests/test_stats.py @@ -3559,158 +3559,157 @@ def test_obrientransform(): assert_array_almost_equal(result[0], expected, decimal=4) -class StatsTestMethod(object): - def check_equal_gmean(self, array_like, desired, axis=None, dtype=None, rtol=1e-7): - # Note this doesn't test when axis is not specified - x = stats.gmean(array_like, axis=axis, dtype=dtype) - assert_allclose(x, desired, rtol=rtol) - assert_equal(x.dtype, dtype) +def check_equal_gmean(array_like, desired, axis=None, dtype=None, rtol=1e-7): + # Note this doesn't test when axis is not specified + x = stats.gmean(array_like, axis=axis, dtype=dtype) + assert_allclose(x, desired, rtol=rtol) + assert_equal(x.dtype, dtype) - def check_equal_hmean(self, array_like, desired, axis=None, dtype=None, rtol=1e-7): - x = stats.hmean(array_like, axis=axis, dtype=dtype) - assert_allclose(x, desired, rtol=rtol) - assert_equal(x.dtype, dtype) +def check_equal_hmean(array_like, desired, axis=None, dtype=None, rtol=1e-7): + x = stats.hmean(array_like, axis=axis, dtype=dtype) + assert_allclose(x, desired, rtol=rtol) + assert_equal(x.dtype, dtype) -class TestHarMean(StatsTestMethod): +class TestHarMean(object): def test_1d_list(self): # Test a 1d list a = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100] desired = 34.1417152147 - self.check_equal_hmean(a, desired) + check_equal_hmean(a, desired) a = [1, 2, 3, 4] desired = 4. / (1. / 1 + 1. / 2 + 1. / 3 + 1. 
/ 4) - self.check_equal_hmean(a, desired) + check_equal_hmean(a, desired) def test_1d_array(self): # Test a 1d array a = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]) desired = 34.1417152147 - self.check_equal_hmean(a, desired) + check_equal_hmean(a, desired) # Note the next tests use axis=None as default, not axis=0 def test_2d_list(self): # Test a 2d list a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] desired = 38.6696271841 - self.check_equal_hmean(a, desired) + check_equal_hmean(a, desired) def test_2d_array(self): # Test a 2d array a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] desired = 38.6696271841 - self.check_equal_hmean(np.array(a), desired) + check_equal_hmean(np.array(a), desired) def test_2d_axis0(self): # Test a 2d list with axis=0 a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] desired = np.array([22.88135593, 39.13043478, 52.90076336, 65.45454545]) - self.check_equal_hmean(a, desired, axis=0) + check_equal_hmean(a, desired, axis=0) def test_2d_axis1(self): # Test a 2d list with axis=1 a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] desired = np.array([19.2, 63.03939962, 103.80078637]) - self.check_equal_hmean(a, desired, axis=1) + check_equal_hmean(a, desired, axis=1) def test_2d_matrix_axis0(self): # Test a 2d list with axis=0 a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] desired = np.matrix([[22.88135593, 39.13043478, 52.90076336, 65.45454545]]) - self.check_equal_hmean(np.matrix(a), desired, axis=0) + check_equal_hmean(np.matrix(a), desired, axis=0) def test_2d_matrix_axis1(self): # Test a 2d list with axis=1 a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] desired = np.matrix([[19.2, 63.03939962, 103.80078637]]).T - self.check_equal_hmean(np.matrix(a), desired, axis=1) + check_equal_hmean(np.matrix(a), desired, axis=1) -class TestGeoMean(StatsTestMethod): +class TestGeoMean(object): def test_1d_list(self): # Test a 1d list a = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100] desired = 45.2872868812 - self.check_equal_gmean(a, desired) + check_equal_gmean(a, desired) a = [1, 2, 3, 4] desired = power(1 * 2 * 3 * 4, 1. / 4.) - self.check_equal_gmean(a, desired, rtol=1e-14) + check_equal_gmean(a, desired, rtol=1e-14) def test_1d_array(self): # Test a 1d array a = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]) desired = 45.2872868812 - self.check_equal_gmean(a, desired) + check_equal_gmean(a, desired) a = array([1, 2, 3, 4], float32) desired = power(1 * 2 * 3 * 4, 1. / 4.) 
- self.check_equal_gmean(a, desired, dtype=float32) + check_equal_gmean(a, desired, dtype=float32) # Note the next tests use axis=None as default, not axis=0 def test_2d_list(self): # Test a 2d list a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] desired = 52.8885199 - self.check_equal_gmean(a, desired) + check_equal_gmean(a, desired) def test_2d_array(self): # Test a 2d array a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] desired = 52.8885199 - self.check_equal_gmean(array(a), desired) + check_equal_gmean(array(a), desired) def test_2d_axis0(self): # Test a 2d list with axis=0 a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] desired = np.array([35.56893304, 49.32424149, 61.3579244, 72.68482371]) - self.check_equal_gmean(a, desired, axis=0) + check_equal_gmean(a, desired, axis=0) a = array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) desired = array([1, 2, 3, 4]) - self.check_equal_gmean(a, desired, axis=0, rtol=1e-14) + check_equal_gmean(a, desired, axis=0, rtol=1e-14) def test_2d_axis1(self): # Test a 2d list with axis=1 a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] desired = np.array([22.13363839, 64.02171746, 104.40086817]) - self.check_equal_gmean(a, desired, axis=1) + check_equal_gmean(a, desired, axis=1) a = array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) v = power(1 * 2 * 3 * 4, 1. / 4.) desired = array([v, v, v]) - self.check_equal_gmean(a, desired, axis=1, rtol=1e-14) + check_equal_gmean(a, desired, axis=1, rtol=1e-14) def test_2d_matrix_axis0(self): # Test a 2d list with axis=0 a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] desired = np.matrix([[35.56893304, 49.32424149, 61.3579244, 72.68482371]]) - self.check_equal_gmean(np.matrix(a), desired, axis=0) + check_equal_gmean(np.matrix(a), desired, axis=0) a = array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) desired = np.matrix([1, 2, 3, 4]) - self.check_equal_gmean(np.matrix(a), desired, axis=0, rtol=1e-14) + check_equal_gmean(np.matrix(a), desired, axis=0, rtol=1e-14) a = array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) desired = np.matrix(stats.gmean(a, axis=0)) - self.check_equal_gmean(np.matrix(a), desired, axis=0, rtol=1e-14) + check_equal_gmean(np.matrix(a), desired, axis=0, rtol=1e-14) def test_2d_matrix_axis1(self): # Test a 2d list with axis=1 a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] desired = np.matrix([[22.13363839, 64.02171746, 104.40086817]]).T - self.check_equal_gmean(np.matrix(a), desired, axis=1) + check_equal_gmean(np.matrix(a), desired, axis=1) a = array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) v = power(1 * 2 * 3 * 4, 1. / 4.) 
desired = np.matrix([[v], [v], [v]]) - self.check_equal_gmean(np.matrix(a), desired, axis=1, rtol=1e-14) + check_equal_gmean(np.matrix(a), desired, axis=1, rtol=1e-14) def test_large_values(self): a = array([1e100, 1e200, 1e300]) desired = 1e200 - self.check_equal_gmean(a, desired, rtol=1e-13) + check_equal_gmean(a, desired, rtol=1e-13) def test_1d_list0(self): # Test a 1d list with zero element @@ -3718,7 +3717,7 @@ def test_1d_list0(self): desired = 0.0 # due to exp(-inf)=0 olderr = np.seterr(all='ignore') try: - self.check_equal_gmean(a, desired) + check_equal_gmean(a, desired) finally: np.seterr(**olderr) @@ -3728,7 +3727,7 @@ def test_1d_array0(self): desired = 0.0 # due to exp(-inf)=0 olderr = np.seterr(all='ignore') try: - self.check_equal_gmean(a, desired) + check_equal_gmean(a, desired) finally: np.seterr(**olderr) From b6e0fa5a0baa59250325e5f915780d6bf5be2fdc Mon Sep 17 00:00:00 2001 From: Dowon Date: Tue, 4 Dec 2018 14:03:02 +0900 Subject: [PATCH 57/70] changes class method to function method --- scipy/stats/tests/test_mstats_basic.py | 57 +++++++++++++------------- 1 file changed, 28 insertions(+), 29 deletions(-) diff --git a/scipy/stats/tests/test_mstats_basic.py b/scipy/stats/tests/test_mstats_basic.py index 29abd8ef2d71..953b76eadb0c 100644 --- a/scipy/stats/tests/test_mstats_basic.py +++ b/scipy/stats/tests/test_mstats_basic.py @@ -42,40 +42,39 @@ def test_mquantiles_limit_keyword(self): assert_almost_equal(quants, desired) -class MStatsTestMethod(object): - def check_equal_gmean(self, array_like, desired, axis=None, dtype=None, rtol=1e-7): - # Note this doesn't test when axis is not specified - x = mstats.gmean(array_like, axis=axis, dtype=dtype) - assert_allclose(x, desired, rtol=rtol) - assert_equal(x.dtype, dtype) +def check_equal_gmean(array_like, desired, axis=None, dtype=None, rtol=1e-7): + # Note this doesn't test when axis is not specified + x = mstats.gmean(array_like, axis=axis, dtype=dtype) + assert_allclose(x, desired, rtol=rtol) + assert_equal(x.dtype, dtype) - def check_equal_hmean(self, array_like, desired, axis=None, dtype=None, rtol=1e-7): - x = stats.hmean(array_like, axis=axis, dtype=dtype) - assert_allclose(x, desired, rtol=rtol) - assert_equal(x.dtype, dtype) +def check_equal_hmean(array_like, desired, axis=None, dtype=None, rtol=1e-7): + x = stats.hmean(array_like, axis=axis, dtype=dtype) + assert_allclose(x, desired, rtol=rtol) + assert_equal(x.dtype, dtype) -class TestGeoMean(MStatsTestMethod): +class TestGeoMean(object): def test_1d(self): a = [1, 2, 3, 4] desired = np.power(1*2*3*4, 1./4.) - self.check_equal_gmean(a, desired, rtol=1e-14) + check_equal_gmean(a, desired, rtol=1e-14) def test_1d_ma(self): # Test a 1d masked array a = ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]) desired = 45.2872868812 - self.check_equal_gmean(a, desired) + check_equal_gmean(a, desired) a = ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1]) desired = np.power(1*2*3, 1./3.) 
- self.check_equal_gmean(a, desired, rtol=1e-14) + check_equal_gmean(a, desired, rtol=1e-14) def test_1d_ma_value(self): # Test a 1d masked array with a masked value a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100], mask=[0, 0, 0, 0, 0, 0, 0, 0, 0, 1]) desired = 41.4716627439 - self.check_equal_gmean(a, desired) + check_equal_gmean(a, desired) def test_1d_ma0(self): # Test a 1d masked array with zero element @@ -83,7 +82,7 @@ def test_1d_ma0(self): desired = 41.4716627439 olderr = np.seterr(all='ignore') try: - self.check_equal_gmean(a, desired) + check_equal_gmean(a, desired) finally: np.seterr(**olderr) @@ -93,7 +92,7 @@ def test_1d_ma_inf(self): desired = 41.4716627439 olderr = np.seterr(all='ignore') try: - self.check_equal_gmean(a, desired) + check_equal_gmean(a, desired) finally: np.seterr(**olderr) @@ -101,58 +100,58 @@ def test_1d_ma_inf(self): def test_1d_float96(self): a = ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1]) desired_dt = np.power(1*2*3, 1./3.).astype(np.float96) - self.check_equal_gmean(a, desired_dt, dtype=np.float96, rtol=1e-14) + check_equal_gmean(a, desired_dt, dtype=np.float96, rtol=1e-14) def test_2d_ma(self): a = ma.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]], mask=[[0, 0, 0, 0], [1, 0, 0, 1], [0, 1, 1, 0]]) desired = np.array([1, 2, 3, 4]) - self.check_equal_gmean(a, desired, axis=0, rtol=1e-14) + check_equal_gmean(a, desired, axis=0, rtol=1e-14) desired = ma.array([np.power(1*2*3*4, 1./4.), np.power(2*3, 1./2.), np.power(1*4, 1./2.)]) - self.check_equal_gmean(a, desired, axis=-1, rtol=1e-14) + check_equal_gmean(a, desired, axis=-1, rtol=1e-14) # Test a 2d masked array a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] desired = 52.8885199 - self.check_equal_gmean(np.ma.array(a), desired) + check_equal_gmean(np.ma.array(a), desired) -class TestHarMean(MStatsTestMethod): +class TestHarMean(object): def test_1d(self): a = ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1]) desired = 3. / (1./1 + 1./2 + 1./3) - self.check_equal_hmean(a, desired, rtol=1e-14) + check_equal_hmean(a, desired, rtol=1e-14) a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]) desired = 34.1417152147 - self.check_equal_hmean(a, desired) + check_equal_hmean(a, desired) a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100], mask=[0, 0, 0, 0, 0, 0, 0, 0, 0, 1]) desired = 31.8137186141 - self.check_equal_hmean(a, desired) + check_equal_hmean(a, desired) @pytest.mark.skipif(not hasattr(np, 'float96'), reason='cannot find float96 so skipping') def test_1d_float96(self): a = ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1]) desired_dt = np.asarray(3. 
/ (1./1 + 1./2 + 1./3), dtype=np.float96) - self.check_equal_hmean(a, desired_dt, dtype=np.float96) + check_equal_hmean(a, desired_dt, dtype=np.float96) def test_2d(self): a = ma.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]], mask=[[0, 0, 0, 0], [1, 0, 0, 1], [0, 1, 1, 0]]) desired = ma.array([1, 2, 3, 4]) - self.check_equal_hmean(a, desired, axis=0, rtol=1e-14) + check_equal_hmean(a, desired, axis=0, rtol=1e-14) desired = [4./(1/1.+1/2.+1/3.+1/4.), 2./(1/2.+1/3.), 2./(1/1.+1/4.)] - self.check_equal_hmean(a, desired, axis=-1, rtol=1e-14) + check_equal_hmean(a, desired, axis=-1, rtol=1e-14) a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] desired = 38.6696271841 - self.check_equal_hmean(np.ma.array(a), desired) + check_equal_hmean(np.ma.array(a), desired) class TestRanking(object): From 668751269162f20b3c0f3955d829a00324b87c16 Mon Sep 17 00:00:00 2001 From: Johann Faouzi Date: Tue, 4 Dec 2018 18:52:17 +0100 Subject: [PATCH 58/70] Correct docstring in show_options in optimize --- scipy/optimize/optimize.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scipy/optimize/optimize.py b/scipy/optimize/optimize.py index dcb33320d4d5..9b2da1efe94a 100644 --- a/scipy/optimize/optimize.py +++ b/scipy/optimize/optimize.py @@ -2971,7 +2971,7 @@ def show_options(solver=None, method=None, disp=True): Returns ------- text - Either None (for disp=False) or the text string (disp=True) + Either None (for disp=True) or the text string (disp=False) Notes ----- From 1ffc75525ccb7ae01cf709985f380de47f1d14a4 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Thu, 6 Dec 2018 21:55:26 -0800 Subject: [PATCH 59/70] TST: fix minor issue in a signal.stft test. The use of np.empty caused a spurious RuntimeWarning in CI, see e.g. https://circleci.com/gh/scipy/scipy/10288?utm_campaign=vcs-integration-link&utm_medium=referral&utm_source=github-build-link --- scipy/signal/tests/test_spectral.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scipy/signal/tests/test_spectral.py b/scipy/signal/tests/test_spectral.py index 357075f6a9af..7de91f8f7107 100644 --- a/scipy/signal/tests/test_spectral.py +++ b/scipy/signal/tests/test_spectral.py @@ -1086,7 +1086,7 @@ def test_input_validation(self): assert_raises(ValueError, check_NOLA, np.ones(20), 10, 0) assert_raises(ValueError, check_NOLA, 'hann', 64, -32) - x = np.empty(1024) + x = np.zeros(1024) z = stft(x) assert_raises(ValueError, stft, x, window=np.ones((2,2))) From 97479725e6a747cab540688b7c895df35c1b697b Mon Sep 17 00:00:00 2001 From: Tyler Reddy Date: Mon, 26 Nov 2018 20:19:06 -0800 Subject: [PATCH 60/70] TST: add Windows Azure CI --- azure-pipelines.yml | 110 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 110 insertions(+) create mode 100644 azure-pipelines.yml diff --git a/azure-pipelines.yml b/azure-pipelines.yml new file mode 100644 index 000000000000..0ddf5b970940 --- /dev/null +++ b/azure-pipelines.yml @@ -0,0 +1,110 @@ +trigger: + # start a new build for every push + batch: False + branches: + include: + - master + - maintenance/* + +jobs: +- job: Windows + pool: + vmIMage: 'VS2017-Win2016' + variables: + OPENBLAS_32: https://3f23b170c54c2533c070-1c8a9b3114517dc5fe17b7c3f8c63a43.ssl.cf2.rackcdn.com/openblas-5f998ef_gcc7_1_0_win32.zip + OPENBLAS_64: https://3f23b170c54c2533c070-1c8a9b3114517dc5fe17b7c3f8c63a43.ssl.cf2.rackcdn.com/openblas-5f998ef_gcc7_1_0_win64.zip + strategy: + maxParallel: 4 + matrix: + Python36-32bit-full: + PYTHON_VERSION: '3.6' + PYTHON_ARCH: 'x86' + TEST_MODE: full + OPENBLAS: 
$(OPENBLAS_32) + BITS: 32 + Python35-64bit-full: + PYTHON_VERSION: '3.5' + PYTHON_ARCH: 'x64' + TEST_MODE: full + OPENBLAS: $(OPENBLAS_64) + BITS: 64 + Python36-64bit-full: + PYTHON_VERSION: '3.6' + PYTHON_ARCH: 'x64' + TEST_MODE: full + OPENBLAS: $(OPENBLAS_64) + BITS: 64 + Python37-64bit-full: + PYTHON_VERSION: '3.7' + PYTHON_ARCH: 'x64' + TEST_MODE: full + OPENBLAS: $(OPENBLAS_64) + BITS: 64 + steps: + - task: UsePythonVersion@0 + inputs: + versionSpec: $(PYTHON_VERSION) + addToPath: true + architecture: $(PYTHON_ARCH) + # as noted by numba project, currently need + # specific VC install for Python 2.7 + - powershell: | + $wc = New-Object net.webclient + $wc.Downloadfile("https://download.microsoft.com/download/7/9/6/796EF2E4-801B-4FC4-AB28-B59FBF6D907B/VCForPython27.msi", "VCForPython27.msi") + Start-Process "VCForPython27.msi" /qn -Wait + displayName: 'Install VC 9.0' + condition: eq(variables['PYTHON_VERSION'], '2.7') + - script: python -m pip install --upgrade pip setuptools wheel + displayName: 'Install tools' + - powershell: | + $wc = New-Object net.webclient; + $wc.Downloadfile("$(OPENBLAS)", "openblas.zip") + $tmpdir = New-TemporaryFile | %{ rm $_; mkdir $_ } + Expand-Archive "openblas.zip" $tmpdir + $pyversion = python -c "from __future__ import print_function; import sys; print(sys.version.split()[0])" + Write-Host "Python Version: $pyversion" + $target = "C:\\hostedtoolcache\\windows\\Python\\$pyversion\\$(PYTHON_ARCH)\\lib\\openblas.a" + Write-Host "target path: $target" + cp $tmpdir\$(BITS)\lib\libopenblas_5f998ef_gcc7_1_0.a $target + displayName: 'Download / Install OpenBLAS' + - powershell: | + # NOTE: can probably (eventually) abstract this + # upstream in Microsoft repo to support x86 natively + choco install -y mingw --forcex86 --force + displayName: 'Install 32-bit mingw for 32-bit builds' + condition: eq(variables['BITS'], 32) + - script: python -m pip install numpy cython==0.28.5 pytest pytest-timeout pytest-xdist pytest-env pytest-faulthandler Pillow mpmath matplotlib + displayName: 'Install dependencies' + - powershell: | + # need a version of NumPy distutils that can build + # with msvc + mingw-gfortran + $NumpyDir = $((python -c 'import os; import numpy; print(os.path.dirname(numpy.__file__))') | Out-String).Trim() + rm -r -Force "$NumpyDir\distutils" + $tmpdir = New-TemporaryFile | %{ rm $_; mkdir $_ } + git clone -q --depth=1 -b master https://github.com/numpy/numpy.git $tmpdir + mv $tmpdir\numpy\distutils $NumpyDir + displayName: 'Replace NumPy distutils' + - powershell: | + If ($(BITS) -eq 32) { + # 32-bit build requires careful adjustments + # until Microsoft has a switch we can use + # directly for i686 mingw + $env:NPY_DISTUTILS_APPEND_FLAGS = 1 + $env:CFLAGS = "-m32" + $env:LDFLAGS = "-m32" + $env:PATH = "C:\\tools\\mingw32\\bin;" + $env:PATH + refreshenv + } + + mkdir dist + pip wheel --no-build-isolation -v -v -v --wheel-dir=dist . 
+ ls dist -r | Foreach-Object { + pip install $_.FullName + } + displayName: 'Build SciPy' + - script: python runtests.py -n --mode=$(TEST_MODE) -- -n auto -rsx --junitxml=junit/test-results.xml + displayName: 'Run SciPy Test Suite' + - task: PublishTestResults@2 + inputs: + testResultsFiles: '**/test-*.xml' + testRunTitle: 'Publish test results for Python $(python.version)' From 6d0eade28b63814127d401a3e3be8c1bae7dd59b Mon Sep 17 00:00:00 2001 From: FranzForstmayr Date: Sat, 8 Dec 2018 08:23:05 +0100 Subject: [PATCH 61/70] Included blank line before list - Fixes #8658 (#9580) DOC: Included blank line before list to fix rendering Fixes gh-8658 --- scipy/optimize/_root.py | 42 +++++++++++++++++++++++++++-------------- 1 file changed, 28 insertions(+), 14 deletions(-) diff --git a/scipy/optimize/_root.py b/scipy/optimize/_root.py index c85f74a7ea18..1dfa5e1352e3 100644 --- a/scipy/optimize/_root.py +++ b/scipy/optimize/_root.py @@ -345,16 +345,23 @@ def _root_broyden1_doc(): method and values for additional parameters. Methods available: - - ``restart``: drop all matrix columns. Has no + + - ``restart`` + Drop all matrix columns. Has no extra parameters. - - ``simple``: drop oldest matrix column. Has no + - ``simple`` + Drop oldest matrix column. Has no extra parameters. - - ``svd``: keep only the most significant SVD + - ``svd`` + Keep only the most significant SVD components. - Extra parameters: - - ``to_retain``: number of SVD components to - retain when rank reduction is done. - Default is ``max_rank - 2``. + + Extra parameters: + + - ``to_retain`` + Number of SVD components to + retain when rank reduction is done. + Default is ``max_rank - 2``. max_rank : int, optional Maximum rank for the Broyden matrix. Default is infinity (ie., no rank reduction). @@ -403,16 +410,23 @@ def _root_broyden2_doc(): method and values for additional parameters. Methods available: - - ``restart``: drop all matrix columns. Has no + + - ``restart`` + Drop all matrix columns. Has no extra parameters. - - ``simple``: drop oldest matrix column. Has no + - ``simple`` + Drop oldest matrix column. Has no extra parameters. - - ``svd``: keep only the most significant SVD + - ``svd`` + Keep only the most significant SVD components. - Extra parameters: - - ``to_retain``: number of SVD components to - retain when rank reduction is done. - Default is ``max_rank - 2``. + + Extra parameters: + + - ``to_retain`` + Number of SVD components to + retain when rank reduction is done. + Default is ``max_rank - 2``. max_rank : int, optional Maximum rank for the Broyden matrix. Default is infinity (ie., no rank reduction). From b9f5e900f750ea37fcf89de541b1a8ca42e7788e Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Wed, 12 Dec 2018 17:44:01 -0800 Subject: [PATCH 62/70] MAINT: update `constants.astronomical_unit` to new 2012 value. 
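
The "new 2012 value" is the IAU 2012 Resolution B2 definition, which fixes
the astronomical unit at exactly 149 597 870 700 m; the old entry,
149597870691.0, was a measured estimate. A quick sanity check (an
illustrative doctest, assuming this patch is applied):

    >>> from scipy import constants
    >>> constants.au == constants.astronomical_unit == 149597870700.0
    True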
Closes gh-9587
---
 scipy/constants/constants.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scipy/constants/constants.py b/scipy/constants/constants.py
index 701db97c7889..b94ece5e595c 100644
--- a/scipy/constants/constants.py
+++ b/scipy/constants/constants.py
@@ -120,7 +120,7 @@
 fermi = 1e-15
 angstrom = 1e-10
 micron = 1e-6
-au = astronomical_unit = 149597870691.0
+au = astronomical_unit = 149597870700.0
 light_year = Julian_year * c
 parsec = au / arcsec

From 1542e0680a532d9aa4b72134556f2e2770e681aa Mon Sep 17 00:00:00 2001
From: Tyler Reddy
Date: Thu, 13 Dec 2018 09:34:50 -0800
Subject: [PATCH 63/70] TST: Add 32-bit testing to CI

* Add 32-bit Linux testing to Azure CI; 32-bit testing has been
  missing from our CI for quite some time
---
 azure-pipelines.yml | 24 ++++++++++++++++++++++++
 1 file changed, 24 insertions(+)

diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index 0ddf5b970940..b7605002b30e 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -7,6 +7,30 @@ trigger:
     - maintenance/*

 jobs:
+- job: Linux_Python_36_32bit_full
+  pool:
+    vmIMage: 'ubuntu-16.04'
+  steps:
+  - script: |
+           docker pull i386/ubuntu:bionic
+           docker run -v $(pwd):/scipy i386/ubuntu:bionic /bin/bash -c "cd scipy && \
+           apt-get -y update && \
+           apt-get -y install python3.6-dev python3-pip pkg-config libpng-dev libjpeg8-dev libfreetype6-dev && \
+           pip3 install setuptools wheel numpy cython==0.29 pytest pytest-timeout pytest-xdist pytest-env pytest-faulthandler Pillow mpmath matplotlib && \
+           apt-get -y install gfortran-5 wget && \
+           cd .. && \
+           mkdir openblas && cd openblas && \
+           wget https://3f23b170c54c2533c070-1c8a9b3114517dc5fe17b7c3f8c63a43.ssl.cf2.rackcdn.com/openblas-0.3.0-Linux-i686.tar.gz && \
+           tar zxvf openblas-0.3.0-Linux-i686.tar.gz && \
+           cp -r ./usr/local/lib/* /usr/lib && \
+           cp ./usr/local/include/* /usr/include && \
+           cd ../scipy && \
+           F77=gfortran-5 F90=gfortran-5 python3 runtests.py --mode=full -- -n auto -rsx --junitxml=junit/test-results.xml"
+    displayName: 'Run 32-bit Ubuntu Docker Build / Tests'
+  - task: PublishTestResults@2
+    inputs:
+      testResultsFiles: '**/test-*.xml'
+      testRunTitle: 'Publish test results for Python 3.6-32 bit full Linux'
 - job: Windows
   pool:
     vmIMage: 'VS2017-Win2016'
From d1bdf3c99cff74fb57e1ea751081499b1e9c186b Mon Sep 17 00:00:00 2001
From: Tyler Reddy
Date: Fri, 14 Dec 2018 08:54:24 -0800
Subject: [PATCH 64/70] MAINT: add Azure badge
---
 README.rst | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/README.rst b/README.rst
index 6ffff481ac72..1e9a26b6325f 100644
--- a/README.rst
+++ b/README.rst
@@ -13,6 +13,9 @@ SciPy
 .. image:: https://codecov.io/gh/scipy/scipy/branch/master/graph/badge.svg
   :target: https://codecov.io/gh/scipy/scipy

+.. image:: https://dev.azure.com/scipy-org/SciPy/_apis/build/status/scipy.scipy?branchName=master
+  :target: https://dev.azure.com/scipy-org/SciPy/_build/latest?definitionId=1&branchName=master
+
 SciPy (pronounced "Sigh Pie") is open-source software for mathematics,
 science, and engineering.
It includes modules for statistics, optimization, integration, linear algebra, Fourier transforms, signal and image processing, From e93781950cae9448b5386abb8521288e8354063b Mon Sep 17 00:00:00 2001 From: Tyler Reddy Date: Fri, 14 Dec 2018 14:11:57 -0800 Subject: [PATCH 65/70] TST: remove VC 9.0 from Azure CI * master branch no longer supports / tests Python 2.7, so we can now safely remove the older VC 9.0 compiler from Windows Azure CI --- azure-pipelines.yml | 8 -------- 1 file changed, 8 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index b7605002b30e..34dc240e11fd 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -70,14 +70,6 @@ jobs: versionSpec: $(PYTHON_VERSION) addToPath: true architecture: $(PYTHON_ARCH) - # as noted by numba project, currently need - # specific VC install for Python 2.7 - - powershell: | - $wc = New-Object net.webclient - $wc.Downloadfile("https://download.microsoft.com/download/7/9/6/796EF2E4-801B-4FC4-AB28-B59FBF6D907B/VCForPython27.msi", "VCForPython27.msi") - Start-Process "VCForPython27.msi" /qn -Wait - displayName: 'Install VC 9.0' - condition: eq(variables['PYTHON_VERSION'], '2.7') - script: python -m pip install --upgrade pip setuptools wheel displayName: 'Install tools' - powershell: | From 69cbb7cad26b72fcbd32b8271533ccb797f7e278 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Fri, 14 Dec 2018 21:03:05 -0800 Subject: [PATCH 66/70] TST: reduce max memory usage for sparse.linalg.gcrotmk test. Closes gh-9595 This test calls `np.random.choice` with a large sparse array of shape (10000, 10000). Under the hood, `choice` ends up constructing a dense array of the same size explicitly, in `np.random.permutation`. That takes 0.8 GB for int64. That then goes on to `np.random.shuffle`, which creates some extra buffers. The `MemoryError` seems to be simply because we're pushing the 2 GB limit on 32-bit Python. 
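
Back-of-envelope numbers for the sizes involved (a sketch, assuming the
8-byte int64 entries described above):

    # dense index array np.random.permutation materializes for the old test
    old_bytes = 10000 * 10000 * 8   # 800,000,000 bytes, ~0.8 GB
    # after shrinking the test matrix to (2000, 2000)
    new_bytes = 2000 * 2000 * 8     # 32,000,000 bytes, ~0.03 GB

so even with the extra shuffle buffers, the shrunken test stays far below
the 2 GB limit.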
--- scipy/sparse/linalg/isolve/tests/test_gcrotmk.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scipy/sparse/linalg/isolve/tests/test_gcrotmk.py b/scipy/sparse/linalg/isolve/tests/test_gcrotmk.py index 84e2f48212ca..e8915607e90f 100644 --- a/scipy/sparse/linalg/isolve/tests/test_gcrotmk.py +++ b/scipy/sparse/linalg/isolve/tests/test_gcrotmk.py @@ -61,8 +61,8 @@ def test_preconditioner(self): def test_arnoldi(self): np.random.rand(1234) - A = eye(10000) + rand(10000,10000,density=1e-4) - b = np.random.rand(10000) + A = eye(2000) + rand(2000, 2000, density=5e-4) + b = np.random.rand(2000) # The inner arnoldi should be equivalent to gmres with suppress_warnings() as sup: From 1fa19a5365d049417da3c830fe7a1c63f2eb44af Mon Sep 17 00:00:00 2001 From: Roman Yurchak Date: Sat, 15 Dec 2018 07:06:40 +0100 Subject: [PATCH 67/70] MAINT: Use list comprehension when possible (#9533) MAINT: use list comprehension where possible --- scipy/_lib/decorator.py | 5 +- scipy/integrate/_ivp/ivp.py | 5 +- .../tests/test_banded_ode_solvers.py | 4 +- scipy/interpolate/interpolate.py | 5 +- scipy/io/idl.py | 41 ++++------- scipy/io/matlab/mio5_utils.pyx | 4 +- scipy/io/matlab/tests/test_mio.py | 5 +- scipy/linalg/tests/test_decomp_update.py | 7 +- scipy/optimize/_shgo.py | 4 +- .../canonical_constraint.py | 23 +++--- scipy/signal/tests/mpsig.py | 4 +- scipy/spatial/_spherical_voronoi.py | 7 +- scipy/spatial/qhull.pyx | 6 +- scipy/spatial/tests/test_qhull.py | 5 +- scipy/special/_generate_pyx.py | 20 ++--- scipy/special/_mptestutils.py | 4 +- scipy/special/_precompute/gammainc_asy.py | 4 +- scipy/special/_precompute/lambertw.py | 4 +- scipy/special/_precompute/loggamma.py | 5 +- scipy/special/tests/test_cython_special.py | 7 +- scipy/special/tests/test_mpmath.py | 73 ++++++------------- .../special/tests/test_precompute_gammainc.py | 4 +- scipy/special/utils/datafunc.py | 10 +-- scipy/stats/_distn_infrastructure.py | 5 +- scipy/stats/morestats.py | 4 +- scipy/stats/tests/test_distributions.py | 6 +- scipy/stats/tests/test_morestats.py | 4 +- 27 files changed, 90 insertions(+), 185 deletions(-) diff --git a/scipy/_lib/decorator.py b/scipy/_lib/decorator.py index db751b3081a4..ea6b81c50a2d 100644 --- a/scipy/_lib/decorator.py +++ b/scipy/_lib/decorator.py @@ -392,9 +392,8 @@ def dispatch_info(*types): An utility to introspect the dispatch algorithm """ check(types) - lst = [] - for anc in itertools.product(*ancestors(*types)): - lst.append(tuple(a.__name__ for a in anc)) + lst = [tuple(a.__name__ for a in anc) + for anc in itertools.product(*ancestors(*types))] return lst def _dispatch(dispatch_args, *args, **kw): diff --git a/scipy/integrate/_ivp/ivp.py b/scipy/integrate/_ivp/ivp.py index 755a9f6f554b..01c7a1ace8e9 100644 --- a/scipy/integrate/_ivp/ivp.py +++ b/scipy/integrate/_ivp/ivp.py @@ -105,9 +105,8 @@ def handle_events(sol, events, active_events, is_terminal, t_old, t): terminate : bool Whether a terminal event occurred. 
""" - roots = [] - for event_index in active_events: - roots.append(solve_event_equation(events[event_index], sol, t_old, t)) + roots = [solve_event_equation(events[event_index], sol, t_old, t) + for event_index in active_events] roots = np.asarray(roots) diff --git a/scipy/integrate/tests/test_banded_ode_solvers.py b/scipy/integrate/tests/test_banded_ode_solvers.py index ec8a19170daa..cb219f757e1a 100644 --- a/scipy/integrate/tests/test_banded_ode_solvers.py +++ b/scipy/integrate/tests/test_banded_ode_solvers.py @@ -36,9 +36,7 @@ def _linear_jac(t, y, a): def _linear_banded_jac(t, y, a): """Banded Jacobian.""" ml, mu = _band_count(a) - bjac = [] - for k in range(mu, 0, -1): - bjac.append(np.r_[[0] * k, np.diag(a, k)]) + bjac = [np.r_[[0] * k, np.diag(a, k)] for k in range(mu, 0, -1)] bjac.append(np.diag(a)) for k in range(-1, -ml-1, -1): bjac.append(np.r_[np.diag(a, k), [0] * (-k)]) diff --git a/scipy/interpolate/interpolate.py b/scipy/interpolate/interpolate.py index 61a446abeb53..902004d5439c 100644 --- a/scipy/interpolate/interpolate.py +++ b/scipy/interpolate/interpolate.py @@ -2501,9 +2501,8 @@ def _evaluate_linear(self, indices, norm_distances, out_of_bounds): return values def _evaluate_nearest(self, indices, norm_distances, out_of_bounds): - idx_res = [] - for i, yi in zip(indices, norm_distances): - idx_res.append(np.where(yi <= .5, i, i + 1)) + idx_res = [np.where(yi <= .5, i, i + 1) + for i, yi in zip(indices, norm_distances)] return self.values[tuple(idx_res)] def _find_indices(self, xi): diff --git a/scipy/io/idl.py b/scipy/io/idl.py index 593807d98abe..5cd4d37fbcfd 100644 --- a/scipy/io/idl.py +++ b/scipy/io/idl.py @@ -395,17 +395,13 @@ def _read_record(f): elif record['rectype'] == "HEAP_HEADER": record['nvalues'] = _read_long(f) - record['indices'] = [] - for i in range(record['nvalues']): - record['indices'].append(_read_long(f)) + record['indices'] = [_read_long(f) for _ in range(record['nvalues'])] elif record['rectype'] == "COMMONBLOCK": record['nvars'] = _read_long(f) record['name'] = _read_string(f) - record['varnames'] = [] - for i in range(record['nvars']): - record['varnames'].append(_read_string(f)) + record['varnames'] = [_read_string(f) for _ in range(record['nvars'])] elif record['rectype'] == "END_MARKER": @@ -466,9 +462,7 @@ def _read_arraydesc(f): arraydesc['nmax'] = _read_long(f) - arraydesc['dims'] = [] - for d in range(arraydesc['nmax']): - arraydesc['dims'].append(_read_long(f)) + arraydesc['dims'] = [_read_long(f) for _ in range(arraydesc['nmax'])] elif arraydesc['arrstart'] == 18: @@ -518,32 +512,27 @@ def _read_structdesc(f): if not structdesc['predef']: - structdesc['tagtable'] = [] - for t in range(structdesc['ntags']): - structdesc['tagtable'].append(_read_tagdesc(f)) + structdesc['tagtable'] = [_read_tagdesc(f) + for _ in range(structdesc['ntags'])] for tag in structdesc['tagtable']: tag['name'] = _read_string(f) - structdesc['arrtable'] = {} - for tag in structdesc['tagtable']: - if tag['array']: - structdesc['arrtable'][tag['name']] = _read_arraydesc(f) + structdesc['arrtable'] = {tag['name']: _read_arraydesc(f) + for tag in structdesc['tagtable'] + if tag['array']} - structdesc['structtable'] = {} - for tag in structdesc['tagtable']: - if tag['structure']: - structdesc['structtable'][tag['name']] = _read_structdesc(f) + structdesc['structtable'] = {tag['name']: _read_structdesc(f) + for tag in structdesc['tagtable'] + if tag['structure']} if structdesc['inherits'] or structdesc['is_super']: structdesc['classname'] = _read_string(f) 
structdesc['nsupclasses'] = _read_long(f) - structdesc['supclassnames'] = [] - for s in range(structdesc['nsupclasses']): - structdesc['supclassnames'].append(_read_string(f)) - structdesc['supclasstable'] = [] - for s in range(structdesc['nsupclasses']): - structdesc['supclasstable'].append(_read_structdesc(f)) + structdesc['supclassnames'] = [ + _read_string(f) for _ in range(structdesc['nsupclasses'])] + structdesc['supclasstable'] = [ + _read_structdesc(f) for _ in range(structdesc['nsupclasses'])] STRUCT_DICT[structdesc['name']] = structdesc diff --git a/scipy/io/matlab/mio5_utils.pyx b/scipy/io/matlab/mio5_utils.pyx index b7f02cce452b..ef7805dddb06 100644 --- a/scipy/io/matlab/mio5_utils.pyx +++ b/scipy/io/matlab/mio5_utils.pyx @@ -611,9 +611,7 @@ cdef class VarReader5: raise ValueError('Too many dimensions (%d) for numpy arrays' % header.n_dims) # convert dims to list - header.dims = [] - for i in range(header.n_dims): - header.dims.append(header.dims_ptr[i]) + header.dims = [header.dims_ptr[i] for i in range(header.n_dims)] header.name = self.read_int8_string() return header diff --git a/scipy/io/matlab/tests/test_mio.py b/scipy/io/matlab/tests/test_mio.py index 7cb1e771024f..a6a790ba106b 100644 --- a/scipy/io/matlab/tests/test_mio.py +++ b/scipy/io/matlab/tests/test_mio.py @@ -311,9 +311,8 @@ def _whos_check_case(name, files, case, classes): whos = whosmat(file_name) - expected_whos = [] - for k, expected in case.items(): - expected_whos.append((k, expected.shape, classes[k])) + expected_whos = [ + (k, expected.shape, classes[k]) for k, expected in case.items()] whos.sort() expected_whos.sort() diff --git a/scipy/linalg/tests/test_decomp_update.py b/scipy/linalg/tests/test_decomp_update.py index 0e840e7b6e1b..c961c84f8a74 100644 --- a/scipy/linalg/tests/test_decomp_update.py +++ b/scipy/linalg/tests/test_decomp_update.py @@ -81,11 +81,10 @@ def nonitemsize_strides(arrs): out.append(c) return out + def make_nonnative(arrs): - out = [] - for a in arrs: - out.append(a.astype(a.dtype.newbyteorder())) - return out + return [a.astype(a.dtype.newbyteorder()) for a in arrs] + class BaseQRdeltas(object): def setup_method(self): diff --git a/scipy/optimize/_shgo.py b/scipy/optimize/_shgo.py index afa3bd86a7b8..b870957caaff 100644 --- a/scipy/optimize/_shgo.py +++ b/scipy/optimize/_shgo.py @@ -1097,9 +1097,7 @@ def construct_lcb_delaunay(self, v_min, ind=None): cbounds : list of lists List of size dim with length-2 list of bounds for each dimension """ - cbounds = [] - for x_b_i in self.bounds: - cbounds.append([x_b_i[0], x_b_i[1]]) + cbounds = [[x_b_i[0], x_b_i[1]] for x_b_i in self.bounds] return cbounds diff --git a/scipy/optimize/_trustregion_constr/canonical_constraint.py b/scipy/optimize/_trustregion_constr/canonical_constraint.py index 74c36cb2f190..7cd19cd2962c 100644 --- a/scipy/optimize/_trustregion_constr/canonical_constraint.py +++ b/scipy/optimize/_trustregion_constr/canonical_constraint.py @@ -99,12 +99,11 @@ def concatenate(cls, canonical_constraints, sparse_jacobian): must have their Jacobians in the same format. 
""" def fun(x): - eq_all = [] - ineq_all = [] - for c in canonical_constraints: - eq, ineq = c.fun(x) - eq_all.append(eq) - ineq_all.append(ineq) + if canonical_constraints: + eq_all, ineq_all = zip( + *[c.fun(x) for c in canonical_constraints]) + else: + eq_all, ineq_all = [], [] return np.hstack(eq_all), np.hstack(ineq_all) @@ -114,12 +113,12 @@ def fun(x): vstack = np.vstack def jac(x): - eq_all = [] - ineq_all = [] - for c in canonical_constraints: - eq, ineq = c.jac(x) - eq_all.append(eq) - ineq_all.append(ineq) + if canonical_constraints: + eq_all, ineq_all = zip( + *[c.jac(x) for c in canonical_constraints]) + else: + eq_all, ineq_all = [], [] + return vstack(eq_all), vstack(ineq_all) def hess(x, v_eq, v_ineq): diff --git a/scipy/signal/tests/mpsig.py b/scipy/signal/tests/mpsig.py index 237557d93775..49cc2e353282 100644 --- a/scipy/signal/tests/mpsig.py +++ b/scipy/signal/tests/mpsig.py @@ -78,9 +78,7 @@ def _butter_analog_poles(n): scipy.signal.butter(n, 1, analog=True, output='zpk'), but mpmath is used, and only the poles are returned. """ - poles = [] - for k in range(-n+1, n, 2): - poles.append(-mpmath.exp(1j*mpmath.pi*k/(2*n))) + poles = [-mpmath.exp(1j*mpmath.pi*k/(2*n)) for k in range(-n+1, n, 2)] return poles diff --git a/scipy/spatial/_spherical_voronoi.py b/scipy/spatial/_spherical_voronoi.py index eb2d1b4dca26..10b6d0b9feb8 100644 --- a/scipy/spatial/_spherical_voronoi.py +++ b/scipy/spatial/_spherical_voronoi.py @@ -304,10 +304,9 @@ def _calc_vertices_regions(self): # group by generator indices to produce # unsorted regions in nested list - groups = [] - for k, g in itertools.groupby(array_associations, - lambda t: t[0]): - groups.append(list(list(zip(*list(g)))[1])) + groups = [list(list(zip(*list(g)))[1]) + for k, g in itertools.groupby(array_associations, + lambda t: t[0])] self.regions = groups diff --git a/scipy/spatial/qhull.pyx b/scipy/spatial/qhull.pyx index cfc5d345cd3c..50d2832b4074 100644 --- a/scipy/spatial/qhull.pyx +++ b/scipy/spatial/qhull.pyx @@ -295,10 +295,8 @@ cdef class _Qhull: if incremental: incremental_bad_ops = set([b'Qbb', b'Qbk', b'QBk', b'QbB', b'Qz']) - bad_opts = [] - for bad_opt in incremental_bad_ops: - if bad_opt in options: - bad_opts.append(bad_opt) + bad_opts = [bad_opt for bad_opt in incremental_bad_ops + if bad_opt in options] if bad_opts: raise ValueError("Qhull options %r are incompatible " "with incremental mode" % (bad_opts,)) diff --git a/scipy/spatial/tests/test_qhull.py b/scipy/spatial/tests/test_qhull.py index 3b2b654ea7ef..c7b61a6db8f4 100644 --- a/scipy/spatial/tests/test_qhull.py +++ b/scipy/spatial/tests/test_qhull.py @@ -427,9 +427,8 @@ def _check(self, tri): indptr, indices = tri.vertex_neighbor_vertices - got = [] - for j in range(tri.points.shape[0]): - got.append(set(map(int, indices[indptr[j]:indptr[j+1]]))) + got = [set(map(int, indices[indptr[j]:indptr[j+1]])) + for j in range(tri.points.shape[0])] assert_equal(got, expected, err_msg="%r != %r" % (got, expected)) diff --git a/scipy/special/_generate_pyx.py b/scipy/special/_generate_pyx.py index ad4fe69c4502..bf059e3d2105 100644 --- a/scipy/special/_generate_pyx.py +++ b/scipy/special/_generate_pyx.py @@ -756,12 +756,8 @@ def _get_types(self, codes): return all_types, fused_types def _get_vars(self): - invars = [] - for n in range(len(self.intypes)): - invars.append("x{}".format(n)) - outvars = [] - for n in range(len(self.outtypes)): - outvars.append("y{}".format(n)) + invars = ["x{}".format(n) for n in range(len(self.intypes))] + outvars = ["y{}".format(n) for n 
         return invars, outvars
 
     def _get_conditional(self, types, codes, adverb):
@@ -842,11 +838,7 @@ def _get_nan_decs(self):
             return lines
 
         # Set fused-type variables to nan
-        all_codes = []
-        for fused_type in fused_types:
-            _, codes = fused_type
-            all_codes.append(codes)
-        all_codes = tuple(all_codes)
+        all_codes = tuple([codes for _unused, codes in fused_types])
         codelens = list(map(lambda x: len(x), all_codes))
         last = numpy.product(codelens) - 1
@@ -878,10 +870,8 @@ def _get_tmp_decs(self, all_tmpvars):
         tab = " "*4
         tmpvars = list(all_tmpvars)
         tmpvars.sort()
-        tmpdecs = []
-        for tmpvar in tmpvars:
-            line = "cdef npy_cdouble {}".format(tmpvar)
-            tmpdecs.append(tab + line)
+        tmpdecs = [tab + "cdef npy_cdouble {}".format(tmpvar)
+                   for tmpvar in tmpvars]
         return tmpdecs
 
     def _get_python_wrap(self):
diff --git a/scipy/special/_mptestutils.py b/scipy/special/_mptestutils.py
index 4656235f62b6..1e43b664a864 100644
--- a/scipy/special/_mptestutils.py
+++ b/scipy/special/_mptestutils.py
@@ -173,9 +173,7 @@ def get_args(argspec, n):
         ms = np.asarray([1.5 if isinstance(spec, ComplexArg) else 1.0
                          for spec in argspec])
         ms = (n**(ms/sum(ms))).astype(int) + 1
-    args = []
-    for spec, m in zip(argspec, ms):
-        args.append(spec.values(m))
+    args = [spec.values(m) for spec, m in zip(argspec, ms)]
     args = np.array(np.broadcast_arrays(*np.ix_(*args))).reshape(nargs, -1).T
 
     return args
diff --git a/scipy/special/_precompute/gammainc_asy.py b/scipy/special/_precompute/gammainc_asy.py
index 2bbcf3cd8826..486420291322 100644
--- a/scipy/special/_precompute/gammainc_asy.py
+++ b/scipy/special/_precompute/gammainc_asy.py
@@ -34,9 +34,7 @@ def compute_a(n):
 def compute_g(n):
     """g_k from DLMF 5.11.3/5.11.5"""
     a = compute_a(2*n)
-    g = []
-    for k in range(n):
-        g.append(mp.sqrt(2)*mp.rf(0.5, k)*a[2*k])
+    g = [mp.sqrt(2)*mp.rf(0.5, k)*a[2*k] for k in range(n)]
     return g
diff --git a/scipy/special/_precompute/lambertw.py b/scipy/special/_precompute/lambertw.py
index 34ab915cc2a2..c70136c003ad 100644
--- a/scipy/special/_precompute/lambertw.py
+++ b/scipy/special/_precompute/lambertw.py
@@ -15,9 +15,7 @@
 
 
 def lambertw_pade():
-    derivs = []
-    for n in range(6):
-        derivs.append(mpmath.diff(mpmath.lambertw, 0, n=n))
+    derivs = [mpmath.diff(mpmath.lambertw, 0, n=n) for n in range(6)]
     p, q = mpmath.pade(derivs, 3, 2)
     return p, q
diff --git a/scipy/special/_precompute/loggamma.py b/scipy/special/_precompute/loggamma.py
index bbaee613e842..cca4e1c01825 100644
--- a/scipy/special/_precompute/loggamma.py
+++ b/scipy/special/_precompute/loggamma.py
@@ -8,10 +8,9 @@
 
 
 def stirling_series(N):
-    coeffs = []
     with mpmath.workdps(100):
-        for n in range(1, N + 1):
-            coeffs.append(mpmath.bernoulli(2*n)/(2*n*(2*n - 1)))
+        coeffs = [mpmath.bernoulli(2*n)/(2*n*(2*n - 1))
+                  for n in range(1, N + 1)]
     return coeffs
diff --git a/scipy/special/tests/test_cython_special.py b/scipy/special/tests/test_cython_special.py
index ea5b494a2db8..56ae4b1c4afb 100644
--- a/scipy/special/tests/test_cython_special.py
+++ b/scipy/special/tests/test_cython_special.py
@@ -313,10 +313,9 @@ def test_cython_api(param):
     # Check results
     for typecodes in specializations:
         # Pick the correct specialized function
-        signature = []
-        for j, code in enumerate(typecodes):
-            if is_fused_code[j]:
-                signature.append(CYTHON_SIGNATURE_MAP[code])
+        signature = [CYTHON_SIGNATURE_MAP[code]
+                     for j, code in enumerate(typecodes)
+                     if is_fused_code[j]]
 
         if signature:
             cy_spec_func = cyfunc[tuple(signature)]
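In the test_mpmath.py hunks below, the dataset-building loops collapse to comprehensions, and where the old loop ran under mpmath.workdps the comprehension stays inside that context manager, so the reference values are still computed at the elevated working precision. The recurring pattern, sketched standalone (rgamma is just one representative function; mpmath and numpy assumed available):

import numpy as np
import mpmath

z = np.linspace(0.5, 2.5, 5)
with mpmath.workdps(100):
    # evaluated at 100 decimal digits, then rounded to complex on exit
    dataset = [(z0, complex(mpmath.rgamma(z0))) for z0 in z]
dataset = np.array(dataset)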
diff --git a/scipy/special/tests/test_mpmath.py b/scipy/special/tests/test_mpmath.py
index 96aeb536b2bf..5b1517371389 100644
--- a/scipy/special/tests/test_mpmath.py
+++ b/scipy/special/tests/test_mpmath.py
@@ -365,9 +365,7 @@ def test_loggamma_taylor_transition():
     dz = r*np.exp(1j*theta)
     z = np.r_[1 + dz, 2 + dz].flatten()
 
-    dataset = []
-    for z0 in z:
-        dataset.append((z0, complex(mpmath.loggamma(z0))))
+    dataset = [(z0, complex(mpmath.loggamma(z0))) for z0 in z]
     dataset = np.array(dataset)
 
     FuncData(sc.loggamma, dataset, 0, 1, rtol=5e-14).check()
@@ -383,9 +381,7 @@ def test_loggamma_taylor():
     dz = r*np.exp(1j*theta)
     z = np.r_[1 + dz, 2 + dz].flatten()
 
-    dataset = []
-    for z0 in z:
-        dataset.append((z0, complex(mpmath.loggamma(z0))))
+    dataset = [(z0, complex(mpmath.loggamma(z0))) for z0 in z]
     dataset = np.array(dataset)
 
     FuncData(sc.loggamma, dataset, 0, 1, rtol=5e-14).check()
@@ -409,10 +405,8 @@ def test_rgamma_zeros():
     dz = dx + 1j*dy
     zeros = np.arange(0, -170, -1).reshape(1, 1, -1)
     z = (zeros + np.dstack((dz,)*zeros.size)).flatten()
-    dataset = []
     with mpmath.workdps(100):
-        for z0 in z:
-            dataset.append((z0, complex(mpmath.rgamma(z0))))
+        dataset = [(z0, complex(mpmath.rgamma(z0))) for z0 in z]
 
     dataset = np.array(dataset)
     FuncData(sc.rgamma, dataset, 0, 1, rtol=1e-12).check()
@@ -438,10 +432,8 @@ def test_digamma_roots():
     dx, dy = np.meshgrid(dx, dy)
     dz = dx + 1j*dy
     z = (roots + np.dstack((dz,)*roots.size)).flatten()
-    dataset = []
     with mpmath.workdps(30):
-        for z0 in z:
-            dataset.append((z0, complex(mpmath.digamma(z0))))
+        dataset = [(z0, complex(mpmath.digamma(z0))) for z0 in z]
 
     dataset = np.array(dataset)
     FuncData(sc.digamma, dataset, 0, 1, rtol=1e-14).check()
@@ -460,11 +452,8 @@ def test_digamma_negreal():
     x, y = np.meshgrid(x, y)
     z = (x + 1j*y).flatten()
 
-    dataset = []
     with mpmath.workdps(40):
-        for z0 in z:
-            res = digamma(z0)
-            dataset.append((z0, complex(res)))
+        dataset = [(z0, complex(digamma(z0))) for z0 in z]
 
     dataset = np.asarray(dataset)
     FuncData(sc.digamma, dataset, 0, 1, rtol=1e-13).check()
@@ -480,11 +469,8 @@ def test_digamma_boundary():
     x, y = np.meshgrid(x, y)
     z = (x + 1j*y).flatten()
 
-    dataset = []
     with mpmath.workdps(30):
-        for z0 in z:
-            res = mpmath.digamma(z0)
-            dataset.append((z0, complex(res)))
+        dataset = [(z0, complex(mpmath.digamma(z0))) for z0 in z]
 
     dataset = np.asarray(dataset)
     FuncData(sc.digamma, dataset, 0, 1, rtol=1e-13).check()
@@ -503,10 +489,9 @@ def test_gammainc_boundary():
     x = a.copy()
     a, x = np.meshgrid(a, x)
     a, x = a.flatten(), x.flatten()
-    dataset = []
     with mpmath.workdps(100):
-        for a0, x0 in zip(a, x):
-            dataset.append((a0, x0, float(mpmath.gammainc(a0, b=x0, regularized=True))))
+        dataset = [(a0, x0, float(mpmath.gammainc(a0, b=x0, regularized=True)))
+                   for a0, x0 in zip(a, x)]
     dataset = np.array(dataset)
 
     FuncData(sc.gammainc, dataset, (0, 1), 2, rtol=1e-12).check()
@@ -528,11 +513,8 @@ def spence(z):
     r = np.linspace(0.5, 1.5)
     theta = np.linspace(0, 2*pi)
     z = (1 + np.outer(r, np.exp(1j*theta))).flatten()
-    dataset = []
-    for z0 in z:
-        dataset.append((z0, spence(z0)))
+    dataset = np.asarray([(z0, spence(z0)) for z0 in z])
 
-    dataset = np.array(dataset)
     FuncData(sc.spence, dataset, 0, 1, rtol=1e-14).check()
@@ -549,11 +531,8 @@ def test_sinpi_zeros():
     dz = dx + 1j*dy
     zeros = np.arange(-100, 100, 1).reshape(1, 1, -1)
     z = (zeros + np.dstack((dz,)*zeros.size)).flatten()
-    dataset = []
-    for z0 in z:
-        dataset.append((z0, complex(mpmath.sinpi(z0))))
-
-    dataset = np.array(dataset)
+    dataset = np.asarray([(z0, complex(mpmath.sinpi(z0)))
+                          for z0 in z])
     FuncData(_sinpi, dataset, 0, 1, rtol=2*eps).check()
@@ -566,11 +545,9 @@ def test_cospi_zeros():
     dz = dx + 1j*dy
     zeros = (np.arange(-100, 100, 1) + 0.5).reshape(1, 1, -1)
     z = (zeros + np.dstack((dz,)*zeros.size)).flatten()
-    dataset = []
-    for z0 in z:
-        dataset.append((z0, complex(mpmath.cospi(z0))))
+    dataset = np.asarray([(z0, complex(mpmath.cospi(z0)))
+                          for z0 in z])
 
-    dataset = np.array(dataset)
     FuncData(_cospi, dataset, 0, 1, rtol=2*eps).check()
@@ -628,10 +605,8 @@ def test_wrightomega_branch():
     x, y = np.meshgrid(x, y)
     z = (x + 1j*y).flatten()
 
-    dataset = []
-    for z0 in z:
-        dataset.append((z0, complex(_mpmath_wrightomega(z0, 25))))
-    dataset = np.asarray(dataset)
+    dataset = np.asarray([(z0, complex(_mpmath_wrightomega(z0, 25)))
+                          for z0 in z])
 
     FuncData(sc.wrightomega, dataset, 0, 1, rtol=1e-8).check()
@@ -645,10 +620,8 @@ def test_wrightomega_region1():
     x, y = np.meshgrid(x, y)
     z = (x + 1j*y).flatten()
 
-    dataset = []
-    for z0 in z:
-        dataset.append((z0, complex(_mpmath_wrightomega(z0, 25))))
-    dataset = np.asarray(dataset)
+    dataset = np.asarray([(z0, complex(_mpmath_wrightomega(z0, 25)))
+                          for z0 in z])
 
     FuncData(sc.wrightomega, dataset, 0, 1, rtol=1e-15).check()
@@ -662,10 +635,8 @@ def test_wrightomega_region2():
     x, y = np.meshgrid(x, y)
     z = (x + 1j*y).flatten()
 
-    dataset = []
-    for z0 in z:
-        dataset.append((z0, complex(_mpmath_wrightomega(z0, 25))))
-    dataset = np.asarray(dataset)
+    dataset = np.asarray([(z0, complex(_mpmath_wrightomega(z0, 25)))
+                          for z0 in z])
 
     FuncData(sc.wrightomega, dataset, 0, 1, rtol=1e-15).check()
@@ -681,10 +652,8 @@ def test_lambertw_smallz():
     x, y = np.meshgrid(x, y)
     z = (x + 1j*y).flatten()
 
-    dataset = []
-    for z0 in z:
-        dataset.append((z0, complex(mpmath.lambertw(z0))))
-    dataset = np.asarray(dataset)
+    dataset = np.asarray([(z0, complex(mpmath.lambertw(z0)))
+                          for z0 in z])
 
     FuncData(sc.lambertw, dataset, 0, 1, rtol=1e-13).check()
diff --git a/scipy/special/tests/test_precompute_gammainc.py b/scipy/special/tests/test_precompute_gammainc.py
index 03a594b01e98..bfb15d9ab16a 100644
--- a/scipy/special/tests/test_precompute_gammainc.py
+++ b/scipy/special/tests/test_precompute_gammainc.py
@@ -81,9 +81,7 @@ def test_d():
                (9, 0, -mp.mpf('0.596761290192746250124390067179e-3')),
                (9, 12, mp.mpf('0.870823417786464116761231237189e-6'))]
     d = compute_d(10, 13)
-    res = []
-    for k, n, std in dataset:
-        res.append(d[k][n])
+    res = [d[k][n] for k, n, std in dataset]
     std = map(lambda x: x[2], dataset)
     mp_assert_allclose(res, std)
diff --git a/scipy/special/utils/datafunc.py b/scipy/special/utils/datafunc.py
index fb886979f578..e0888f07e35c 100644
--- a/scipy/special/utils/datafunc.py
+++ b/scipy/special/utils/datafunc.py
@@ -9,9 +9,7 @@ def parse_txt_data(filename):
     f = open(filename)
     try:
         reader = csv.reader(f, delimiter=',')
-        data = []
-        for row in reader:
-            data.append(list(map(float, row)))
+        data = [list(map(float, row)) for row in reader]
         nc = len(data[0])
         for i in data:
             if not nc == len(i):
@@ -48,11 +46,7 @@ def run_test(filename, funcs, args=[0]):
             x = [data[args[i]] for i in nargs]
             return f(*x)
     else:
-        y = []
-        i = 1
-        for f in funcs:
-            y.append(f(data[:, 0]) - data[:, i])
-            i += 1
+        y = [f(data[:, 0]) - data[:, idx + 1] for idx, f in enumerate(funcs)]
 
         return data[:, 0], y
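The run_test change just above also retires a hand-maintained counter (i = 1 ... i += 1) in favour of enumerate with an offset. The correspondence in isolation, with toy functions and columns standing in for the real data array:

funcs = [abs, float]
x = -2.0
cols = [None, 2.5, -1.5]   # column 0 unused, mirroring data[:, idx + 1]

y_loop = []
i = 1
for f in funcs:
    y_loop.append(f(x) - cols[i])
    i += 1

y_comp = [f(x) - cols[idx + 1] for idx, f in enumerate(funcs)]
assert y_loop == y_comp == [-0.5, -0.5]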
diff --git a/scipy/stats/_distn_infrastructure.py b/scipy/stats/_distn_infrastructure.py
index d1b8524c759d..a5fa331ce9f5 100644
--- a/scipy/stats/_distn_infrastructure.py
+++ b/scipy/stats/_distn_infrastructure.py
@@ -1074,10 +1074,7 @@ def stats(self, *args, **kwds):
                     place(out0, cond, g2)
                 output.append(out0)
             else:
                 # no valid args
-                output = []
-                for _ in moments:
-                    out0 = default.copy()
-                    output.append(out0)
+                output = [default.copy() for _ in moments]
 
         if len(output) == 1:
             return output[0]
diff --git a/scipy/stats/morestats.py b/scipy/stats/morestats.py
index 901663d3d469..39f762541b9d 100644
--- a/scipy/stats/morestats.py
+++ b/scipy/stats/morestats.py
@@ -2446,9 +2446,7 @@ def _apply_func(x, g, func):
    # separating x into different groups
    # func should be applied over the groups
    g = unique(r_[0, g, len(x)])
-    output = []
-    for k in range(len(g) - 1):
-        output.append(func(x[g[k]:g[k+1]]))
+    output = [func(x[g[k]:g[k+1]]) for k in range(len(g) - 1)]
 
    return asarray(output)
diff --git a/scipy/stats/tests/test_distributions.py b/scipy/stats/tests/test_distributions.py
index 08e20850f3ab..dc11f0cd4be4 100644
--- a/scipy/stats/tests/test_distributions.py
+++ b/scipy/stats/tests/test_distributions.py
@@ -535,10 +535,8 @@ def test_precision2(self):
         pears = 1.1e5
         fruits_eaten = np.array([3, 3.8, 3.9, 4, 4.1, 4.2, 5]) * 1e4
         quantile = 2e4
-        res = []
-        for eaten in fruits_eaten:
-            res.append(stats.hypergeom.sf(quantile, oranges + pears, oranges,
-                                          eaten))
+        res = [stats.hypergeom.sf(quantile, oranges + pears, oranges, eaten)
+               for eaten in fruits_eaten]
         expected = np.array([0, 1.904153e-114, 2.752693e-66, 4.931217e-32,
                              8.265601e-11, 0.1237904, 1])
         assert_allclose(res, expected, atol=0, rtol=5e-7)
diff --git a/scipy/stats/tests/test_morestats.py b/scipy/stats/tests/test_morestats.py
index 4a1e86d8de02..721b25b21d1d 100644
--- a/scipy/stats/tests/test_morestats.py
+++ b/scipy/stats/tests/test_morestats.py
@@ -924,9 +924,7 @@ class TestKstat(object):
     def test_moments_normal_distribution(self):
         np.random.seed(32149)
         data = np.random.randn(12345)
-        moments = []
-        for n in [1, 2, 3, 4]:
-            moments.append(stats.kstat(data, n))
+        moments = [stats.kstat(data, n) for n in [1, 2, 3, 4]]
 
         expected = [0.011315, 1.017931, 0.05811052, 0.0754134]
         assert_allclose(moments, expected, rtol=1e-4)

From abcc9b503e483ca33709b8cff1f413420b9ba8a5 Mon Sep 17 00:00:00 2001
From: Katrin Leinweber <9948149+katrinleinweber@users.noreply.github.com>
Date: Sat, 15 Dec 2018 13:28:08 +0100
Subject: [PATCH 68/70] Hyperlink DOI to preferred resolver

---
 scipy/special/cephes/kolmogorov.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scipy/special/cephes/kolmogorov.c b/scipy/special/cephes/kolmogorov.c
index dd00aa3a8387..8ceec64e8046 100644
--- a/scipy/special/cephes/kolmogorov.c
+++ b/scipy/special/cephes/kolmogorov.c
@@ -35,7 +35,7 @@
  *     Usp. Mat. Nauk, 1944. http://mi.mathnet.ru/umn8798
  * [2] Birnbaum, Z. W. and Tingey, Fred H.
  *     "One-Sided Confidence Contours for Probability Distribution Functions",
- *     Ann. Math. Statist. 1951. http://dx.doi.org/10.1214/aoms/1177729550
+ *     Ann. Math. Statist. 1951. https://doi.org/10.1214/aoms/1177729550
 * [3] Dwass, Meyer, "The Distribution of a Generalized $\mathrm{D}^+_n$ Statistic",
 *     Ann. Math. Statist., 1959. https://doi.org/10.1214/aoms/1177706085
 * [4] van Mulbregt, Paul, "Computing the Cumulative Distribution Function and Quantiles of the One-sided Kolmogorov-Smirnov Statistic"
From 24f1c2639499cccc0615a7ed827c116f7dbf82c7 Mon Sep 17 00:00:00 2001
From: Albert Thomas
Date: Sat, 15 Dec 2018 20:23:19 +0100
Subject: [PATCH 69/70] DOC: Replace cumulative density with cumulative distribution (#9593)

DOC: stats: replace "cumulative density" with "cumulative distribution"

---
 scipy/special/__init__.py               |  6 +++---
 scipy/special/add_newdocs.py            | 18 +++++++++---------
 scipy/stats/_continuous_distns.py       |  6 ++++--
 scipy/stats/tests/test_distributions.py |  4 +++-
 4 files changed, 19 insertions(+), 15 deletions(-)

diff --git a/scipy/special/__init__.py b/scipy/special/__init__.py
index 0a937f09cc7b..27556ca81e72 100644
--- a/scipy/special/__init__.py
+++ b/scipy/special/__init__.py
@@ -199,7 +199,7 @@
    bdtri        -- Inverse function to `bdtr` with respect to `p`.
    bdtrik       -- Inverse function to `bdtr` with respect to `k`.
    bdtrin       -- Inverse function to `bdtr` with respect to `n`.
-   btdtr        -- Cumulative density function of the beta distribution.
+   btdtr        -- Cumulative distribution function of the beta distribution.
    btdtri       -- The `p`-th quantile of the beta distribution.
    btdtria      -- Inverse of `btdtr` with respect to `a`.
    btdtrib      -- btdtria(a, p, x)
@@ -207,7 +207,7 @@
    fdtrc        -- F survival function.
    fdtri        -- The `p`-th quantile of the F-distribution.
    fdtridfd     -- Inverse to `fdtr` vs dfd
-   gdtr         -- Gamma distribution cumulative density function.
+   gdtr         -- Gamma distribution cumulative distribution function.
    gdtrc        -- Gamma distribution survival function.
    gdtria       -- Inverse of `gdtr` vs a.
    gdtrib       -- Inverse of `gdtr` vs b.
@@ -232,7 +232,7 @@
    pdtrc        -- Poisson survival function
    pdtri        -- Inverse to `pdtr` vs m
    pdtrik       -- Inverse to `pdtr` vs k
-   stdtr        -- Student t distribution cumulative density function
+   stdtr        -- Student t distribution cumulative distribution function
    stdtridf     -- Inverse of `stdtr` vs df
    stdtrit      -- Inverse of `stdtr` vs `t`
    chdtr        -- Chi square cumulative distribution function
diff --git a/scipy/special/add_newdocs.py b/scipy/special/add_newdocs.py
index c2b10bb594f6..6e10aa8998be 100644
--- a/scipy/special/add_newdocs.py
+++ b/scipy/special/add_newdocs.py
@@ -599,7 +599,7 @@ def add_newdoc(place, name, doc):
 
    See Also
    --------
-    btdtr : Cumulative density function of the beta distribution.
+    btdtr : Cumulative distribution function of the beta distribution.
    btdtri : Inverse with respect to `x`.
    btdtrib : Inverse with respect to `b`.
 
@@ -652,7 +652,7 @@ def add_newdoc(place, name, doc):
 
    See Also
    --------
-    btdtr : Cumulative density function of the beta distribution.
+    btdtr : Cumulative distribution function of the beta distribution.
    btdtri : Inverse with respect to `x`.
    btdtria : Inverse with respect to `a`.
 
@@ -925,7 +925,7 @@ def add_newdoc(place, name, doc):
    r"""
    btdtr(a, b, x)
 
-    Cumulative density function of the beta distribution.
+    Cumulative distribution function of the beta distribution.
 
    Returns the integral from zero to `x` of the beta probability density
    function,
@@ -947,8 +947,8 @@ def add_newdoc(place, name, doc):
    Returns
    -------
    I : ndarray
-        Cumulative density function of the beta distribution with parameters
-        `a` and `b` at `x`.
+        Cumulative distribution function of the beta distribution with
+        parameters `a` and `b` at `x`.
 
    See Also
    --------
@@ -2431,7 +2431,7 @@ def add_newdoc(place, name, doc):
 
    F cumulative distribution function.
 
-    Returns the value of the cumulative density function of the
+    Returns the value of the cumulative distribution function of the
    F-distribution, also known as Snedecor's F-distribution or the
    Fisher-Snedecor distribution.
 
@@ -2794,7 +2794,7 @@ def add_newdoc(place, name, doc):
    r"""
    gdtr(a, b, x)
 
-    Gamma distribution cumulative density function.
+    Gamma distribution cumulative distribution function.
 
    Returns the integral from zero to `x` of the gamma probability density
    function,
@@ -4489,7 +4489,7 @@ def add_newdoc(place, name, doc):
    Show the probability of a gap at least as big as 0, 0.5 and 1.0.
 
    >>> from scipy.special import kolmogorov
-    >>> from scipy.stats import kstwobign 
+    >>> from scipy.stats import kstwobign
 
    >>> kolmogorov([0, 0.5, 1.0])
    array([ 1.        ,  0.96394524,  0.26999967])
@@ -6654,7 +6654,7 @@ def add_newdoc(place, name, doc):
    """
    stdtr(df, t)
 
-    Student t distribution cumulative density function
+    Student t distribution cumulative distribution function
 
    Returns the integral from minus infinity to t of the Student t
    distribution with df > 0 degrees of freedom::
diff --git a/scipy/stats/_continuous_distns.py b/scipy/stats/_continuous_distns.py
index 2ab06f8434c8..44c05bb7d67f 100644
--- a/scipy/stats/_continuous_distns.py
+++ b/scipy/stats/_continuous_distns.py
@@ -4002,8 +4002,10 @@ def _cdf(self, x, alpha, beta):
                     data_out[data_mask] = np.array([levy_stable._cdf_single_value_zolotarev(_x, _alpha, _beta)
                                                     for _x, _alpha, _beta in data_subset]).reshape(len(data_subset), 1)
                 else:
-                    warnings.warn('Cumulative density calculations experimental for FFT method.' +
-                                  ' Use zolatarev method instead.', RuntimeWarning)
+                    warnings.warn('FFT method is considered experimental for ' +
+                                  'cumulative distribution function ' +
+                                  "evaluations. Use Zolotarev's method instead.",
+                                  RuntimeWarning)
                     _alpha, _beta = pair
                     _x = data_subset[:,(0,)]
diff --git a/scipy/stats/tests/test_distributions.py b/scipy/stats/tests/test_distributions.py
index dc11f0cd4be4..cbdca83439bd 100644
--- a/scipy/stats/tests/test_distributions.py
+++ b/scipy/stats/tests/test_distributions.py
@@ -1598,7 +1598,9 @@ def test_cdf_nolan_samples(self):
             stats.levy_stable.pdf_fft_min_points_threshold = fft_min_points
             subdata = data[filter_func(data)] if filter_func is not None else data
             with suppress_warnings() as sup:
-                sup.record(RuntimeWarning, "Cumulative density calculations experimental for FFT method.*")
+                sup.record(RuntimeWarning, 'FFT method is considered ' +
+                           'experimental for cumulative distribution ' +
+                           'function evaluations.*')
                 p = stats.levy_stable.cdf(subdata['x'], subdata['alpha'], subdata['beta'], scale=1, loc=0)
                 subdata2 = rec_append_fields(subdata, 'calc', p)
                 failures = subdata2[(np.abs(p-subdata['p']) >= 1.5*10.**(-decimal_places)) | np.isnan(p)]

From bab939bffd939c48ecff878d26041a501d4fe096 Mon Sep 17 00:00:00 2001
From: mattip
Date: Sun, 16 Dec 2018 06:41:31 +0200
Subject: [PATCH 70/70] DEV: try to limit GC memory use on PyPy

---
 .circleci/config.yml | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/.circleci/config.yml b/.circleci/config.yml
index 04ebe1435621..d28b4f2e1e0b 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -137,9 +137,8 @@ jobs:
          command: |
            # CircleCI has 4G memory limit, play it safe
            export SCIPY_AVAILABLE_MEM=1G
-            # Limit OpenBLAS to 1 thread, PyPy crashes otherwise (at least for
-            # OpenBLAS v0.3.0), see gh-9530.
-            export OPENBLAS_NUM_THREADS=1
+            # Try to limit per-process GC memory usage
+            export PYPY_GC_MAX=900MB
            pypy3 runtests.py -- -rfEX -n 3 --durations=30
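PYPY_GC_MAX caps the total heap PyPy's garbage collector will manage; on approaching the cap the GC collects more aggressively, and a process that still exceeds it gets a MemoryError rather than growing into CircleCI's container limit. A rough sketch of what a '900MB'-style value amounts to in bytes (the parse_size helper below is hypothetical, written only to illustrate the unit convention; PyPy does its own parsing at startup):

import os

def parse_size(text):
    # '900MB' -> 900 * 2**20; accepts KB/MB/GB suffixes or bare byte counts
    units = {'GB': 2**30, 'MB': 2**20, 'KB': 2**10, 'B': 1}
    for suffix, factor in units.items():
        if text.upper().endswith(suffix):
            return int(float(text[:-len(suffix)])) * factor
    return int(text)

limit = os.environ.get('PYPY_GC_MAX', '900MB')
print(parse_size(limit))  # 943718400 for the 900MB used above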