add read me
This commit is contained in:
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,110 @@
|
||||
""" Test for assert_deallocated context manager and gc utilities
|
||||
"""
|
||||
import gc
|
||||
from threading import Lock
|
||||
|
||||
from scipy._lib._gcutils import (set_gc_state, gc_state, assert_deallocated,
|
||||
ReferenceError, IS_PYPY)
|
||||
|
||||
from numpy.testing import assert_equal
|
||||
|
||||
import pytest
|
||||
|
||||
|
||||
@pytest.fixture
def gc_lock():
    """Provide a Lock so tests that toggle the GC state do not interleave."""
    lock = Lock()
    return lock
|
||||
|
||||
|
||||
def test_set_gc_state(gc_lock):
|
||||
with gc_lock:
|
||||
gc_status = gc.isenabled()
|
||||
try:
|
||||
for state in (True, False):
|
||||
gc.enable()
|
||||
set_gc_state(state)
|
||||
assert_equal(gc.isenabled(), state)
|
||||
gc.disable()
|
||||
set_gc_state(state)
|
||||
assert_equal(gc.isenabled(), state)
|
||||
finally:
|
||||
if gc_status:
|
||||
gc.enable()
|
||||
|
||||
|
||||
def test_gc_state(gc_lock):
|
||||
# Test gc_state context manager
|
||||
with gc_lock:
|
||||
gc_status = gc.isenabled()
|
||||
try:
|
||||
for pre_state in (True, False):
|
||||
set_gc_state(pre_state)
|
||||
for with_state in (True, False):
|
||||
# Check the gc state is with_state in with block
|
||||
with gc_state(with_state):
|
||||
assert_equal(gc.isenabled(), with_state)
|
||||
# And returns to previous state outside block
|
||||
assert_equal(gc.isenabled(), pre_state)
|
||||
# Even if the gc state is set explicitly within the block
|
||||
with gc_state(with_state):
|
||||
assert_equal(gc.isenabled(), with_state)
|
||||
set_gc_state(not with_state)
|
||||
assert_equal(gc.isenabled(), pre_state)
|
||||
finally:
|
||||
if gc_status:
|
||||
gc.enable()
|
||||
|
||||
|
||||
@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy")
def test_assert_deallocated(gc_lock):
    """assert_deallocated passes when the tracked object really goes away."""
    class C:
        def __init__(self, arg0, arg1, name='myname'):
            self.name = name

    with gc_lock:
        for enabled in (True, False):
            with gc_state(enabled):
                # Deleting the bound result inside the block is fine...
                with assert_deallocated(C, 0, 2, 'another name') as c:
                    assert_equal(c.name, 'another name')
                    del c
                # ...as is never binding the object at all.
                with assert_deallocated(C, 0, 2, name='third name'):
                    pass
                # the GC state must be untouched either way
                assert_equal(gc.isenabled(), enabled)
|
||||
|
||||
|
||||
@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy")
def test_assert_deallocated_nodel():
    """Holding a live reference at block exit must raise ReferenceError."""
    class C:
        pass

    with pytest.raises(ReferenceError):
        # The context-manager result must be bound for this check to be
        # meaningful: `_` is never used in the body, it exists only to keep
        # the refcount up so the object cannot be deallocated.
        with assert_deallocated(C) as _:
            pass
|
||||
|
||||
|
||||
@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy")
def test_assert_deallocated_circular():
    """A self-referencing object is not freed by refcounting alone."""
    class C:
        def __init__(self):
            self._circular = self

    with pytest.raises(ReferenceError):
        # `del c` drops only one reference; the cycle keeps the object alive
        # because automatic garbage collection is off inside the manager.
        with assert_deallocated(C) as c:
            del c
|
||||
|
||||
|
||||
@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy")
def test_assert_deallocated_circular2():
    """Same as the circular case, but without ever binding the object."""
    class C:
        def __init__(self):
            self._circular = self

    with pytest.raises(ReferenceError):
        # The cycle alone keeps the instance alive past the block.
        with assert_deallocated(C):
            pass
|
||||
@@ -0,0 +1,67 @@
|
||||
from pytest import raises as assert_raises
|
||||
from scipy._lib._pep440 import Version, parse
|
||||
|
||||
|
||||
def test_main_versions():
    """Basic equality and ordering of plain release versions."""
    base = Version('1.8.0')
    assert base == Version('1.8.0')
    for newer in ['1.9.0', '2.0.0', '1.8.1']:
        assert base < Version(newer)
    for older in ['1.7.0', '1.7.1', '0.9.9']:
        assert base > Version(older)
|
||||
|
||||
|
||||
def test_version_1_point_10():
    """Two-digit components compare numerically, not lexically (gh-2998)."""
    for low, high in [('1.9.0', '1.10.0'),
                      ('1.11.0', '1.11.1'),
                      ('1.99.11', '1.99.12')]:
        assert Version(low) < Version(high)
    assert Version('1.11.0') == Version('1.11.0')
|
||||
|
||||
|
||||
def test_alpha_beta_rc():
    """Pre-release ordering: a < b < rc < final release."""
    rc1 = Version('1.8.0rc1')
    assert rc1 == Version('1.8.0rc1')
    for newer in ['1.8.0', '1.8.0rc2']:
        assert rc1 < Version(newer)
    for older in ['1.8.0a2', '1.8.0b3', '1.7.2rc4']:
        assert rc1 > Version(older)
    # betas come after alphas of the same release
    assert Version('1.8.0b1') > Version('1.8.0a2')
|
||||
|
||||
|
||||
def test_dev_version():
    """Dev builds with local parts sort before their target release."""
    assert Version('1.9.0.dev+Unknown') < Version('1.9.0')
    dev = Version('1.9.0.dev+f16acvda')
    for newer in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev+ffffffff', '1.9.0.dev1']:
        assert dev < Version(newer)
    # identical local version labels compare equal
    assert dev == Version('1.9.0.dev+f16acvda')
|
||||
|
||||
|
||||
def test_dev_a_b_rc_mixed():
    """A dev build of a pre-release sorts before that pre-release."""
    alpha_dev = Version('1.9.0a2.dev+f16acvda')
    assert alpha_dev == Version('1.9.0a2.dev+f16acvda')
    assert Version('1.9.0a2.dev+6acvda54') < Version('1.9.0a2')
|
||||
|
||||
|
||||
def test_dev0_version():
    """Explicit dev0 builds also sort before their target release."""
    assert Version('1.9.0.dev0+Unknown') < Version('1.9.0')
    dev0 = Version('1.9.0.dev0+f16acvda')
    for newer in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev0+ffffffff']:
        assert dev0 < Version(newer)
    assert dev0 == Version('1.9.0.dev0+f16acvda')
|
||||
|
||||
|
||||
def test_dev0_a_b_rc_mixed():
    """A dev0 build of a pre-release sorts before that pre-release."""
    alpha_dev0 = Version('1.9.0a2.dev0+f16acvda')
    assert alpha_dev0 == Version('1.9.0a2.dev0+f16acvda')
    assert Version('1.9.0a2.dev0+6acvda54') < Version('1.9.0a2')
|
||||
|
||||
|
||||
def test_raises():
    """Malformed version strings are rejected with ValueError."""
    for bad in ['1,9.0', '1.7.x']:
        assert_raises(ValueError, Version, bad)
|
||||
|
||||
def test_legacy_version():
    """Non-PEP-440 identifiers always compare less than any valid version.

    For NumPy this only occurs on dev builds prior to 1.10.0, which are
    unsupported anyway.
    """
    assert parse('invalid') < Version('0.0.0')
    assert parse('1.9.0-f16acvda') < Version('1.0.0')
|
||||
@@ -0,0 +1,32 @@
|
||||
import sys
|
||||
from scipy._lib._testutils import _parse_size, _get_mem_available
|
||||
import pytest
|
||||
|
||||
|
||||
def test__parse_size():
    """_parse_size understands bare numbers, SI and binary (IEC) suffixes."""
    cases = {
        '12': 12e6,            # bare number defaults to megabytes
        '12 b': 12,
        '12k': 12e3,
        ' 12 M ': 12e6,        # surrounding whitespace is ignored
        ' 12 G ': 12e9,
        ' 12Tb ': 12e12,
        '12 Mib ': 12 * 1024.0**2,   # IEC suffixes are powers of 1024
        '12Tib': 12 * 1024.0**4,
    }

    for text, expected in sorted(cases.items()):
        if expected is None:
            # Convention: a None value marks an input expected to be rejected.
            with pytest.raises(ValueError):
                _parse_size(text)
        else:
            assert _parse_size(text) == expected
|
||||
|
||||
|
||||
def test__mem_available():
    """_get_mem_available returns a non-negative amount, or None off-Linux."""
    avail = _get_mem_available()
    if sys.platform.startswith('linux'):
        # on Linux the value can always be determined
        assert avail >= 0
    else:
        # elsewhere the helper may legitimately give up and return None
        assert avail is None or avail >= 0
|
||||
@@ -0,0 +1,51 @@
|
||||
import threading
|
||||
import time
|
||||
import traceback
|
||||
|
||||
from numpy.testing import assert_
|
||||
from pytest import raises as assert_raises
|
||||
|
||||
from scipy._lib._threadsafety import ReentrancyLock, non_reentrant, ReentrancyError
|
||||
|
||||
|
||||
def test_parallel_threads():
    """ReentrancyLock must serialize work done in parallel threads.

    Not fully deterministic: unlucky timing could make it pass falsely.
    """
    lock = ReentrancyLock("failure")

    busy = [False]       # True while some worker is inside the lock
    failures = []        # formatted tracebacks from worker threads

    def worker(k):
        try:
            with lock:
                # if another worker were inside concurrently, busy would
                # already be True here
                assert_(not busy[0])
                busy[0] = True
                time.sleep(0.1 * k)
                assert_(busy[0])
                busy[0] = False
        except Exception:
            # collect the traceback; asserting in a thread is otherwise silent
            failures.append(traceback.format_exc(2))

    threads = [threading.Thread(target=lambda k=k: worker(k))
               for k in range(3)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()

    failures = "\n".join(failures)
    assert_(not failures, failures)
|
||||
|
||||
|
||||
def test_reentering():
    """non_reentrant must stop a function from re-entering itself."""
    @non_reentrant()
    def func(x):
        # unconditional self-call: only the decorator can stop the recursion
        return func(x)

    assert_raises(ReentrancyError, func, 0)
|
||||
641
venv/lib/python3.12/site-packages/scipy/_lib/tests/test__util.py
Normal file
641
venv/lib/python3.12/site-packages/scipy/_lib/tests/test__util.py
Normal file
@@ -0,0 +1,641 @@
|
||||
from multiprocessing import Pool
|
||||
from multiprocessing.pool import Pool as PWL
|
||||
import re
|
||||
import math
|
||||
import functools
|
||||
from fractions import Fraction
|
||||
|
||||
import numpy as np
|
||||
from numpy.testing import assert_equal, assert_
|
||||
import pytest
|
||||
from pytest import raises as assert_raises
|
||||
from scipy.conftest import skip_xp_invalid_arg
|
||||
|
||||
from scipy._lib._array_api import xp_assert_equal, is_numpy
|
||||
from scipy._lib._util import (_aligned_zeros, check_random_state, MapWrapper,
|
||||
getfullargspec_no_self, FullArgSpec,
|
||||
rng_integers, _validate_int, _rename_parameter,
|
||||
_contains_nan, _rng_html_rewrite, _workers_wrapper)
|
||||
import scipy._lib.array_api_extra as xpx
|
||||
from scipy._lib.array_api_extra.testing import lazy_xp_function
|
||||
from scipy import cluster, interpolate, linalg, optimize, sparse, spatial, stats
|
||||
|
||||
|
||||
lazy_xp_function(_contains_nan)
|
||||
|
||||
|
||||
@pytest.mark.slow
def test__aligned_zeros():
    """_aligned_zeros must honour alignment, shape, dtype and memory order."""
    niter = 10

    def check(shape, dtype, order, align):
        label = repr((shape, dtype, order, align))
        arr = _aligned_zeros(shape, dtype, order, align=align)
        # align=None means "natural alignment of the dtype"
        wanted_align = np.dtype(dtype).alignment if align is None else align
        # the data pointer must sit on the requested boundary
        assert_equal(arr.__array_interface__['data'][0] % wanted_align, 0)
        if hasattr(shape, '__len__'):
            assert_equal(arr.shape, shape, label)
        else:
            assert_equal(arr.shape, (shape,), label)
        assert_equal(arr.dtype, dtype)
        if order == "C":
            assert_(arr.flags.c_contiguous, label)
        elif order == "F":
            if arr.size > 0:
                # Size-0 arrays get invalid flags on NumPy 1.5
                assert_(arr.flags.f_contiguous, label)
        elif order is None:
            assert_(arr.flags.c_contiguous, label)
        else:
            raise ValueError()

    # sweep alignments, lengths, orders, dtypes and 1-D/4-D shapes
    for align in [1, 2, 3, 4, 8, 16, 32, 64, None]:
        for n in [0, 1, 3, 11]:
            for order in ["C", "F", None]:
                for dtype in [np.uint8, np.float64]:
                    for shape in [n, (1, 2, 3, n)]:
                        for _ in range(niter):
                            check(shape, dtype, order, align)
|
||||
|
||||
|
||||
def test_check_random_state():
    """check_random_state maps seeds / None / RNG instances to the right type.

    int seed -> new RandomState; RandomState or Generator -> passed through;
    None -> the global RandomState singleton; anything else -> ValueError.
    """
    rs = check_random_state(1)
    assert_equal(type(rs), np.random.RandomState)
    rs = check_random_state(rs)
    assert_equal(type(rs), np.random.RandomState)
    rs = check_random_state(None)
    assert_equal(type(rs), np.random.RandomState)
    assert_raises(ValueError, check_random_state, 'a')
    gen = np.random.Generator(np.random.PCG64())
    assert_equal(type(check_random_state(gen)), np.random.Generator)
|
||||
|
||||
|
||||
def test_getfullargspec_no_self():
    """getfullargspec_no_self drops `self` from bound-method signatures."""
    wrapper = MapWrapper(1)
    assert_equal(
        getfullargspec_no_self(wrapper.__init__),
        FullArgSpec(['pool'], None, None, (1,), [], None, {}))
    assert_equal(
        getfullargspec_no_self(wrapper.__call__),
        FullArgSpec(['func', 'iterable'], None, None, None, [], None, {}))

    class _rv_generic:
        def _rvs(self, a, b=2, c=3, *args, size=None, **kwargs):
            return None

    # *args/**kwargs and keyword-only arguments must all survive
    assert_equal(
        getfullargspec_no_self(_rv_generic()._rvs),
        FullArgSpec(['a', 'b', 'c'], 'args', 'kwargs', (2, 3), ['size'],
                    {'size': None}, {}))
|
||||
|
||||
|
||||
def test_mapwrapper_serial():
    """MapWrapper(1) runs serially via the builtin map and owns no pool."""
    xs = np.arange(10.)
    expected = np.sin(xs)

    wrapper = MapWrapper(1)
    assert_(wrapper._mapfunc is map)
    assert_(wrapper.pool is None)
    assert_(wrapper._own_pool is False)
    assert_equal(list(wrapper(np.sin, xs)), expected)

    # zero workers makes no sense and must be rejected
    with assert_raises(RuntimeError):
        wrapper = MapWrapper(0)
|
||||
|
||||
|
||||
def test_pool():
    """Smoke test: a 2-process Pool can map a function over a list."""
    with Pool(2) as pool:
        pool.map(math.sin, [1, 2, 3, 4])
|
||||
|
||||
|
||||
def test_mapwrapper_parallel():
    """MapWrapper(2) owns a real pool and closes it on context exit."""
    xs = np.arange(10.)
    expected = np.sin(xs)

    with MapWrapper(2) as wrapper:
        assert_equal(list(wrapper(np.sin, xs)), expected)

        assert_(wrapper._own_pool is True)
        assert_(isinstance(wrapper.pool, PWL))
        assert_(wrapper._mapfunc is not None)

    # The context manager should have closed the internal pool:
    # asking for another computation must now fail.
    with assert_raises(Exception) as excinfo:
        wrapper(np.sin, xs)

    assert_(excinfo.type is ValueError)

    # A MapWrapper can also be built around a map-like callable...
    with Pool(2) as pool:
        borrowed = MapWrapper(pool.map)

        assert_(borrowed._own_pool is False)
        borrowed.close()

        # ...and closing it must NOT close the pool it borrowed,
        # because it did not create that pool.
        assert_equal(list(pool.map(np.sin, xs)), expected)
|
||||
|
||||
|
||||
@_workers_wrapper
|
||||
def user_of_workers(x, b=1, workers=None):
|
||||
assert workers is not None
|
||||
assert isinstance(workers, MapWrapper)
|
||||
return np.array(list(workers(np.sin, x * b)))
|
||||
|
||||
|
||||
def test__workers_wrapper():
    """_workers_wrapper accepts pools, ints, None, and survives partial()."""
    xs = np.linspace(0, np.pi)
    expected = np.sin(xs * 2.0)

    # a map-like callable taken from a real pool
    with Pool(2) as pool:
        assert_equal(user_of_workers(xs, workers=pool.map, b=2), expected)

    # None -> serial execution; int -> wrapper-owned pool of that size
    assert_equal(user_of_workers(xs, workers=None, b=2), expected)
    assert_equal(user_of_workers(xs, workers=2, b=2), expected)

    # the decorator must also work through functools.partial
    serial_part = functools.partial(user_of_workers, b=2)
    assert_equal(serial_part(xs), expected)

    with Pool(2) as pool:
        pooled_part = functools.partial(user_of_workers, b=2, workers=pool.map)
        assert_equal(pooled_part(xs), expected)
|
||||
|
||||
|
||||
def test_rng_integers():
    """rng_integers honours endpoint=True/False for both RNG flavours."""

    def check(rng):
        # endpoint=True includes `high` in the sample range
        vals = rng_integers(rng, low=2, high=5, size=100, endpoint=True)
        assert np.max(vals) == 5
        assert np.min(vals) == 2
        assert vals.shape == (100, )

        # single positional bound acts as `high`, with an implicit low of 0
        vals = rng_integers(rng, low=5, size=100, endpoint=True)
        assert np.max(vals) == 5
        assert np.min(vals) == 0
        assert vals.shape == (100, )

        # endpoint=False excludes `high`
        vals = rng_integers(rng, low=2, high=5, size=100, endpoint=False)
        assert np.max(vals) == 4
        assert np.min(vals) == 2
        assert vals.shape == (100, )

        vals = rng_integers(rng, low=5, size=100, endpoint=False)
        assert np.max(vals) == 4
        assert np.min(vals) == 0
        assert vals.shape == (100, )

    check(np.random.RandomState())

    # np.random.Generator only exists on newer NumPy versions
    try:
        rng = np.random.default_rng()
    except AttributeError:
        return
    check(rng)
|
||||
|
||||
|
||||
class TestValidateInt:
    """Tests for scipy._lib._util._validate_int."""

    @pytest.mark.parametrize('n', [4, np.uint8(4), np.int16(4), np.array(4)])
    def test_validate_int(self, n):
        # any integer-like scalar is accepted and converted to a plain int
        result = _validate_int(n, 'n')
        assert result == 4

    @pytest.mark.parametrize('n', [4.0, np.array([4]), Fraction(4, 1)])
    def test_validate_int_bad(self, n):
        # floats, 1-element arrays and rationals are rejected even when integral
        with pytest.raises(TypeError, match='n must be an integer'):
            _validate_int(n, 'n')

    def test_validate_int_below_min(self):
        # values below the optional lower bound raise ValueError
        with pytest.raises(ValueError, match='n must be an integer not '
                                             'less than 0'):
            _validate_int(-1, 'n', 0)
|
||||
|
||||
|
||||
class TestRenameParameter:
    """Exercise `_rename_parameter`, the backward-compatible keyword-renaming
    decorator, both without and with a deprecation warning."""

    # Example method that still accepts keyword `old` silently
    @_rename_parameter("old", "new")
    def old_keyword_still_accepted(self, new):
        return new

    # Example method for which keyword `old` is deprecated
    @_rename_parameter("old", "new", dep_version="1.9.0")
    def old_keyword_deprecated(self, new):
        return new

    def test_old_keyword_still_accepted(self):
        # positional argument, new keyword and old keyword behave identically
        by_position = self.old_keyword_still_accepted(10)
        by_new = self.old_keyword_still_accepted(new=10)
        by_old = self.old_keyword_still_accepted(old=10)
        assert by_position == by_new == by_old == 10

        # an unexpected keyword raises an error
        message = re.escape("old_keyword_still_accepted() got an unexpected")
        with pytest.raises(TypeError, match=message):
            self.old_keyword_still_accepted(unexpected=10)

        # multiple values for the same parameter raise an error
        message = re.escape("old_keyword_still_accepted() got multiple")
        with pytest.raises(TypeError, match=message):
            self.old_keyword_still_accepted(10, new=10)
        with pytest.raises(TypeError, match=message):
            self.old_keyword_still_accepted(10, old=10)
        with pytest.raises(TypeError, match=message):
            self.old_keyword_still_accepted(new=10, old=10)

    @pytest.fixture
    def kwarg_lock(self):
        from threading import Lock
        return Lock()

    def test_old_keyword_deprecated(self, kwarg_lock):
        # positional argument and either keyword give the same result, but
        # use of the old keyword additionally triggers a DeprecationWarning
        dep_msg = "Use of keyword argument `old` is deprecated"
        by_position = self.old_keyword_deprecated(10)
        by_new = self.old_keyword_deprecated(new=10)
        # pytest's warning filter is not thread-safe; serialize access
        with kwarg_lock:
            with pytest.warns(DeprecationWarning, match=dep_msg):
                by_old = self.old_keyword_deprecated(old=10)
        assert by_position == by_new == by_old == 10

        # an unexpected keyword raises an error
        message = re.escape("old_keyword_deprecated() got an unexpected")
        with pytest.raises(TypeError, match=message):
            self.old_keyword_deprecated(unexpected=10)

        # multiple values for the same parameter raise an error and, when
        # the old keyword is involved, emit a DeprecationWarning as well
        message = re.escape("old_keyword_deprecated() got multiple")
        with pytest.raises(TypeError, match=message):
            self.old_keyword_deprecated(10, new=10)
        with kwarg_lock:
            with pytest.raises(TypeError, match=message), \
                    pytest.warns(DeprecationWarning, match=dep_msg):
                self.old_keyword_deprecated(10, old=10)
        with kwarg_lock:
            with pytest.raises(TypeError, match=message), \
                    pytest.warns(DeprecationWarning, match=dep_msg):
                self.old_keyword_deprecated(new=10, old=10)
|
||||
|
||||
|
||||
class TestContainsNaN:
    """Tests for scipy._lib._util._contains_nan and its nan_policy handling."""

    def test_policy(self):
        data = np.array([1, 2, 3, np.nan])
        clean = data[:3]

        # "propagate" is the default policy; both it and "omit" just report
        assert _contains_nan(data)
        assert _contains_nan(data, nan_policy="propagate")
        assert _contains_nan(data, nan_policy="omit")
        assert not _contains_nan(clean)
        assert not _contains_nan(clean, nan_policy="propagate")
        assert not _contains_nan(clean, nan_policy="omit")

        # "raise" errors out only when NaNs are actually present
        with pytest.raises(ValueError, match="The input contains nan values"):
            _contains_nan(data, nan_policy="raise")
        assert not _contains_nan(clean, nan_policy="raise")

        # unknown policies are rejected
        with pytest.raises(ValueError, match="nan_policy must be one of"):
            _contains_nan(data, nan_policy="nan")

    def test_contains_nan(self):
        # Special case: empty array
        assert not _contains_nan(np.array([], dtype=float))

        # Integer arrays cannot contain NaN
        assert not _contains_nan(np.array([1, 2, 3]))
        assert not _contains_nan(np.array([[1, 2], [3, 4]]))

        # float and complex arrays, 1-D and 2-D, with and without NaN
        assert not _contains_nan(np.array([1., 2., 3.]))
        assert not _contains_nan(np.array([1., 2.j, 3.]))
        assert _contains_nan(np.array([1., 2.j, np.nan]))
        assert _contains_nan(np.array([1., 2., np.nan]))
        assert _contains_nan(np.array([np.nan, 2., np.nan]))
        assert not _contains_nan(np.array([[1., 2.], [3., 4.]]))
        assert _contains_nan(np.array([[1., 2.], [3., np.nan]]))

    @skip_xp_invalid_arg
    def test_contains_nan_with_strings(self):
        # np.nan coerced into a string dtype becomes the literal "nan" ...
        data1 = np.array([1, 2, "3", np.nan])
        assert not _contains_nan(data1)

        # ... but an object dtype keeps the genuine float NaN
        data2 = np.array([1, 2, "3", np.nan], dtype='object')
        assert _contains_nan(data2)

        data3 = np.array([["1", 2], [3, np.nan]])  # converted to string "nan"
        assert not _contains_nan(data3)

        data4 = np.array([["1", 2], [3, np.nan]], dtype='object')
        assert _contains_nan(data4)

    @pytest.mark.skip_xp_backends(eager_only=True,
                                  reason="lazy backends tested separately")
    @pytest.mark.parametrize("nan_policy", ['propagate', 'omit', 'raise'])
    def test_array_api(self, xp, nan_policy):
        gen = np.random.default_rng(932347235892482)
        x = xp.asarray(gen.random(size=(2, 3, 4)))
        assert not _contains_nan(x, nan_policy)

        # inject a single NaN
        x = xpx.at(x)[1, 2, 1].set(np.nan)

        if nan_policy == 'raise':
            with pytest.raises(ValueError, match="The input contains nan values"):
                _contains_nan(x, nan_policy)
        elif nan_policy == 'omit' and not is_numpy(xp):
            # non-NumPy eager backends reject "omit" unless explicitly allowed
            with pytest.raises(ValueError, match="nan_policy='omit' is incompatible"):
                _contains_nan(x, nan_policy)
            assert _contains_nan(x, nan_policy, xp_omit_okay=True)
        elif nan_policy == 'propagate':
            assert _contains_nan(x, nan_policy)

    @pytest.mark.skip_xp_backends("numpy", reason="lazy backends only")
    @pytest.mark.skip_xp_backends("cupy", reason="lazy backends only")
    @pytest.mark.skip_xp_backends("array_api_strict", reason="lazy backends only")
    @pytest.mark.skip_xp_backends("torch", reason="lazy backends only")
    def test_array_api_lazy(self, xp):
        gen = np.random.default_rng(932347235892482)
        x = xp.asarray(gen.random(size=(2, 3, 4)))

        # NaN-free input: the result is a lazy False
        xp_assert_equal(_contains_nan(x), xp.asarray(False))
        xp_assert_equal(_contains_nan(x, "propagate"), xp.asarray(False))
        xp_assert_equal(_contains_nan(x, "omit", xp_omit_okay=True), xp.asarray(False))
        # Lazy arrays don't support "omit" and "raise" policies
        match = "not supported for lazy arrays"
        with pytest.raises(TypeError, match=match):
            _contains_nan(x, "omit")
        with pytest.raises(TypeError, match=match):
            _contains_nan(x, "raise")

        x = xpx.at(x)[1, 2, 1].set(np.nan)

        # with a NaN present: the result is a lazy True
        xp_assert_equal(_contains_nan(x), xp.asarray(True))
        xp_assert_equal(_contains_nan(x, "propagate"), xp.asarray(True))
        xp_assert_equal(_contains_nan(x, "omit", xp_omit_okay=True), xp.asarray(True))
        with pytest.raises(TypeError, match=match):
            _contains_nan(x, "omit")
        with pytest.raises(TypeError, match=match):
            _contains_nan(x, "raise")
|
||||
|
||||
|
||||
def test__rng_html_rewrite():
|
||||
def mock_str():
|
||||
lines = [
|
||||
'np.random.default_rng(8989843)',
|
||||
'np.random.default_rng(seed)',
|
||||
'np.random.default_rng(0x9a71b21474694f919882289dc1559ca)',
|
||||
' bob ',
|
||||
]
|
||||
return lines
|
||||
|
||||
res = _rng_html_rewrite(mock_str)()
|
||||
ref = [
|
||||
'np.random.default_rng()',
|
||||
'np.random.default_rng(seed)',
|
||||
'np.random.default_rng()',
|
||||
' bob ',
|
||||
]
|
||||
|
||||
assert res == ref
|
||||
|
||||
|
||||
class TestTransitionToRNG:
    """Check the `seed`/`random_state` -> `rng` keyword transition: for each
    wrapped API, `rng=<seed>` and `rng=<Generator>` must agree, passing both
    spellings at once must fail, and legacy seeding behavior is unchanged.

    Each adapter below draws its own data from a fixed Generator so that only
    the seeding keyword under test varies between calls.
    """

    # NOTE(review): `kmeans` and `kmeans2` both call cluster.vq.kmeans2 —
    # presumably `kmeans` was meant to call cluster.vq.kmeans; confirm.
    def kmeans(self, **kwargs):
        gen = np.random.default_rng(3458934594269824562)
        return cluster.vq.kmeans2(gen.random(size=(20, 3)), 3, **kwargs)

    def kmeans2(self, **kwargs):
        gen = np.random.default_rng(3458934594269824562)
        return cluster.vq.kmeans2(gen.random(size=(20, 3)), 3, **kwargs)

    def barycentric(self, **kwargs):
        gen = np.random.default_rng(3458934594269824562)
        x1, x2, y1 = gen.random((3, 10))
        interp = interpolate.BarycentricInterpolator(x1, y1, **kwargs)
        return interp(x2)

    def clarkson_woodruff_transform(self, **kwargs):
        gen = np.random.default_rng(3458934594269824562)
        return linalg.clarkson_woodruff_transform(gen.random((10, 10)), 3, **kwargs)

    def basinhopping(self, **kwargs):
        gen = np.random.default_rng(3458934594269824562)
        return optimize.basinhopping(optimize.rosen, gen.random(3), **kwargs).x

    def opt(self, fun, **kwargs):
        # shared driver for the stochastic global optimizers
        gen = np.random.default_rng(3458934594269824562)
        bounds = optimize.Bounds(-gen.random(3) * 10, gen.random(3) * 10)
        return fun(optimize.rosen, bounds, **kwargs).x

    def differential_evolution(self, **kwargs):
        return self.opt(optimize.differential_evolution, **kwargs)

    def dual_annealing(self, **kwargs):
        return self.opt(optimize.dual_annealing, **kwargs)

    def check_grad(self, **kwargs):
        gen = np.random.default_rng(3458934594269824562)
        x = gen.random(3)
        return optimize.check_grad(optimize.rosen, optimize.rosen_der, x,
                                   direction='random', **kwargs)

    def random_array(self, **kwargs):
        return sparse.random_array((10, 10), density=1.0, **kwargs).toarray()

    def random(self, **kwargs):
        return sparse.random(10, 10, density=1.0, **kwargs).toarray()

    def rand(self, **kwargs):
        return sparse.rand(10, 10, density=1.0, **kwargs).toarray()

    def svds(self, **kwargs):
        gen = np.random.default_rng(3458934594269824562)
        A = gen.random((10, 10))
        return sparse.linalg.svds(A, **kwargs)

    def random_rotation(self, **kwargs):
        return spatial.transform.Rotation.random(3, **kwargs).as_matrix()

    def goodness_of_fit(self, **kwargs):
        gen = np.random.default_rng(3458934594269824562)
        samples = gen.random(100)
        return stats.goodness_of_fit(stats.laplace, samples, **kwargs).pvalue

    def permutation_test(self, **kwargs):
        gen = np.random.default_rng(3458934594269824562)
        samples = tuple(gen.random((2, 100)))
        def statistic(x, y, axis): return np.mean(x, axis=axis) - np.mean(y, axis=axis)
        return stats.permutation_test(samples, statistic, **kwargs).pvalue

    def bootstrap(self, **kwargs):
        gen = np.random.default_rng(3458934594269824562)
        samples = (gen.random(100),)
        return stats.bootstrap(samples, np.mean, **kwargs).confidence_interval

    def dunnett(self, **kwargs):
        gen = np.random.default_rng(3458934594269824562)
        x, y, control = gen.random((3, 100))
        return stats.dunnett(x, y, control=control, **kwargs).pvalue

    def sobol_indices(self, **kwargs):
        def f_ishigami(x): return (np.sin(x[0]) + 7 * np.sin(x[1]) ** 2
                                   + 0.1 * (x[2] ** 4) * np.sin(x[0]))
        dists = [stats.uniform(loc=-np.pi, scale=2 * np.pi),
                 stats.uniform(loc=-np.pi, scale=2 * np.pi),
                 stats.uniform(loc=-np.pi, scale=2 * np.pi)]
        res = stats.sobol_indices(func=f_ishigami, n=1024, dists=dists, **kwargs)
        return res.first_order

    def qmc_engine(self, engine, **kwargs):
        # shared driver for the QMC engines
        qrng = engine(d=1, **kwargs)
        return qrng.random(4)

    def halton(self, **kwargs):
        return self.qmc_engine(stats.qmc.Halton, **kwargs)

    def sobol(self, **kwargs):
        return self.qmc_engine(stats.qmc.Sobol, **kwargs)

    def latin_hypercube(self, **kwargs):
        return self.qmc_engine(stats.qmc.LatinHypercube, **kwargs)

    def poisson_disk(self, **kwargs):
        return self.qmc_engine(stats.qmc.PoissonDisk, **kwargs)

    def multivariate_normal_qmc(self, **kwargs):
        return stats.qmc.MultivariateNormalQMC([0], **kwargs).random(4)

    def multinomial_qmc(self, **kwargs):
        return stats.qmc.MultinomialQMC([0.5, 0.5], 4, **kwargs).random(4)

    def permutation_method(self, **kwargs):
        gen = np.random.default_rng(3458934594269824562)
        samples = tuple(gen.random((2, 100)))
        method = stats.PermutationMethod(**kwargs)
        return stats.pearsonr(*samples, method=method).pvalue

    def bootstrap_method(self, **kwargs):
        gen = np.random.default_rng(3458934594269824562)
        samples = tuple(gen.random((2, 100)))
        res = stats.pearsonr(*samples)
        method = stats.BootstrapMethod(**kwargs)
        return res.confidence_interval(method=method)

    @pytest.mark.fail_slow(10)
    @pytest.mark.slow
    @pytest.mark.parametrize("method, arg_name", [
        (kmeans, "seed"),
        (kmeans2, "seed"),
        (barycentric, "random_state"),
        (clarkson_woodruff_transform, "seed"),
        (basinhopping, "seed"),
        (differential_evolution, "seed"),
        (dual_annealing, "seed"),
        (check_grad, "seed"),
        (random_array, 'random_state'),
        (random, 'random_state'),
        (rand, 'random_state'),
        (svds, "random_state"),
        (random_rotation, "random_state"),
        (goodness_of_fit, "random_state"),
        (permutation_test, "random_state"),
        (bootstrap, "random_state"),
        (permutation_method, "random_state"),
        (bootstrap_method, "random_state"),
        (dunnett, "random_state"),
        (sobol_indices, "random_state"),
        (halton, "seed"),
        (sobol, "seed"),
        (latin_hypercube, "seed"),
        (poisson_disk, "seed"),
        (multivariate_normal_qmc, "seed"),
        (multinomial_qmc, "seed"),
    ])
    def test_rng_deterministic(self, method, arg_name):
        # reset global state so nothing leaks in from other tests
        np.random.seed(None)
        seed = 2949672964

        # passing both the legacy keyword and `rng` must be rejected
        gen = np.random.default_rng(seed)
        message = "got multiple values for argument now known as `rng`"
        with pytest.raises(TypeError, match=message):
            method(self, **{'rng': gen, arg_name: seed})

        # `rng=Generator(seed)` and `rng=seed` must agree
        gen = np.random.default_rng(seed)
        ref = method(self, rng=gen)
        res_seed = method(self, rng=seed)
        assert_equal(res_seed, ref)

        if method.__name__ in {"dunnett", "sobol_indices"}:
            # the two kwargs have essentially the same behavior for these
            assert_equal(method(self, **{arg_name: seed}), ref)
            return

        # legacy path: RandomState instance vs bare integer seed
        legacy_inst = method(self, **{arg_name: np.random.RandomState(seed)})
        legacy_seed = method(self, **{arg_name: seed})

        if method.__name__ in {"halton", "sobol", "latin_hypercube", "poisson_disk",
                               "multivariate_normal_qmc", "multinomial_qmc"}:
            # For these, `random_state=RandomState(seed)` is not the same as
            # an integer seed; each spelling must still be self-consistent.
            assert_equal(method(self, **{arg_name: np.random.RandomState(seed)}),
                         legacy_inst)
            assert_equal(method(self, **{arg_name: seed}), legacy_seed)
            return

        # integer seed, RandomState(seed) and global seeding must all agree
        np.random.seed(seed)
        legacy_none = method(self, **{arg_name: None})
        assert_equal(legacy_seed, legacy_inst)
        assert_equal(legacy_none, legacy_inst)
|
||||
@@ -0,0 +1,322 @@
|
||||
import re
|
||||
|
||||
import numpy as np
|
||||
import pytest
|
||||
|
||||
from scipy._lib._array_api import (
|
||||
_GLOBAL_CONFIG, array_namespace, _asarray, xp_copy, xp_assert_equal, is_numpy,
|
||||
np_compat, xp_default_dtype, xp_result_type, is_torch
|
||||
)
|
||||
from scipy._lib import array_api_extra as xpx
|
||||
from scipy._lib._array_api_no_0d import xp_assert_equal as xp_assert_equal_no_0d
|
||||
from scipy._lib.array_api_extra.testing import lazy_xp_function
|
||||
|
||||
|
||||
lazy_xp_function(_asarray)
|
||||
lazy_xp_function(xp_copy)
|
||||
|
||||
|
||||
@pytest.mark.skipif(not _GLOBAL_CONFIG["SCIPY_ARRAY_API"],
|
||||
reason="Array API test; set environment variable SCIPY_ARRAY_API=1 to run it")
|
||||
class TestArrayAPI:
|
||||
|
||||
def test_array_namespace(self):
|
||||
x, y = np.array([0, 1, 2]), np.array([0, 1, 2])
|
||||
xp = array_namespace(x, y)
|
||||
assert 'array_api_compat.numpy' in xp.__name__
|
||||
|
||||
_GLOBAL_CONFIG["SCIPY_ARRAY_API"] = False
|
||||
xp = array_namespace(x, y)
|
||||
assert 'array_api_compat.numpy' in xp.__name__
|
||||
_GLOBAL_CONFIG["SCIPY_ARRAY_API"] = True
|
||||
|
||||
def test_asarray(self, xp):
|
||||
x, y = _asarray([0, 1, 2], xp=xp), _asarray(np.arange(3), xp=xp)
|
||||
ref = xp.asarray([0, 1, 2])
|
||||
xp_assert_equal(x, ref)
|
||||
xp_assert_equal(y, ref)
|
||||
|
||||
@pytest.mark.filterwarnings("ignore: the matrix subclass")
|
||||
def test_raises(self):
|
||||
msg = "of type `numpy.ma.MaskedArray` are not supported"
|
||||
with pytest.raises(TypeError, match=msg):
|
||||
array_namespace(np.ma.array(1), np.array(1))
|
||||
|
||||
msg = "of type `numpy.matrix` are not supported"
|
||||
with pytest.raises(TypeError, match=msg):
|
||||
array_namespace(np.array(1), np.matrix(1))
|
||||
|
||||
msg = "only boolean and numerical dtypes are supported"
|
||||
with pytest.raises(TypeError, match=msg):
|
||||
array_namespace([object()])
|
||||
with pytest.raises(TypeError, match=msg):
|
||||
array_namespace('abc')
|
||||
|
||||
@pytest.mark.skip_xp_backends(np_only=True, reason="Array-likes")
|
||||
def test_array_likes(self, xp):
|
||||
"""Test that if all parameters of array_namespace are Array-likes,
|
||||
the output is array_api_compat.numpy
|
||||
"""
|
||||
assert array_namespace([0, 1, 2]) is xp
|
||||
assert array_namespace((0, 1, 2)) is xp
|
||||
assert array_namespace(1, 2, 3) is xp
|
||||
assert array_namespace(1) is xp
|
||||
assert array_namespace(np.int64(1)) is xp
|
||||
assert array_namespace([0, 1, 2], 3) is xp
|
||||
assert array_namespace() is xp
|
||||
assert array_namespace(None) is xp
|
||||
assert array_namespace(1, None) is xp
|
||||
assert array_namespace(None, 1) is xp
|
||||
|
||||
# This only works when xp is numpy!
|
||||
assert array_namespace(np.asarray([1, 2]), [3, 4]) is xp
|
||||
assert array_namespace(np.int64(1), [3, 4]) is xp
|
||||
|
||||
def test_array_and_array_likes_mix(self, xp):
|
||||
"""Test that if there is at least one Array API object among
|
||||
the parameters of array_namespace, and all other parameters
|
||||
are scalars, the output is its namespace.
|
||||
|
||||
If there are non-scalar Array-Likes, raise as in array-api-compat.
|
||||
"""
|
||||
x = xp.asarray(1)
|
||||
assert array_namespace(x) is xp
|
||||
assert array_namespace(x, 1) is xp
|
||||
assert array_namespace(1, x) is xp
|
||||
assert array_namespace(None, x) is xp
|
||||
|
||||
if not is_numpy(xp):
|
||||
with pytest.raises(TypeError, match="Multiple namespaces"):
|
||||
array_namespace(x, [1, 2])
|
||||
with pytest.raises(TypeError, match="Multiple namespaces"):
|
||||
array_namespace(x, np.int64(1))
|
||||
|
||||
def test_array_api_extra_hook(self):
|
||||
"""Test that the `array_namespace` function used by
|
||||
array-api-extra has been overridden by scipy
|
||||
"""
|
||||
msg = "only boolean and numerical dtypes are supported"
|
||||
with pytest.raises(TypeError, match=msg):
|
||||
xpx.atleast_nd("abc", ndim=0)
|
||||
|
||||
def test_copy(self, xp):
|
||||
for _xp in [xp, None]:
|
||||
x = xp.asarray([1, 2, 3])
|
||||
y = xp_copy(x, xp=_xp)
|
||||
# with numpy we'd want to use np.shared_memory, but that's not specified
|
||||
# in the array-api
|
||||
assert id(x) != id(y)
|
||||
try:
|
||||
y[0] = 10
|
||||
except (TypeError, ValueError):
|
||||
pass
|
||||
else:
|
||||
assert x[0] != y[0]
|
||||
|
||||
@pytest.mark.parametrize('dtype', ['int32', 'int64', 'float32', 'float64'])
|
||||
@pytest.mark.parametrize('shape', [(), (3,)])
|
||||
def test_strict_checks(self, xp, dtype, shape):
|
||||
# Check that `_strict_check` behaves as expected
|
||||
dtype = getattr(xp, dtype)
|
||||
x = xp.broadcast_to(xp.asarray(1, dtype=dtype), shape)
|
||||
x = x if shape else x[()]
|
||||
y = np_compat.asarray(1)[()]
|
||||
|
||||
kwarg_names = ["check_namespace", "check_dtype", "check_shape", "check_0d"]
|
||||
options = dict(zip(kwarg_names, [True, False, False, False]))
|
||||
if is_numpy(xp):
|
||||
xp_assert_equal(x, y, **options)
|
||||
else:
|
||||
with pytest.raises(
|
||||
AssertionError,
|
||||
match="Namespace of desired array does not match",
|
||||
):
|
||||
xp_assert_equal(x, y, **options)
|
||||
with pytest.raises(
|
||||
AssertionError,
|
||||
match="Namespace of actual and desired arrays do not match",
|
||||
):
|
||||
xp_assert_equal(y, x, **options)
|
||||
|
||||
options = dict(zip(kwarg_names, [False, True, False, False]))
|
||||
if y.dtype.name in str(x.dtype):
|
||||
xp_assert_equal(x, y, **options)
|
||||
else:
|
||||
with pytest.raises(AssertionError, match="dtypes do not match."):
|
||||
xp_assert_equal(x, y, **options)
|
||||
|
||||
options = dict(zip(kwarg_names, [False, False, True, False]))
|
||||
if x.shape == y.shape:
|
||||
xp_assert_equal(x, y, **options)
|
||||
else:
|
||||
with pytest.raises(AssertionError, match="Shapes do not match."):
|
||||
xp_assert_equal(x, xp.asarray(y), **options)
|
||||
|
||||
options = dict(zip(kwarg_names, [False, False, False, True]))
|
||||
if is_numpy(xp) and x.shape == y.shape:
|
||||
xp_assert_equal(x, y, **options)
|
||||
elif is_numpy(xp):
|
||||
with pytest.raises(AssertionError, match="Array-ness does not match."):
|
||||
xp_assert_equal(x, y, **options)
|
||||
|
||||
@pytest.mark.skip_xp_backends(np_only=True, reason="Scalars only exist in NumPy")
|
||||
def test_check_scalar(self, xp):
|
||||
# identity always passes
|
||||
xp_assert_equal(xp.float64(0), xp.float64(0))
|
||||
xp_assert_equal(xp.asarray(0.), xp.asarray(0.))
|
||||
xp_assert_equal(xp.float64(0), xp.float64(0), check_0d=False)
|
||||
xp_assert_equal(xp.asarray(0.), xp.asarray(0.), check_0d=False)
|
||||
|
||||
# Check default convention: 0d-arrays are distinguished from scalars
|
||||
message = "Array-ness does not match:.*"
|
||||
with pytest.raises(AssertionError, match=message):
|
||||
xp_assert_equal(xp.asarray(0.), xp.float64(0))
|
||||
with pytest.raises(AssertionError, match=message):
|
||||
xp_assert_equal(xp.float64(0), xp.asarray(0.))
|
||||
with pytest.raises(AssertionError, match=message):
|
||||
xp_assert_equal(xp.asarray(42), xp.int64(42))
|
||||
with pytest.raises(AssertionError, match=message):
|
||||
xp_assert_equal(xp.int64(42), xp.asarray(42))
|
||||
|
||||
# with `check_0d=False`, scalars-vs-0d passes (if values match)
|
||||
xp_assert_equal(xp.asarray(0.), xp.float64(0), check_0d=False)
|
||||
xp_assert_equal(xp.float64(0), xp.asarray(0.), check_0d=False)
|
||||
# also with regular python objects
|
||||
xp_assert_equal(xp.asarray(0.), 0., check_0d=False)
|
||||
xp_assert_equal(0., xp.asarray(0.), check_0d=False)
|
||||
xp_assert_equal(xp.asarray(42), 42, check_0d=False)
|
||||
xp_assert_equal(42, xp.asarray(42), check_0d=False)
|
||||
|
||||
# as an alternative to `check_0d=False`, explicitly expect scalar
|
||||
xp_assert_equal(xp.float64(0), xp.asarray(0.)[()])
|
||||
|
||||
@pytest.mark.skip_xp_backends(np_only=True, reason="Scalars only exist in NumPy")
|
||||
def test_check_scalar_no_0d(self, xp):
|
||||
# identity passes, if first argument is not 0d (or check_0d=True)
|
||||
xp_assert_equal_no_0d(xp.float64(0), xp.float64(0))
|
||||
xp_assert_equal_no_0d(xp.float64(0), xp.float64(0), check_0d=True)
|
||||
xp_assert_equal_no_0d(xp.asarray(0.), xp.asarray(0.), check_0d=True)
|
||||
|
||||
# by default, 0d values are forbidden as the first argument
|
||||
message = "Result is a NumPy 0d-array.*"
|
||||
with pytest.raises(AssertionError, match=message):
|
||||
xp_assert_equal_no_0d(xp.asarray(0.), xp.asarray(0.))
|
||||
with pytest.raises(AssertionError, match=message):
|
||||
xp_assert_equal_no_0d(xp.asarray(0.), xp.float64(0))
|
||||
with pytest.raises(AssertionError, match=message):
|
||||
xp_assert_equal_no_0d(xp.asarray(42), xp.int64(42))
|
||||
|
||||
# Check default convention: 0d-arrays are NOT distinguished from scalars
|
||||
xp_assert_equal_no_0d(xp.float64(0), xp.asarray(0.))
|
||||
xp_assert_equal_no_0d(xp.int64(42), xp.asarray(42))
|
||||
|
||||
# opt in to 0d-check remains possible
|
||||
message = "Array-ness does not match:.*"
|
||||
with pytest.raises(AssertionError, match=message):
|
||||
xp_assert_equal_no_0d(xp.asarray(0.), xp.float64(0), check_0d=True)
|
||||
with pytest.raises(AssertionError, match=message):
|
||||
xp_assert_equal_no_0d(xp.float64(0), xp.asarray(0.), check_0d=True)
|
||||
with pytest.raises(AssertionError, match=message):
|
||||
xp_assert_equal_no_0d(xp.asarray(42), xp.int64(0), check_0d=True)
|
||||
with pytest.raises(AssertionError, match=message):
|
||||
xp_assert_equal_no_0d(xp.int64(0), xp.asarray(42), check_0d=True)
|
||||
|
||||
# scalars-vs-0d passes (if values match) also with regular python objects
|
||||
xp_assert_equal_no_0d(0., xp.asarray(0.))
|
||||
xp_assert_equal_no_0d(42, xp.asarray(42))
|
||||
|
||||
def test_default_dtype(self, xp):
|
||||
assert xp_default_dtype(xp) == xp.asarray(1.).dtype
|
||||
|
||||
|
||||
scalars = [1, 1., 1. + 1j]
|
||||
lists = [[1], [1.], [1. + 1j]]
|
||||
types = ('int8 int16 int32 int64 '
|
||||
'uint8 uint16 uint32 uint64 '
|
||||
'float32 float64 complex64 complex128').split()
|
||||
arrays = [np.asarray([1], dtype=getattr(np, t)) for t in types]
|
||||
|
||||
|
||||
def convert_type(x, xp):
|
||||
# Convert NumPy array to xp-array
|
||||
# Convert string to indicated dtype from xp
|
||||
# Return Python scalars unchanged
|
||||
if isinstance(x, np.ndarray):
|
||||
return xp.asarray(x)
|
||||
elif isinstance(x, str):
|
||||
return getattr(xp, x)
|
||||
return x
|
||||
|
||||
|
||||
def is_inexact(x, xp):
|
||||
# Determine whether `x` is of inexact (real of complex floating) dtype
|
||||
x = xp.asarray(x) if np.isscalar(x) or isinstance(x, list) else x
|
||||
dtype = getattr(x, 'dtype', x)
|
||||
return xp.isdtype(dtype, ('real floating', 'complex floating'))
|
||||
|
||||
|
||||
@pytest.mark.parametrize('x', scalars + lists + types + arrays)
|
||||
@pytest.mark.parametrize('y', scalars + lists + types + arrays)
|
||||
def test_xp_result_type_no_force(x, y, xp):
|
||||
# When force_floating==False (default), behavior of `xp_result_type`
|
||||
# should match that of `xp.result_type` on the same arguments after
|
||||
# converting lists to arrays of type `xp`.
|
||||
x = convert_type(x, xp)
|
||||
y = convert_type(y, xp)
|
||||
x_ref = xp.asarray(x) if isinstance(x, list) else x
|
||||
y_ref = xp.asarray(y) if isinstance(y, list) else y
|
||||
|
||||
try:
|
||||
dtype_ref = xp.result_type(x_ref, y_ref)
|
||||
expected_error = None
|
||||
except Exception as e:
|
||||
expected_error = (type(e), str(e))
|
||||
|
||||
if expected_error is not None:
|
||||
with pytest.raises(expected_error[0], match=re.escape(expected_error[1])):
|
||||
xp_result_type(x, y, xp=xp)
|
||||
return
|
||||
|
||||
dtype_res = xp_result_type(x, y, xp=xp)
|
||||
assert dtype_res == dtype_ref
|
||||
|
||||
|
||||
@pytest.mark.parametrize('x', scalars + lists + types + arrays)
|
||||
@pytest.mark.parametrize('y', scalars + lists + types + arrays)
|
||||
def test_xp_result_type_force_floating(x, y, xp):
|
||||
# When `force_floating==True`, behavior of `xp_result_type`
|
||||
# should match that of `xp.result_type` with `1.0` appended to the set of
|
||||
# arguments (after converting lists to arrays of type `xp`).
|
||||
# If this raises a `TypeError`, which is the case when the result
|
||||
# type is not defined by the standard, the result type should be
|
||||
# the result type of any inexact (real or complex floating) arguments
|
||||
# and the default floating point type.
|
||||
if (is_torch(xp) and not(isinstance(x, str) or isinstance(y, str))
|
||||
and np.isscalar(x) and np.isscalar(y)):
|
||||
pytest.skip("See 3/27/2024 comment at data-apis/array-api-compat#277")
|
||||
|
||||
x = convert_type(x, xp)
|
||||
y = convert_type(y, xp)
|
||||
x_ref = xp.asarray(x) if isinstance(x, list) else x
|
||||
y_ref = xp.asarray(y) if isinstance(y, list) else y
|
||||
|
||||
expected_error = None
|
||||
try:
|
||||
dtype_ref = xp.result_type(x_ref, y_ref, 1.0)
|
||||
except TypeError:
|
||||
args = []
|
||||
if is_inexact(x_ref, xp):
|
||||
args.append(x_ref)
|
||||
if is_inexact(y_ref, xp):
|
||||
args.append(y_ref)
|
||||
dtype_ref = xp.result_type(*args, xp.asarray(1.0))
|
||||
except Exception as e:
|
||||
expected_error = (type(e), str(e))
|
||||
|
||||
if expected_error is not None:
|
||||
with pytest.raises(expected_error[0], match=expected_error[1]):
|
||||
xp_result_type(x, y, xp=xp)
|
||||
return
|
||||
|
||||
dtype_res = xp_result_type(x, y, force_floating=True, xp=xp)
|
||||
assert dtype_res == dtype_ref
|
||||
169
venv/lib/python3.12/site-packages/scipy/_lib/tests/test_bunch.py
Normal file
169
venv/lib/python3.12/site-packages/scipy/_lib/tests/test_bunch.py
Normal file
@@ -0,0 +1,169 @@
|
||||
import pytest
|
||||
import pickle
|
||||
from numpy.testing import assert_equal
|
||||
from scipy._lib._bunch import _make_tuple_bunch
|
||||
|
||||
|
||||
# `Result` is defined at the top level of the module so it can be
|
||||
# used to test pickling.
|
||||
Result = _make_tuple_bunch('Result', ['x', 'y', 'z'], ['w', 'beta'])
|
||||
|
||||
|
||||
class TestMakeTupleBunch:
|
||||
|
||||
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
# Tests with Result
|
||||
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
|
||||
def setup_method(self):
|
||||
# Set up an instance of Result.
|
||||
self.result = Result(x=1, y=2, z=3, w=99, beta=0.5)
|
||||
|
||||
def test_attribute_access(self):
|
||||
assert_equal(self.result.x, 1)
|
||||
assert_equal(self.result.y, 2)
|
||||
assert_equal(self.result.z, 3)
|
||||
assert_equal(self.result.w, 99)
|
||||
assert_equal(self.result.beta, 0.5)
|
||||
|
||||
def test_indexing(self):
|
||||
assert_equal(self.result[0], 1)
|
||||
assert_equal(self.result[1], 2)
|
||||
assert_equal(self.result[2], 3)
|
||||
assert_equal(self.result[-1], 3)
|
||||
with pytest.raises(IndexError, match='index out of range'):
|
||||
self.result[3]
|
||||
|
||||
def test_unpacking(self):
|
||||
x0, y0, z0 = self.result
|
||||
assert_equal((x0, y0, z0), (1, 2, 3))
|
||||
assert_equal(self.result, (1, 2, 3))
|
||||
|
||||
def test_slice(self):
|
||||
assert_equal(self.result[1:], (2, 3))
|
||||
assert_equal(self.result[::2], (1, 3))
|
||||
assert_equal(self.result[::-1], (3, 2, 1))
|
||||
|
||||
def test_len(self):
|
||||
assert_equal(len(self.result), 3)
|
||||
|
||||
def test_repr(self):
|
||||
s = repr(self.result)
|
||||
assert_equal(s, 'Result(x=1, y=2, z=3, w=99, beta=0.5)')
|
||||
|
||||
def test_hash(self):
|
||||
assert_equal(hash(self.result), hash((1, 2, 3)))
|
||||
|
||||
def test_pickle(self):
|
||||
s = pickle.dumps(self.result)
|
||||
obj = pickle.loads(s)
|
||||
assert isinstance(obj, Result)
|
||||
assert_equal(obj.x, self.result.x)
|
||||
assert_equal(obj.y, self.result.y)
|
||||
assert_equal(obj.z, self.result.z)
|
||||
assert_equal(obj.w, self.result.w)
|
||||
assert_equal(obj.beta, self.result.beta)
|
||||
|
||||
def test_read_only_existing(self):
|
||||
with pytest.raises(AttributeError, match="can't set attribute"):
|
||||
self.result.x = -1
|
||||
|
||||
def test_read_only_new(self):
|
||||
self.result.plate_of_shrimp = "lattice of coincidence"
|
||||
assert self.result.plate_of_shrimp == "lattice of coincidence"
|
||||
|
||||
def test_constructor_missing_parameter(self):
|
||||
with pytest.raises(TypeError, match='missing'):
|
||||
# `w` is missing.
|
||||
Result(x=1, y=2, z=3, beta=0.75)
|
||||
|
||||
def test_constructor_incorrect_parameter(self):
|
||||
with pytest.raises(TypeError, match='unexpected'):
|
||||
# `foo` is not an existing field.
|
||||
Result(x=1, y=2, z=3, w=123, beta=0.75, foo=999)
|
||||
|
||||
def test_module(self):
|
||||
m = 'scipy._lib.tests.test_bunch'
|
||||
assert_equal(Result.__module__, m)
|
||||
assert_equal(self.result.__module__, m)
|
||||
|
||||
def test_extra_fields_per_instance(self):
|
||||
# This test exists to ensure that instances of the same class
|
||||
# store their own values for the extra fields. That is, the values
|
||||
# are stored per instance and not in the class.
|
||||
result1 = Result(x=1, y=2, z=3, w=-1, beta=0.0)
|
||||
result2 = Result(x=4, y=5, z=6, w=99, beta=1.0)
|
||||
assert_equal(result1.w, -1)
|
||||
assert_equal(result1.beta, 0.0)
|
||||
# The rest of these checks aren't essential, but let's check
|
||||
# them anyway.
|
||||
assert_equal(result1[:], (1, 2, 3))
|
||||
assert_equal(result2.w, 99)
|
||||
assert_equal(result2.beta, 1.0)
|
||||
assert_equal(result2[:], (4, 5, 6))
|
||||
|
||||
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
# Other tests
|
||||
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
|
||||
def test_extra_field_names_is_optional(self):
|
||||
Square = _make_tuple_bunch('Square', ['width', 'height'])
|
||||
sq = Square(width=1, height=2)
|
||||
assert_equal(sq.width, 1)
|
||||
assert_equal(sq.height, 2)
|
||||
s = repr(sq)
|
||||
assert_equal(s, 'Square(width=1, height=2)')
|
||||
|
||||
def test_tuple_like(self):
|
||||
Tup = _make_tuple_bunch('Tup', ['a', 'b'])
|
||||
tu = Tup(a=1, b=2)
|
||||
assert isinstance(tu, tuple)
|
||||
assert isinstance(tu + (1,), tuple)
|
||||
|
||||
def test_explicit_module(self):
|
||||
m = 'some.module.name'
|
||||
Foo = _make_tuple_bunch('Foo', ['x'], ['a', 'b'], module=m)
|
||||
foo = Foo(x=1, a=355, b=113)
|
||||
assert_equal(Foo.__module__, m)
|
||||
assert_equal(foo.__module__, m)
|
||||
|
||||
def test_passes_polars_checks(self):
|
||||
# gh-22450
|
||||
Square = _make_tuple_bunch('Square', ['width', 'height'])
|
||||
assert hasattr(Square, '_replace')
|
||||
assert hasattr(Square, '_field_defaults')
|
||||
|
||||
|
||||
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
# Argument validation
|
||||
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
|
||||
@pytest.mark.parametrize('args', [('123', ['a'], ['b']),
|
||||
('Foo', ['-3'], ['x']),
|
||||
('Foo', ['a'], ['+-*/'])])
|
||||
def test_identifiers_not_allowed(self, args):
|
||||
with pytest.raises(ValueError, match='identifiers'):
|
||||
_make_tuple_bunch(*args)
|
||||
|
||||
@pytest.mark.parametrize('args', [('Foo', ['a', 'b', 'a'], ['x']),
|
||||
('Foo', ['a', 'b'], ['b', 'x'])])
|
||||
def test_repeated_field_names(self, args):
|
||||
with pytest.raises(ValueError, match='Duplicate'):
|
||||
_make_tuple_bunch(*args)
|
||||
|
||||
@pytest.mark.parametrize('args', [('Foo', ['_a'], ['x']),
|
||||
('Foo', ['a'], ['_x'])])
|
||||
def test_leading_underscore_not_allowed(self, args):
|
||||
with pytest.raises(ValueError, match='underscore'):
|
||||
_make_tuple_bunch(*args)
|
||||
|
||||
@pytest.mark.parametrize('args', [('Foo', ['def'], ['x']),
|
||||
('Foo', ['a'], ['or']),
|
||||
('and', ['a'], ['x'])])
|
||||
def test_keyword_not_allowed_in_fields(self, args):
|
||||
with pytest.raises(ValueError, match='keyword'):
|
||||
_make_tuple_bunch(*args)
|
||||
|
||||
def test_at_least_one_field_name_required(self):
|
||||
with pytest.raises(ValueError, match='at least one name'):
|
||||
_make_tuple_bunch('Qwerty', [], ['a', 'b'])
|
||||
@@ -0,0 +1,196 @@
|
||||
from numpy.testing import assert_equal, assert_
|
||||
from pytest import raises as assert_raises
|
||||
|
||||
import time
|
||||
import pytest
|
||||
import ctypes
|
||||
import threading
|
||||
from scipy._lib import _ccallback_c as _test_ccallback_cython
|
||||
from scipy._lib import _test_ccallback
|
||||
from scipy._lib._ccallback import LowLevelCallable
|
||||
|
||||
|
||||
ERROR_VALUE = 2.0
|
||||
|
||||
|
||||
def callback_python(a, user_data=None):
|
||||
if a == ERROR_VALUE:
|
||||
raise ValueError("bad value")
|
||||
|
||||
if user_data is None:
|
||||
return a + 1
|
||||
else:
|
||||
return a + user_data
|
||||
|
||||
def _get_cffi_func(base, signature):
|
||||
cffi = pytest.importorskip("cffi")
|
||||
|
||||
# Get function address
|
||||
voidp = ctypes.cast(base, ctypes.c_void_p)
|
||||
address = voidp.value
|
||||
|
||||
# Create corresponding cffi handle
|
||||
ffi = cffi.FFI()
|
||||
func = ffi.cast(signature, address)
|
||||
return func
|
||||
|
||||
|
||||
def _get_ctypes_data():
|
||||
value = ctypes.c_double(2.0)
|
||||
return ctypes.cast(ctypes.pointer(value), ctypes.c_voidp)
|
||||
|
||||
|
||||
def _get_cffi_data():
|
||||
cffi = pytest.importorskip("cffi")
|
||||
ffi = cffi.FFI()
|
||||
return ffi.new('double *', 2.0)
|
||||
|
||||
|
||||
CALLERS = {
|
||||
'simple': _test_ccallback.test_call_simple,
|
||||
'nodata': _test_ccallback.test_call_nodata,
|
||||
'nonlocal': _test_ccallback.test_call_nonlocal,
|
||||
'cython': _test_ccallback_cython.test_call_cython,
|
||||
}
|
||||
|
||||
# These functions have signatures known to the callers
|
||||
FUNCS = {
|
||||
'python': lambda: callback_python,
|
||||
'capsule': lambda: _test_ccallback.test_get_plus1_capsule(),
|
||||
'cython': lambda: LowLevelCallable.from_cython(_test_ccallback_cython,
|
||||
"plus1_cython"),
|
||||
'ctypes': lambda: _test_ccallback_cython.plus1_ctypes,
|
||||
'cffi': lambda: _get_cffi_func(_test_ccallback_cython.plus1_ctypes,
|
||||
'double (*)(double, int *, void *)'),
|
||||
'capsule_b': lambda: _test_ccallback.test_get_plus1b_capsule(),
|
||||
'cython_b': lambda: LowLevelCallable.from_cython(_test_ccallback_cython,
|
||||
"plus1b_cython"),
|
||||
'ctypes_b': lambda: _test_ccallback_cython.plus1b_ctypes,
|
||||
'cffi_b': lambda: _get_cffi_func(_test_ccallback_cython.plus1b_ctypes,
|
||||
'double (*)(double, double, int *, void *)'),
|
||||
}
|
||||
|
||||
# These functions have signatures the callers don't know
|
||||
BAD_FUNCS = {
|
||||
'capsule_bc': lambda: _test_ccallback.test_get_plus1bc_capsule(),
|
||||
'cython_bc': lambda: LowLevelCallable.from_cython(_test_ccallback_cython,
|
||||
"plus1bc_cython"),
|
||||
'ctypes_bc': lambda: _test_ccallback_cython.plus1bc_ctypes,
|
||||
'cffi_bc': lambda: _get_cffi_func(
|
||||
_test_ccallback_cython.plus1bc_ctypes,
|
||||
'double (*)(double, double, double, int *, void *)'
|
||||
),
|
||||
}
|
||||
|
||||
USER_DATAS = {
|
||||
'ctypes': _get_ctypes_data,
|
||||
'cffi': _get_cffi_data,
|
||||
'capsule': _test_ccallback.test_get_data_capsule,
|
||||
}
|
||||
|
||||
|
||||
def test_callbacks():
|
||||
def check(caller, func, user_data):
|
||||
caller = CALLERS[caller]
|
||||
func = FUNCS[func]()
|
||||
user_data = USER_DATAS[user_data]()
|
||||
|
||||
if func is callback_python:
|
||||
def func2(x):
|
||||
return func(x, 2.0)
|
||||
else:
|
||||
func2 = LowLevelCallable(func, user_data)
|
||||
func = LowLevelCallable(func)
|
||||
|
||||
# Test basic call
|
||||
assert_equal(caller(func, 1.0), 2.0)
|
||||
|
||||
# Test 'bad' value resulting to an error
|
||||
assert_raises(ValueError, caller, func, ERROR_VALUE)
|
||||
|
||||
# Test passing in user_data
|
||||
assert_equal(caller(func2, 1.0), 3.0)
|
||||
|
||||
for caller in sorted(CALLERS.keys()):
|
||||
for func in sorted(FUNCS.keys()):
|
||||
for user_data in sorted(USER_DATAS.keys()):
|
||||
check(caller, func, user_data)
|
||||
|
||||
|
||||
def test_bad_callbacks():
|
||||
def check(caller, func, user_data):
|
||||
caller = CALLERS[caller]
|
||||
user_data = USER_DATAS[user_data]()
|
||||
func = BAD_FUNCS[func]()
|
||||
|
||||
if func is callback_python:
|
||||
def func2(x):
|
||||
return func(x, 2.0)
|
||||
else:
|
||||
func2 = LowLevelCallable(func, user_data)
|
||||
func = LowLevelCallable(func)
|
||||
|
||||
# Test that basic call fails
|
||||
assert_raises(ValueError, caller, LowLevelCallable(func), 1.0)
|
||||
|
||||
# Test that passing in user_data also fails
|
||||
assert_raises(ValueError, caller, func2, 1.0)
|
||||
|
||||
# Test error message
|
||||
llfunc = LowLevelCallable(func)
|
||||
try:
|
||||
caller(llfunc, 1.0)
|
||||
except ValueError as err:
|
||||
msg = str(err)
|
||||
assert_(llfunc.signature in msg, msg)
|
||||
assert_('double (double, double, int *, void *)' in msg, msg)
|
||||
|
||||
for caller in sorted(CALLERS.keys()):
|
||||
for func in sorted(BAD_FUNCS.keys()):
|
||||
for user_data in sorted(USER_DATAS.keys()):
|
||||
check(caller, func, user_data)
|
||||
|
||||
|
||||
def test_signature_override():
|
||||
caller = _test_ccallback.test_call_simple
|
||||
func = _test_ccallback.test_get_plus1_capsule()
|
||||
|
||||
llcallable = LowLevelCallable(func, signature="bad signature")
|
||||
assert_equal(llcallable.signature, "bad signature")
|
||||
assert_raises(ValueError, caller, llcallable, 3)
|
||||
|
||||
llcallable = LowLevelCallable(func, signature="double (double, int *, void *)")
|
||||
assert_equal(llcallable.signature, "double (double, int *, void *)")
|
||||
assert_equal(caller(llcallable, 3), 4)
|
||||
|
||||
|
||||
def test_threadsafety():
|
||||
def callback(a, caller):
|
||||
if a <= 0:
|
||||
return 1
|
||||
else:
|
||||
res = caller(lambda x: callback(x, caller), a - 1)
|
||||
return 2*res
|
||||
|
||||
def check(caller):
|
||||
caller = CALLERS[caller]
|
||||
|
||||
results = []
|
||||
|
||||
count = 10
|
||||
|
||||
def run():
|
||||
time.sleep(0.01)
|
||||
r = caller(lambda x: callback(x, caller), count)
|
||||
results.append(r)
|
||||
|
||||
threads = [threading.Thread(target=run) for j in range(20)]
|
||||
for thread in threads:
|
||||
thread.start()
|
||||
for thread in threads:
|
||||
thread.join()
|
||||
|
||||
assert_equal(results, [2.0**count]*len(threads))
|
||||
|
||||
for caller in CALLERS.keys():
|
||||
check(caller)
|
||||
@@ -0,0 +1,45 @@
|
||||
"""
|
||||
Check the SciPy config is valid.
|
||||
"""
|
||||
import scipy
|
||||
import pytest
|
||||
from unittest.mock import patch
|
||||
|
||||
pytestmark = pytest.mark.skipif(
|
||||
not hasattr(scipy.__config__, "_built_with_meson"),
|
||||
reason="Requires Meson builds",
|
||||
)
|
||||
|
||||
|
||||
class TestSciPyConfigs:
|
||||
REQUIRED_CONFIG_KEYS = [
|
||||
"Compilers",
|
||||
"Machine Information",
|
||||
"Python Information",
|
||||
]
|
||||
|
||||
@pytest.mark.thread_unsafe
|
||||
@patch("scipy.__config__._check_pyyaml")
|
||||
def test_pyyaml_not_found(self, mock_yaml_importer):
|
||||
mock_yaml_importer.side_effect = ModuleNotFoundError()
|
||||
with pytest.warns(UserWarning):
|
||||
scipy.show_config()
|
||||
|
||||
def test_dict_mode(self):
|
||||
config = scipy.show_config(mode="dicts")
|
||||
|
||||
assert isinstance(config, dict)
|
||||
assert all([key in config for key in self.REQUIRED_CONFIG_KEYS]), (
|
||||
"Required key missing,"
|
||||
" see index of `False` with `REQUIRED_CONFIG_KEYS`"
|
||||
)
|
||||
|
||||
def test_invalid_mode(self):
|
||||
with pytest.raises(AttributeError):
|
||||
scipy.show_config(mode="foo")
|
||||
|
||||
def test_warn_to_add_tests(self):
|
||||
assert len(scipy.__config__.DisplayModes) == 2, (
|
||||
"New mode detected,"
|
||||
" please add UT if applicable and increment this count"
|
||||
)
|
||||
@@ -0,0 +1,10 @@
|
||||
import pytest
|
||||
|
||||
@pytest.mark.thread_unsafe
|
||||
def test_cython_api_deprecation():
|
||||
match = ("`scipy._lib._test_deprecation_def.foo_deprecated` "
|
||||
"is deprecated, use `foo` instead!\n"
|
||||
"Deprecated in Scipy 42.0.0")
|
||||
with pytest.warns(DeprecationWarning, match=match):
|
||||
from .. import _test_deprecation_call
|
||||
assert _test_deprecation_call.call() == (1, 1)
|
||||
@@ -0,0 +1,143 @@
|
||||
''' Some tests for the documenting decorator and support functions '''
|
||||
|
||||
import sys
|
||||
import pytest
|
||||
from numpy.testing import assert_equal, suppress_warnings
|
||||
|
||||
from scipy._lib import doccer
|
||||
|
||||
# python -OO strips docstrings
|
||||
DOCSTRINGS_STRIPPED = sys.flags.optimize > 1
|
||||
|
||||
docstring = \
|
||||
"""Docstring
|
||||
%(strtest1)s
|
||||
%(strtest2)s
|
||||
%(strtest3)s
|
||||
"""
|
||||
param_doc1 = \
|
||||
"""Another test
|
||||
with some indent"""
|
||||
|
||||
param_doc2 = \
|
||||
"""Another test, one line"""
|
||||
|
||||
param_doc3 = \
|
||||
""" Another test
|
||||
with some indent"""
|
||||
|
||||
doc_dict = {'strtest1':param_doc1,
|
||||
'strtest2':param_doc2,
|
||||
'strtest3':param_doc3}
|
||||
|
||||
filled_docstring = \
|
||||
"""Docstring
|
||||
Another test
|
||||
with some indent
|
||||
Another test, one line
|
||||
Another test
|
||||
with some indent
|
||||
"""
|
||||
|
||||
|
||||
def test_unindent():
|
||||
with suppress_warnings() as sup:
|
||||
sup.filter(category=DeprecationWarning)
|
||||
assert_equal(doccer.unindent_string(param_doc1), param_doc1)
|
||||
assert_equal(doccer.unindent_string(param_doc2), param_doc2)
|
||||
assert_equal(doccer.unindent_string(param_doc3), param_doc1)
|
||||
|
||||
|
||||
def test_unindent_dict():
|
||||
with suppress_warnings() as sup:
|
||||
sup.filter(category=DeprecationWarning)
|
||||
d2 = doccer.unindent_dict(doc_dict)
|
||||
assert_equal(d2['strtest1'], doc_dict['strtest1'])
|
||||
assert_equal(d2['strtest2'], doc_dict['strtest2'])
|
||||
assert_equal(d2['strtest3'], doc_dict['strtest1'])
|
||||
|
||||
|
||||
def test_docformat():
|
||||
with suppress_warnings() as sup:
|
||||
sup.filter(category=DeprecationWarning)
|
||||
udd = doccer.unindent_dict(doc_dict)
|
||||
formatted = doccer.docformat(docstring, udd)
|
||||
assert_equal(formatted, filled_docstring)
|
||||
single_doc = 'Single line doc %(strtest1)s'
|
||||
formatted = doccer.docformat(single_doc, doc_dict)
|
||||
# Note - initial indent of format string does not
|
||||
# affect subsequent indent of inserted parameter
|
||||
assert_equal(formatted, """Single line doc Another test
|
||||
with some indent""")
|
||||
|
||||
|
||||
@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstrings stripped")
def test_decorator():
    # Check that the `filldoc` decorator fills `%(...)s` slots in a decorated
    # function's docstring from `doc_dict`, both with and without parameter
    # unindentation.
    # NOTE(review): the docstring indentation below appears flattened in this
    # copy of the file; the `__doc__` equality checks compare whitespace
    # exactly, so confirm the literals against the original source.
    with suppress_warnings() as sup:
        sup.filter(category=DeprecationWarning)
        # with unindentation of parameters
        decorator = doccer.filldoc(doc_dict, True)

        @decorator
        def func():
            """ Docstring
            %(strtest3)s
            """

        def expected():
            """ Docstring
            Another test
            with some indent
            """
        assert_equal(func.__doc__, expected.__doc__)

        # without unindentation of parameters

        # The docstring should be unindented for Python 3.13+
        # because of https://github.com/python/cpython/issues/81283
        decorator = doccer.filldoc(doc_dict, False if \
                                   sys.version_info < (3, 13) else True)

        @decorator
        def func():
            """ Docstring
            %(strtest3)s
            """
        def expected():
            """ Docstring
            Another test
            with some indent
            """
        assert_equal(func.__doc__, expected.__doc__)
|
||||
|
||||
|
||||
@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstrings stripped")
def test_inherit_docstring_from():
    # `inherit_docstring_from(Foo)` substitutes `%(super)s` in the decorated
    # method's docstring with the docstring of the same-named method on Foo;
    # when the decorated method has no docstring at all, Foo's docstring is
    # used verbatim.

    with suppress_warnings() as sup:
        sup.filter(category=DeprecationWarning)

        class Foo:
            def func(self):
                '''Do something useful.'''
                return

            def func2(self):
                '''Something else.'''

        class Bar(Foo):
            @doccer.inherit_docstring_from(Foo)
            def func(self):
                '''%(super)sABC'''
                return

            @doccer.inherit_docstring_from(Foo)
            def func2(self):
                # No docstring.
                return

        # Substitution is visible on the class and on instances alike.
        assert_equal(Bar.func.__doc__, Foo.func.__doc__ + 'ABC')
        assert_equal(Bar.func2.__doc__, Foo.func2.__doc__)
        bar = Bar()
        assert_equal(bar.func.__doc__, Foo.func.__doc__ + 'ABC')
        assert_equal(bar.func2.__doc__, Foo.func2.__doc__)
|
||||
@@ -0,0 +1,18 @@
|
||||
import pytest
|
||||
import sys
|
||||
import subprocess
|
||||
|
||||
from .test_public_api import PUBLIC_MODULES
|
||||
|
||||
# Regression tests for gh-6793.
|
||||
# Check that all modules are importable in a new Python process.
|
||||
# This is not necessarily true if there are import cycles present.
|
||||
|
||||
@pytest.mark.fail_slow(40)
@pytest.mark.slow
@pytest.mark.thread_unsafe
def test_public_modules_importable():
    """Each public module must import cleanly in a fresh interpreter.

    Regression test for gh-6793: import cycles can make a submodule fail to
    import on its own even though `import scipy` works.
    """
    # Start all child interpreters first so the imports run concurrently,
    # then collect their exit codes.
    procs = []
    for module in PUBLIC_MODULES:
        procs.append(subprocess.Popen([sys.executable, '-c', f'import {module}']))
    for module, proc in zip(PUBLIC_MODULES, procs):
        assert proc.wait() == 0, f'Failed to import {module}'
|
||||
@@ -0,0 +1,482 @@
|
||||
"""
|
||||
This test script is adopted from:
|
||||
https://github.com/numpy/numpy/blob/main/numpy/tests/test_public_api.py
|
||||
"""
|
||||
|
||||
import pkgutil
|
||||
import types
|
||||
import importlib
|
||||
import warnings
|
||||
from importlib import import_module
|
||||
|
||||
import pytest
|
||||
|
||||
import numpy as np
|
||||
import scipy
|
||||
|
||||
from scipy.conftest import xp_available_backends
|
||||
|
||||
|
||||
def test_dir_testing():
    """Assert that output of dir has only one "testing/tester"
    attribute without duplicate"""
    names = dir(scipy)
    assert len(names) == len(set(names))
|
||||
|
||||
|
||||
# Historically SciPy has not used leading underscores for private submodules
# much. This has resulted in lots of things that look like public modules
# (i.e. things that can be imported as `import scipy.somesubmodule.somefile`),
# but were never intended to be public. The PUBLIC_MODULES list contains
# modules that are either public because they were meant to be, or because they
# contain public functions/objects that aren't present in any other namespace
# for whatever reason and therefore should be treated as public.
#
# Consumed by the `is_unexpected`/importability checks below; keep sorted by
# subpackage.
PUBLIC_MODULES = ["scipy." + s for s in [
    "cluster", "cluster.vq", "cluster.hierarchy",
    "constants",
    "datasets",
    "differentiate",
    "fft",
    "fftpack",
    "integrate",
    "interpolate",
    "io", "io.arff", "io.matlab", "io.wavfile",
    "linalg", "linalg.blas", "linalg.cython_blas", "linalg.lapack",
    "linalg.cython_lapack", "linalg.interpolative",
    "ndimage",
    "odr",
    "optimize", "optimize.elementwise",
    "signal", "signal.windows",
    "sparse", "sparse.linalg", "sparse.csgraph",
    "spatial", "spatial.distance", "spatial.transform",
    "special",
    "stats", "stats.contingency", "stats.distributions", "stats.mstats",
    "stats.qmc", "stats.sampling"
]]
|
||||
|
||||
# The PRIVATE_BUT_PRESENT_MODULES list contains modules that lacked underscores
# in their name and hence looked public, but weren't meant to be. All these
# namespace were deprecated in the 1.8.0 release - see "clear split between
# public and private API" in the 1.8.0 release notes.
# These private modules support will be removed in SciPy v2.0.0, as the
# deprecation messages emitted by each of these modules say.
PRIVATE_BUT_PRESENT_MODULES = [
    'scipy.constants.codata', 'scipy.constants.constants',
    'scipy.fftpack.basic', 'scipy.fftpack.convolve', 'scipy.fftpack.helper',
    'scipy.fftpack.pseudo_diffs', 'scipy.fftpack.realtransforms',
    'scipy.integrate.dop', 'scipy.integrate.lsoda', 'scipy.integrate.odepack',
    'scipy.integrate.quadpack', 'scipy.integrate.vode',
    'scipy.interpolate.dfitpack', 'scipy.interpolate.fitpack',
    'scipy.interpolate.fitpack2', 'scipy.interpolate.interpnd',
    'scipy.interpolate.interpolate', 'scipy.interpolate.ndgriddata',
    'scipy.interpolate.polyint', 'scipy.interpolate.rbf',
    'scipy.io.arff.arffread', 'scipy.io.harwell_boeing', 'scipy.io.idl',
    'scipy.io.matlab.byteordercodes', 'scipy.io.matlab.mio',
    'scipy.io.matlab.mio4', 'scipy.io.matlab.mio5',
    'scipy.io.matlab.mio5_params', 'scipy.io.matlab.mio5_utils',
    'scipy.io.matlab.mio_utils', 'scipy.io.matlab.miobase',
    'scipy.io.matlab.streams', 'scipy.io.mmio', 'scipy.io.netcdf',
    'scipy.linalg.basic', 'scipy.linalg.decomp',
    'scipy.linalg.decomp_cholesky', 'scipy.linalg.decomp_lu',
    'scipy.linalg.decomp_qr', 'scipy.linalg.decomp_schur',
    'scipy.linalg.decomp_svd', 'scipy.linalg.matfuncs', 'scipy.linalg.misc',
    'scipy.linalg.special_matrices',
    'scipy.misc', 'scipy.misc.common', 'scipy.misc.doccer',
    'scipy.ndimage.filters', 'scipy.ndimage.fourier',
    'scipy.ndimage.interpolation', 'scipy.ndimage.measurements',
    'scipy.ndimage.morphology',
    'scipy.odr.models', 'scipy.odr.odrpack',
    'scipy.optimize.cobyla', 'scipy.optimize.cython_optimize',
    'scipy.optimize.lbfgsb', 'scipy.optimize.linesearch',
    'scipy.optimize.minpack', 'scipy.optimize.minpack2',
    'scipy.optimize.moduleTNC', 'scipy.optimize.nonlin',
    'scipy.optimize.optimize', 'scipy.optimize.slsqp', 'scipy.optimize.tnc',
    'scipy.optimize.zeros',
    'scipy.signal.bsplines', 'scipy.signal.filter_design',
    'scipy.signal.fir_filter_design', 'scipy.signal.lti_conversion',
    'scipy.signal.ltisys', 'scipy.signal.signaltools',
    'scipy.signal.spectral', 'scipy.signal.spline',
    'scipy.signal.waveforms', 'scipy.signal.wavelets',
    'scipy.signal.windows.windows',
    'scipy.sparse.base', 'scipy.sparse.bsr', 'scipy.sparse.compressed',
    'scipy.sparse.construct', 'scipy.sparse.coo', 'scipy.sparse.csc',
    'scipy.sparse.csr', 'scipy.sparse.data', 'scipy.sparse.dia',
    'scipy.sparse.dok', 'scipy.sparse.extract', 'scipy.sparse.lil',
    'scipy.sparse.linalg.dsolve', 'scipy.sparse.linalg.eigen',
    'scipy.sparse.linalg.interface', 'scipy.sparse.linalg.isolve',
    'scipy.sparse.linalg.matfuncs', 'scipy.sparse.sparsetools',
    'scipy.sparse.spfuncs', 'scipy.sparse.sputils',
    'scipy.spatial.ckdtree', 'scipy.spatial.kdtree', 'scipy.spatial.qhull',
    'scipy.spatial.transform.rotation',
    'scipy.special.add_newdocs', 'scipy.special.basic',
    'scipy.special.cython_special', 'scipy.special.orthogonal',
    'scipy.special.sf_error', 'scipy.special.specfun',
    'scipy.special.spfun_stats',
    'scipy.stats.biasedurn', 'scipy.stats.kde', 'scipy.stats.morestats',
    'scipy.stats.mstats_basic', 'scipy.stats.mstats_extras',
    'scipy.stats.mvn', 'scipy.stats.stats',
]
|
||||
|
||||
|
||||
def is_unexpected(name):
    """Check if this needs to be considered."""
    # Underscore-private modules, test packages and build helpers never count.
    if any(fragment in name for fragment in ('._', '.tests', '.setup')):
        return False
    # Anything already classified (public, or known private-but-present) is
    # accounted for; everything else is unexpected.
    return (name not in PUBLIC_MODULES
            and name not in PRIVATE_BUT_PRESENT_MODULES)
|
||||
|
||||
|
||||
# Modules discovered by pkgutil that are deliberately not classified as either
# public or private-but-present (build/test plumbing shipped with the package).
SKIP_LIST = [
    'scipy.conftest',
    'scipy.version',
    'scipy.special.libsf_error_state'
]
|
||||
|
||||
|
||||
# XXX: this test does more than it says on the tin - in using `pkgutil.walk_packages`,
# it will raise if it encounters any exceptions which are not handled by `ignore_errors`
# while attempting to import each discovered package.
# For now, `ignore_errors` only ignores what is necessary, but this could be expanded -
# for example, to all errors from private modules or git subpackages - if desired.
@pytest.mark.thread_unsafe
def test_all_modules_are_expected():
    """
    Test that we don't add anything that looks like a new public module by
    accident. Check is based on filenames.
    """

    def ignore_errors(name):
        # `pkgutil.walk_packages` onerror callback: called with the failing
        # module name while the import exception is still being handled, so a
        # bare `raise` re-raises that in-flight exception.
        # if versions of other array libraries are installed which are incompatible
        # with the installed NumPy version, there can be errors on importing
        # `array_api_compat`. This should only raise if SciPy is configured with
        # that library as an available backend.
        backends = {'cupy', 'torch', 'dask.array'}
        for backend in backends:
            path = f'array_api_compat.{backend}'
            if path in name and backend not in xp_available_backends:
                return
        raise

    # Names found on disk that are neither public, private-but-present,
    # nor explicitly skipped.
    modnames = []

    with np.testing.suppress_warnings() as sup:
        sup.filter(DeprecationWarning, "scipy.misc")
        for _, modname, _ in pkgutil.walk_packages(path=scipy.__path__,
                                                   prefix=scipy.__name__ + '.',
                                                   onerror=ignore_errors):
            if is_unexpected(modname) and modname not in SKIP_LIST:
                # We have a name that is new. If that's on purpose, add it to
                # PUBLIC_MODULES. We don't expect to have to add anything to
                # PRIVATE_BUT_PRESENT_MODULES. Use an underscore in the name!
                modnames.append(modname)

    if modnames:
        raise AssertionError(f'Found unexpected modules: {modnames}')
|
||||
|
||||
|
||||
# Stuff that clearly shouldn't be in the API and is detected by the next test
# below
# (these are numpy submodules re-exported into the scipy namespace for
# historical reasons).
SKIP_LIST_2 = [
    'scipy.char',
    'scipy.rec',
    'scipy.emath',
    'scipy.math',
    'scipy.random',
    'scipy.ctypeslib',
    'scipy.ma'
]
|
||||
|
||||
|
||||
def test_all_modules_are_expected_2():
    """
    Method checking all objects. The pkgutil-based method in
    `test_all_modules_are_expected` does not catch imports into a namespace,
    only filenames.
    """

    def find_unexpected_members(mod_name):
        # Inspect the exported names of `mod_name` and return those that are
        # modules but neither public, private-but-present, nor skipped.
        module = importlib.import_module(mod_name)
        objnames = module.__all__ if hasattr(module, '__all__') else dir(module)
        offenders = []
        for objname in objnames:
            if objname.startswith('_'):
                continue
            fullobjname = mod_name + '.' + objname
            if not isinstance(getattr(module, objname), types.ModuleType):
                continue
            if is_unexpected(fullobjname) and fullobjname not in SKIP_LIST_2:
                offenders.append(fullobjname)
        return offenders

    with np.testing.suppress_warnings() as sup:
        sup.filter(DeprecationWarning, "scipy.misc")
        unexpected_members = find_unexpected_members("scipy")
        for modname in PUBLIC_MODULES:
            unexpected_members.extend(find_unexpected_members(modname))

    if unexpected_members:
        raise AssertionError("Found unexpected object(s) that look like "
                             f"modules: {unexpected_members}")
|
||||
|
||||
|
||||
def test_api_importable():
    """
    Check that all submodules listed higher up in this file can be imported
    Note that if a PRIVATE_BUT_PRESENT_MODULES entry goes missing, it may
    simply need to be removed from the list (deprecation may or may not be
    needed - apply common sense).
    """
    def check_importable(module_name):
        # AttributeError can surface from lazy-loading __getattr__ hooks.
        try:
            importlib.import_module(module_name)
        except (ImportError, AttributeError):
            return False
        return True

    module_names = [name for name in PUBLIC_MODULES
                    if not check_importable(name)]

    if module_names:
        raise AssertionError("Modules in the public API that cannot be "
                             f"imported: {module_names}")

    # The deprecated namespaces emit warnings on import; record (rather than
    # raise or print) them so the check stays quiet.
    with warnings.catch_warnings(record=True):
        warnings.filterwarnings('always', category=DeprecationWarning)
        warnings.filterwarnings('always', category=ImportWarning)
        for module_name in PRIVATE_BUT_PRESENT_MODULES:
            if not check_importable(module_name):
                module_names.append(module_name)

    if module_names:
        raise AssertionError("Modules that are not really public but looked "
                             "public and can not be imported: "
                             f"{module_names}")
|
||||
|
||||
|
||||
@pytest.mark.thread_unsafe
# Each entry pairs a deprecated private module with the public submodule its
# deprecation warning should point at; None means the top-level
# `scipy.<subpackage>` namespace itself.
@pytest.mark.parametrize(("module_name", "correct_module"),
    [('scipy.constants.codata', None), ('scipy.constants.constants', None),
     ('scipy.fftpack.basic', None), ('scipy.fftpack.helper', None),
     ('scipy.fftpack.pseudo_diffs', None), ('scipy.fftpack.realtransforms', None),
     ('scipy.integrate.dop', None), ('scipy.integrate.lsoda', None),
     ('scipy.integrate.odepack', None), ('scipy.integrate.quadpack', None),
     ('scipy.integrate.vode', None), ('scipy.interpolate.dfitpack', None),
     ('scipy.interpolate.fitpack', None), ('scipy.interpolate.fitpack2', None),
     ('scipy.interpolate.interpolate', None), ('scipy.interpolate.ndgriddata', None),
     ('scipy.interpolate.polyint', None), ('scipy.interpolate.rbf', None),
     ('scipy.io.harwell_boeing', None), ('scipy.io.idl', None),
     ('scipy.io.mmio', None), ('scipy.io.netcdf', None),
     ('scipy.io.arff.arffread', 'arff'), ('scipy.io.matlab.byteordercodes', 'matlab'),
     ('scipy.io.matlab.mio_utils', 'matlab'), ('scipy.io.matlab.mio', 'matlab'),
     ('scipy.io.matlab.mio4', 'matlab'), ('scipy.io.matlab.mio5_params', 'matlab'),
     ('scipy.io.matlab.mio5_utils', 'matlab'), ('scipy.io.matlab.mio5', 'matlab'),
     ('scipy.io.matlab.miobase', 'matlab'), ('scipy.io.matlab.streams', 'matlab'),
     ('scipy.linalg.basic', None), ('scipy.linalg.decomp', None),
     ('scipy.linalg.decomp_cholesky', None), ('scipy.linalg.decomp_lu', None),
     ('scipy.linalg.decomp_qr', None), ('scipy.linalg.decomp_schur', None),
     ('scipy.linalg.decomp_svd', None), ('scipy.linalg.matfuncs', None),
     ('scipy.linalg.misc', None), ('scipy.linalg.special_matrices', None),
     ('scipy.ndimage.filters', None), ('scipy.ndimage.fourier', None),
     ('scipy.ndimage.interpolation', None), ('scipy.ndimage.measurements', None),
     ('scipy.ndimage.morphology', None), ('scipy.odr.models', None),
     ('scipy.odr.odrpack', None), ('scipy.optimize.cobyla', None),
     ('scipy.optimize.lbfgsb', None), ('scipy.optimize.linesearch', None),
     ('scipy.optimize.minpack', None), ('scipy.optimize.minpack2', None),
     ('scipy.optimize.moduleTNC', None), ('scipy.optimize.nonlin', None),
     ('scipy.optimize.optimize', None), ('scipy.optimize.slsqp', None),
     ('scipy.optimize.tnc', None), ('scipy.optimize.zeros', None),
     ('scipy.signal.bsplines', None), ('scipy.signal.filter_design', None),
     ('scipy.signal.fir_filter_design', None), ('scipy.signal.lti_conversion', None),
     ('scipy.signal.ltisys', None), ('scipy.signal.signaltools', None),
     ('scipy.signal.spectral', None), ('scipy.signal.spline', None),
     ('scipy.signal.waveforms', None), ('scipy.signal.wavelets', None),
     ('scipy.signal.windows.windows', 'windows'), ('scipy.sparse.base', None),
     ('scipy.sparse.bsr', None), ('scipy.sparse.compressed', None),
     ('scipy.sparse.construct', None), ('scipy.sparse.coo', None),
     ('scipy.sparse.csc', None), ('scipy.sparse.csr', None),
     ('scipy.sparse.data', None), ('scipy.sparse.dia', None),
     ('scipy.sparse.dok', None), ('scipy.sparse.extract', None),
     ('scipy.sparse.lil', None), ('scipy.sparse.linalg.dsolve', 'linalg'),
     ('scipy.sparse.linalg.eigen', 'linalg'), ('scipy.sparse.linalg.interface', 'linalg'),
     ('scipy.sparse.linalg.isolve', 'linalg'), ('scipy.sparse.linalg.matfuncs', 'linalg'),
     ('scipy.sparse.sparsetools', None), ('scipy.sparse.spfuncs', None),
     ('scipy.sparse.sputils', None), ('scipy.spatial.ckdtree', None),
     ('scipy.spatial.kdtree', None), ('scipy.spatial.qhull', None),
     ('scipy.spatial.transform.rotation', 'transform'),
     ('scipy.special.add_newdocs', None), ('scipy.special.basic', None),
     ('scipy.special.orthogonal', None), ('scipy.special.sf_error', None),
     ('scipy.special.specfun', None), ('scipy.special.spfun_stats', None),
     ('scipy.stats.biasedurn', None), ('scipy.stats.kde', None),
     ('scipy.stats.morestats', None), ('scipy.stats.mstats_basic', 'mstats'),
     ('scipy.stats.mstats_extras', 'mstats'), ('scipy.stats.mvn', None),
     ('scipy.stats.stats', None)])
def test_private_but_present_deprecation(module_name, correct_module):
    # gh-18279, gh-17572, gh-17771 noted that deprecation warnings
    # for imports from private modules
    # were misleading. Check that this is resolved.
    module = import_module(module_name)
    # Build the name of the namespace the warning should direct users to.
    if correct_module is None:
        import_name = f'scipy.{module_name.split(".")[1]}'
    else:
        import_name = f'scipy.{module_name.split(".")[1]}.{correct_module}'

    correct_import = import_module(import_name)

    # Attributes that were formerly in `module_name` can still be imported from
    # `module_name`, albeit with a deprecation warning.
    for attr_name in module.__all__:
        # ensure attribute is present where the warning is pointing
        assert getattr(correct_import, attr_name, None) is not None
        message = f"Please import `{attr_name}` from the `{import_name}`..."
        with pytest.deprecated_call(match=message):
            getattr(module, attr_name)

    # Attributes that were not in `module_name` get an error notifying the user
    # that the attribute is not in `module_name` and that `module_name` is deprecated.
    message = f"`{module_name}` is deprecated..."
    with pytest.raises(AttributeError, match=message):
        getattr(module, "ekki")
|
||||
@@ -0,0 +1,28 @@
|
||||
import re
|
||||
|
||||
import scipy
|
||||
import scipy.version
|
||||
|
||||
|
||||
def test_valid_scipy_version():
    # Verify that the SciPy version is a valid one (no .post suffix or other
    # nonsense). See NumPy issue gh-6431 for an issue caused by an invalid
    # version.
    version_pattern = r"^[0-9]+\.[0-9]+\.[0-9]+(|a[0-9]|b[0-9]|rc[0-9])"
    # Development versions look like "X.Y.Z.dev0" or
    # "X.Y.Z.dev0+git20231105.1a2b3c4".  The dots before "dev0" and between
    # the date and the hash are now escaped, and the stray "0+" quantifier is
    # replaced by a literal "0\+": previously `(.dev0)` accepted any character
    # before "dev0" and the hash separator matched any character.
    dev_suffix = r"((\.dev0)|(\.dev0\+git[0-9]{8}\.[0-9a-f]{7}))"
    if scipy.version.release:
        res = re.match(version_pattern, scipy.__version__)
    else:
        res = re.match(version_pattern + dev_suffix, scipy.__version__)

    assert res is not None
    assert scipy.__version__
|
||||
|
||||
|
||||
def test_version_submodule_members():
    """`scipy.version` may not be quite public, but we install it.

    So check that we don't silently change its contents.
    """
    expected = ('version', 'full_version', 'short_version',
                'git_revision', 'release')
    missing = [attr for attr in expected
               if not hasattr(scipy.version, attr)]
    assert not missing
|
||||
@@ -0,0 +1,48 @@
|
||||
""" Test tmpdirs module """
|
||||
from os import getcwd
|
||||
from os.path import realpath, abspath, dirname, isfile, join as pjoin, exists
|
||||
|
||||
from scipy._lib._tmpdirs import tempdir, in_tempdir, in_dir
|
||||
|
||||
from numpy.testing import assert_, assert_equal
|
||||
|
||||
import pytest
|
||||
|
||||
|
||||
# Absolute path of this test file and its containing directory; used by
# test_given_directory to verify in_dir() enters (and does not delete) MY_DIR.
MY_PATH = abspath(__file__)
MY_DIR = dirname(MY_PATH)
|
||||
|
||||
|
||||
@pytest.mark.thread_unsafe
def test_tempdir():
    """tempdir() yields a writable directory and removes it on exit."""
    with tempdir() as dpath:
        target = pjoin(dpath, 'example_file.txt')
        with open(target, "w") as stream:
            stream.write('a string\\n')
    # The whole directory must be gone once the context exits.
    assert_(not exists(dpath))
|
||||
|
||||
|
||||
@pytest.mark.thread_unsafe
def test_in_tempdir():
    """in_tempdir() chdirs into a fresh directory, then removes it and restores cwd."""
    starting_cwd = getcwd()
    with in_tempdir() as tmpdir:
        with open('test.txt', "w") as f:
            f.write('some text')
        # Relative and absolute views of the file agree while inside.
        assert_(isfile('test.txt'))
        assert_(isfile(pjoin(tmpdir, 'test.txt')))
    assert_(not exists(tmpdir))
    assert_equal(getcwd(), starting_cwd)
|
||||
|
||||
|
||||
@pytest.mark.thread_unsafe
def test_given_directory():
    # Test InGivenDirectory
    original_cwd = getcwd()
    # Default: stay in (and report) the current directory.
    with in_dir() as entered:
        assert_equal(entered, abspath(original_cwd))
        assert_equal(entered, abspath(getcwd()))
    # Explicit target: chdir into it for the duration of the block.
    with in_dir(MY_DIR) as entered:
        assert_equal(entered, MY_DIR)
        assert_equal(realpath(MY_DIR), realpath(abspath(getcwd())))
    # We were deleting the given directory! Check not so now.
    assert_(isfile(MY_PATH))
|
||||
@@ -0,0 +1,137 @@
|
||||
"""
|
||||
Tests which scan for certain occurrences in the code, they may not find
|
||||
all of these occurrences but should catch almost all. This file was adapted
|
||||
from NumPy.
|
||||
"""
|
||||
|
||||
|
||||
import os
|
||||
from pathlib import Path
|
||||
import ast
|
||||
import tokenize
|
||||
|
||||
import scipy
|
||||
|
||||
import pytest
|
||||
|
||||
|
||||
class ParseCall(ast.NodeVisitor):
    """Collect the dotted-name parts of a call target, outermost name first.

    Visiting the ``func`` node of ``warnings.filterwarnings(...)`` leaves
    ``ls == ['warnings', 'filterwarnings']``.
    """

    def __init__(self):
        # Name/attribute components, appended in evaluation order.
        self.ls = []

    def visit_Attribute(self, node):
        # Descend into the value expression first so the attribute name is
        # appended after the names it hangs off.
        self.generic_visit(node)
        self.ls.append(node.attr)

    def visit_Name(self, node):
        self.ls.append(node.id)
|
||||
|
||||
|
||||
class FindFuncs(ast.NodeVisitor):
    """Record problematic warning-related calls found in one parsed file.

    After visiting a module AST, ``bad_filters`` holds "<filename>:<lineno>"
    entries for ``simplefilter``/``filterwarnings`` calls whose action is
    "ignore", and ``bad_stacklevels`` holds entries for ``warnings.warn``
    calls that pass no ``stacklevel``.
    """

    def __init__(self, filename):
        super().__init__()
        # Used only to build the report strings below.
        # NOTE(review): the fixture passes a pathlib.Path here, so the equality
        # comparison against the plain string in visit_Call below may never
        # match -- confirm the intended behavior.
        self.__filename = filename
        self.bad_filters = []
        self.bad_stacklevels = []

    def visit_Call(self, node):
        # Reconstruct the dotted call target, e.g. ['warnings', 'warn'].
        p = ParseCall()
        p.visit(node.func)
        ast.NodeVisitor.generic_visit(self, node)

        if p.ls[-1] == 'simplefilter' or p.ls[-1] == 'filterwarnings':
            # get first argument of the `args` node of the filter call
            match node.args[0]:
                case ast.Constant() as c:
                    argtext = c.value
                case ast.JoinedStr() as js:
                    # if we get an f-string, discard the templated pieces, which
                    # are likely the type or specific message; we're interested
                    # in the action, which is less likely to use a template
                    argtext = "".join(
                        x.value for x in js.values if isinstance(x, ast.Constant)
                    )
                case _:
                    raise ValueError("unknown ast node type")
            # check if filter is set to ignore
            if argtext == "ignore":
                self.bad_filters.append(
                    f"{self.__filename}:{node.lineno}")

        if p.ls[-1] == 'warn' and (
                len(p.ls) == 1 or p.ls[-2] == 'warnings'):

            if self.__filename == "_lib/tests/test_warnings.py":
                # This file
                return

            # See if stacklevel exists:
            # (the three-positional-argument form warn(msg, category,
            # stacklevel) already supplies it)
            if len(node.args) == 3:
                return
            args = {kw.arg for kw in node.keywords}
            if "stacklevel" not in args:
                self.bad_stacklevels.append(
                    f"{self.__filename}:{node.lineno}")
|
||||
|
||||
|
||||
@pytest.fixture(scope="session")
def warning_calls():
    # combined "ignore" and stacklevel error
    # Scan every .py file shipped in the installed scipy package once per
    # session, and return the locations collected by FindFuncs.
    base = Path(scipy.__file__).parent

    bad_filters = []
    bad_stacklevels = []

    for path in base.rglob("*.py"):
        # use tokenize to auto-detect encoding on systems where no
        # default encoding is defined (e.g., LANG='C')
        with tokenize.open(str(path)) as file:
            tree = ast.parse(file.read(), filename=str(path))
            finder = FindFuncs(path.relative_to(base))
            finder.visit(tree)
            bad_filters.extend(finder.bad_filters)
            bad_stacklevels.extend(finder.bad_stacklevels)

    # (bad "ignore" filters, warn() calls missing stacklevel)
    return bad_filters, bad_stacklevels
|
||||
|
||||
|
||||
@pytest.mark.fail_slow(40)
@pytest.mark.slow
def test_warning_calls_filters(warning_calls):
    """No blanket "ignore" warning filters in the code base outside the allowlist."""
    bad_filters, _ = warning_calls

    # We try not to add filters in the code base, because those filters aren't
    # thread-safe. We aim to only filter in tests with
    # np.testing.suppress_warnings. However, in some cases it may prove
    # necessary to filter out warnings, because we can't (easily) fix the root
    # cause for them and we don't want users to see some warnings when they use
    # SciPy correctly. So we list exceptions here. Add new entries only if
    # there's a good reason.
    allowed_filters = (
        os.path.join('datasets', '_fetchers.py'),
        os.path.join('datasets', '__init__.py'),
        os.path.join('optimize', '_optimize.py'),
        os.path.join('optimize', '_constraints.py'),
        os.path.join('optimize', '_nnls.py'),
        os.path.join('signal', '_ltisys.py'),
        os.path.join('sparse', '__init__.py'),  # np.matrix pending-deprecation
        os.path.join('special', '_basic.py'),  # gh-21801
        os.path.join('stats', '_discrete_distns.py'),  # gh-14901
        os.path.join('stats', '_continuous_distns.py'),
        os.path.join('stats', '_binned_statistic.py'),  # gh-19345
        os.path.join('stats', '_stats_py.py'),  # gh-20743
        os.path.join('stats', 'tests', 'test_axis_nan_policy.py'),  # gh-20694
        os.path.join('_lib', '_util.py'),  # gh-19341
        os.path.join('sparse', 'linalg', '_dsolve', 'linsolve.py'),  # gh-17924
        "conftest.py",
    )
    # Keep only offenders whose file part is not on the allowlist.
    offenders = [entry for entry in bad_filters
                 if entry.split(':')[0] not in allowed_filters]

    if offenders:
        raise AssertionError(
            "warning ignore filter should not be used, instead, use\n"
            "numpy.testing.suppress_warnings (in tests only);\n"
            "found in:\n {}".format(
                "\n ".join(offenders)))
|
||||
|
||||
Reference in New Issue
Block a user