Skip to content

Commit

Permalink
dependencies: Update to numpy<1.24.4 (#2693)
Browse files Browse the repository at this point in the history
Numpy 1.24 removes a few deprecated features and introduces new deprecations:
* construction of ragged arrays now throws ValueError.
    * Update `safe_create_np_array` to handle both the original DeprecationWarning and the new ValueError
    * Extract values of all mechanisms in `test_save_state_before_simulations` to avoid a ragged array
* Conversion from Python integer types warns on range overflow.
    * Switch representation of ctype integers to unsigned. Most uses expect unsigned results.
  • Loading branch information
jvesely authored Jun 9, 2023
2 parents 5e6febd + 3eeea4b commit 59b1b20
Show file tree
Hide file tree
Showing 8 changed files with 41 additions and 30 deletions.
8 changes: 8 additions & 0 deletions psyneulink/core/globals/utilities.py
Original file line number Diff line number Diff line change
Expand Up @@ -1022,6 +1022,14 @@ def safe_create_np_array(value):
return np.asarray(value)
except np.VisibleDeprecationWarning:
return np.asarray(value, dtype=object)
except ValueError as e:
# numpy 1.24 removed the above deprecation and raises
# ValueError instead. Note that the below call can still
# raise other ValueErrors
if 'The requested array has an inhomogeneous shape' in str(e):
return np.asarray(value, dtype=object)
raise

except ValueError as e:
msg = str(e)
if 'cannot guess the desired dtype from the input' in msg:
Expand Down
8 changes: 4 additions & 4 deletions psyneulink/core/llvm/builder_context.py
Original file line number Diff line number Diff line change
Expand Up @@ -636,13 +636,13 @@ def _convert_llvm_ir_to_ctype(t: ir.Type):
if t.width == 1:
return ctypes.c_bool
elif t.width == 8:
return ctypes.c_int8
return ctypes.c_uint8
elif t.width == 16:
return ctypes.c_int16
return ctypes.c_uint16
elif t.width == 32:
return ctypes.c_int32
return ctypes.c_uint32
elif t.width == 64:
return ctypes.c_int64
return ctypes.c_uint64
else:
assert False, "Unknown integer type: {}".format(type_t)
elif type_t is ir.DoubleType:
Expand Down
2 changes: 1 addition & 1 deletion requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ llvmlite<0.41
matplotlib<3.7.2
modeci_mdf<0.5, >=0.3.4; (platform_machine == 'AMD64' or platform_machine == 'x86_64') and platform_python_implementation == 'CPython' and implementation_name == 'cpython'
networkx<3.2
numpy>=1.19.0, <1.23.6
numpy>=1.19.0, <1.24.4
optuna<3.3.0
packaging<24.0
pandas<2.0.3
Expand Down
29 changes: 16 additions & 13 deletions tests/composition/test_composition.py
Original file line number Diff line number Diff line change
Expand Up @@ -6773,10 +6773,12 @@ def test_save_state_before_simulations(self):

comp.run(inputs={A: [[1.0], [1.0]]})

# All mechanism values are 2D but B has two elements,
# extract element 0 out of each.
run_1_values = [
A.parameters.value.get(comp),
A.parameters.value.get(comp)[0],
B.parameters.value.get(comp)[0],
C.parameters.value.get(comp)
C.parameters.value.get(comp)[0]
]

# "Save state" code from EVCaux
Expand All @@ -6802,25 +6804,26 @@ def test_save_state_before_simulations(self):
# Allow values to continue accumulating so that we can set them back to the saved state
comp.run(inputs={A: [[1.0], [1.0]]})

run_2_values = [A.parameters.value.get(comp),
# All mechanism values are 2D but B has two elements,
# extract element 0 out of each.
run_2_values = [A.parameters.value.get(comp)[0],
B.parameters.value.get(comp)[0],
C.parameters.value.get(comp)]
C.parameters.value.get(comp)[0]]

comp.run(
inputs={A: [[1.0], [1.0]]},
reset_stateful_functions_to=reinitialization_values
)

run_3_values = [A.parameters.value.get(comp),
# All mechanism values are 2D but B has two elements,
# extract element 0 out of each.
run_3_values = [A.parameters.value.get(comp)[0],
B.parameters.value.get(comp)[0],
C.parameters.value.get(comp)]

np.testing.assert_allclose(np.asfarray(run_2_values),
np.asfarray(run_3_values))
np.testing.assert_allclose(np.asfarray(run_1_values),
[np.array([0.36]), np.array([0.056]), np.array([0.056])])
np.testing.assert_allclose(np.asfarray(run_2_values),
[np.array([0.5904]), np.array([0.16384]), np.array([0.16384])])
C.parameters.value.get(comp)[0]]

np.testing.assert_allclose(run_2_values, run_3_values)
np.testing.assert_allclose(np.asfarray(run_1_values), [[0.36], [0.056], [0.056]])
np.testing.assert_allclose(np.asfarray(run_2_values), [[0.5904], [0.16384], [0.16384]])


class TestNodeRoles:
Expand Down
4 changes: 2 additions & 2 deletions tests/llvm/test_builtins_mt_random.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ def f():
init_fun(state, SEED)

gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_int32')
out = ctypes.c_longlong()
out = ctypes.c_ulonglong()
def f():
gen_fun(state, out)
return out.value
Expand All @@ -39,7 +39,7 @@ def f():
init_fun.cuda_call(gpu_state, np.int32(SEED))

gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_int32')
out = np.asarray([0], dtype=np.int64)
out = np.asarray([0], dtype=np.uint64)
gpu_out = pnlvm.jit_engine.pycuda.driver.Out(out)
def f():
gen_fun.cuda_call(gpu_state, gpu_out)
Expand Down
8 changes: 4 additions & 4 deletions tests/llvm/test_builtins_philox_random.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@
def test_random_int64(benchmark, mode, seed, expected):
res = []
if mode == 'numpy':
state = np.random.Philox([np.uint64(seed)])
state = np.random.Philox([np.int64(seed).astype(np.uint64)])
prng = np.random.Generator(state)
def f():
# Get uint range [0, MAX] to avoid any intermediate caching of random bits
Expand All @@ -31,7 +31,7 @@ def f():
init_fun(state, seed)

gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_int64')
out = ctypes.c_longlong()
out = ctypes.c_ulonglong()
def f():
gen_fun(state, out)
return np.uint64(out.value)
Expand Down Expand Up @@ -75,10 +75,10 @@ def f():
init_fun(state, SEED)

gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_int32')
out = ctypes.c_int()
out = ctypes.c_uint()
def f():
gen_fun(state, out)
return np.uint32(out.value)
return out.value
elif mode == 'PTX':
init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_init')
state_size = ctypes.sizeof(init_fun.byref_arg_types[0])
Expand Down
8 changes: 4 additions & 4 deletions tests/llvm/test_custom_func.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,10 +8,10 @@
@pytest.mark.llvm
@pytest.mark.parametrize('mode', ['CPU',
pytest.param('PTX', marks=pytest.mark.cuda)])
@pytest.mark.parametrize('val', [np.int8(0x7e),
np.int16(0x7eec),
np.int32(0x7eedbeee),
np.int64(0x7eedcafedeadbeee)
@pytest.mark.parametrize('val', [np.uint8(0x7e),
np.uint16(0x7eec),
np.uint32(0x7eedbeee),
np.uint64(0x7eedcafedeadbeee)
], ids=lambda x: str(x.dtype))
def test_integer_broadcast(mode, val):
custom_name = None
Expand Down
4 changes: 2 additions & 2 deletions tests/llvm/test_helpers.py
Original file line number Diff line number Diff line change
Expand Up @@ -212,11 +212,11 @@ def test_helper_all_close(mode, var1, var2, atol, rtol):
ct_ty = ctypes.POINTER(bin_f.byref_arg_types[0])
ct_vec1 = vec1.ctypes.data_as(ct_ty)
ct_vec2 = vec2.ctypes.data_as(ct_ty)
res = ctypes.c_int32()
res = ctypes.c_uint32()

bin_f(ct_vec1, ct_vec2, ctypes.byref(res))
else:
res = np.array([5], dtype=np.int32)
res = np.array([5], dtype=np.uint32)
bin_f.cuda_wrap_call(vec1, vec2, res)
res = res[0]

Expand Down

0 comments on commit 59b1b20

Please sign in to comment.