From a73ab288e91da1ef4f2b2557110f2af86f6d51bd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 20 Dec 2023 16:23:45 +0000 Subject: [PATCH 01/65] github-actions(deps): bump actions/download-artifact from 3 to 4 (#2865) --- .github/workflows/pnl-ci-docs.yml | 4 ++-- .github/workflows/test-release.yml | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/pnl-ci-docs.yml b/.github/workflows/pnl-ci-docs.yml index 0bca607ab25..cc4d7a4350e 100644 --- a/.github/workflows/pnl-ci-docs.yml +++ b/.github/workflows/pnl-ci-docs.yml @@ -168,7 +168,7 @@ jobs: ref: gh-pages - name: Download branch docs - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: Documentation-head-${{ matrix.os }}-${{ matrix.python-version }}-x64 path: _built_docs/${{ github.ref }} @@ -185,7 +185,7 @@ jobs: if: github.ref == 'refs/heads/master' || github.ref == 'refs/heads/devel' || github.ref == 'refs/heads/docs' - name: Download main docs - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: Documentation-head-${{ matrix.os }}-${{ matrix.python-version }}-x64 # This overwrites files in current directory diff --git a/.github/workflows/test-release.yml b/.github/workflows/test-release.yml index 45cacf39c88..52db3f8d92a 100644 --- a/.github/workflows/test-release.yml +++ b/.github/workflows/test-release.yml @@ -78,7 +78,7 @@ jobs: steps: - name: Download dist files - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: Python-dist-files path: dist/ @@ -141,7 +141,7 @@ jobs: steps: - name: Download dist files - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: Python-dist-files path: dist/ @@ -175,7 +175,7 @@ jobs: steps: - name: Download dist files - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: Python-dist-files path: dist/ From fdb4e0c3c331166115b6538bf5a19a42ba8e4e2f Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sat, 23 Dec 2023 11:18:10 -0500 Subject: [PATCH 02/65] Revert "github-actions(deps): bump actions/download-artifact from 3 to 4 (#2865)" (#2867) This reverts commit a73ab288e91da1ef4f2b2557110f2af86f6d51bd. Fails to download documentation artifacts. 
--- .github/workflows/pnl-ci-docs.yml | 4 ++-- .github/workflows/test-release.yml | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/pnl-ci-docs.yml b/.github/workflows/pnl-ci-docs.yml index cc4d7a4350e..0bca607ab25 100644 --- a/.github/workflows/pnl-ci-docs.yml +++ b/.github/workflows/pnl-ci-docs.yml @@ -168,7 +168,7 @@ jobs: ref: gh-pages - name: Download branch docs - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v3 with: name: Documentation-head-${{ matrix.os }}-${{ matrix.python-version }}-x64 path: _built_docs/${{ github.ref }} @@ -185,7 +185,7 @@ jobs: if: github.ref == 'refs/heads/master' || github.ref == 'refs/heads/devel' || github.ref == 'refs/heads/docs' - name: Download main docs - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v3 with: name: Documentation-head-${{ matrix.os }}-${{ matrix.python-version }}-x64 # This overwrites files in current directory diff --git a/.github/workflows/test-release.yml b/.github/workflows/test-release.yml index 52db3f8d92a..45cacf39c88 100644 --- a/.github/workflows/test-release.yml +++ b/.github/workflows/test-release.yml @@ -78,7 +78,7 @@ jobs: steps: - name: Download dist files - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v3 with: name: Python-dist-files path: dist/ @@ -141,7 +141,7 @@ jobs: steps: - name: Download dist files - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v3 with: name: Python-dist-files path: dist/ @@ -175,7 +175,7 @@ jobs: steps: - name: Download dist files - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v3 with: name: Python-dist-files path: dist/ From 120e3181d38074c2bed35da59581ea4574665fe2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 30 Dec 2023 00:51:31 +0000 Subject: [PATCH 03/65] requirements: update grpcio requirement from <1.60.0 to <1.61.0 (#2855) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 2106c31f7b4..c2a42c9b4da 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,7 +4,7 @@ dill<0.3.8 fastkde>=1.0.24, <1.0.31 graph-scheduler>=1.1.1, <1.3.0 graphviz<0.21.0 -grpcio<1.60.0 +grpcio<1.61.0 leabra-psyneulink<0.3.3 llvmlite<0.42 matplotlib<3.7.3 From c425bf3a42e900799bd728772f4d20e459b90fd7 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sun, 31 Dec 2023 15:12:16 -0500 Subject: [PATCH 04/65] requirements: Bump pycuda to <2024 (#2869) Signed-off-by: Jan Vesely --- cuda_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cuda_requirements.txt b/cuda_requirements.txt index 63e22850e71..3a4f02b4cc9 100644 --- a/cuda_requirements.txt +++ b/cuda_requirements.txt @@ -1 +1 @@ -pycuda >2018, <2023 +pycuda >2018, <2024 From ef40bac04175e9c49e07d12a1f9269bc30f9f445 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 2 Jan 2024 19:20:51 +0000 Subject: [PATCH 05/65] requirements: update pytest requirement from <7.4.4 to <7.4.5 (#2870) --- dev_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev_requirements.txt b/dev_requirements.txt index e992d1087d4..a229217a04f 100644 --- a/dev_requirements.txt +++ b/dev_requirements.txt @@ -1,6 +1,6 @@ jupyter<1.0.1 packaging<24.0 -pytest<7.4.4 +pytest<7.4.5 pytest-benchmark<4.0.1 pytest-cov<4.1.1 pytest-forked<1.7.0 From 
822fc4f9403d9975d783b63dcf6dd0a71edbd4d3 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Fri, 22 Dec 2023 14:34:28 +0100 Subject: [PATCH 06/65] llvm, Mechanism: Use checked API to get params/state Signed-off-by: Jan Vesely --- psyneulink/core/components/mechanisms/mechanism.py | 2 +- .../modulatory/control/agt/lccontrolmechanism.py | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/psyneulink/core/components/mechanisms/mechanism.py b/psyneulink/core/components/mechanisms/mechanism.py index a019eff7668..3a4bafca984 100644 --- a/psyneulink/core/components/mechanisms/mechanism.py +++ b/psyneulink/core/components/mechanisms/mechanism.py @@ -3053,7 +3053,7 @@ def _gen_llvm_output_port_parse_variable(self, ctx, builder, if name == OWNER_VALUE: data = value elif name in self.llvm_state_ids: - data = pnlvm.helpers.get_state_ptr(builder, self, mech_state, name) + data = ctx.get_param_or_state_ptr(builder, self, name, state_struct_ptr=mech_state) else: data = None diff --git a/psyneulink/library/components/mechanisms/modulatory/control/agt/lccontrolmechanism.py b/psyneulink/library/components/mechanisms/modulatory/control/agt/lccontrolmechanism.py index c2fe17f2936..2ce1e6afeab 100644 --- a/psyneulink/library/components/mechanisms/modulatory/control/agt/lccontrolmechanism.py +++ b/psyneulink/library/components/mechanisms/modulatory/control/agt/lccontrolmechanism.py @@ -891,10 +891,10 @@ def _gen_llvm_mechanism_functions(self, ctx, builder, m_base_params, m_params, m m_val = builder.alloca(mech_out_ty, name="mechanism_out") # Load mechanism parameters - scaling_factor_ptr = pnlvm.helpers.get_param_ptr(builder, self, m_params, - "scaling_factor_gain") - base_factor_ptr = pnlvm.helpers.get_param_ptr(builder, self, m_params, - "base_level_gain") + scaling_factor_ptr = ctx.get_param_or_state_ptr(builder, self, "scaling_factor_gain", + param_struct_ptr=m_params) + base_factor_ptr = ctx.get_param_or_state_ptr(builder, self, "base_level_gain", + param_struct_ptr=m_params) # If modulated, parameters are single element array scaling_factor = pnlvm.helpers.load_extract_scalar_array_one(builder, scaling_factor_ptr) From 7d7d87997498580c4694a05e1e8c53b3322ab9b5 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sun, 31 Dec 2023 22:33:51 -0500 Subject: [PATCH 07/65] llvm, Component: Drop 'shadow_inputs' from compiled state structure Not used in compiled code. It's been absent from compiled parameter structures for some time. Signed-off-by: Jan Vesely --- psyneulink/core/components/component.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/psyneulink/core/components/component.py b/psyneulink/core/components/component.py index d1b2502ef18..e53ccca766d 100644 --- a/psyneulink/core/components/component.py +++ b/psyneulink/core/components/component.py @@ -1301,7 +1301,7 @@ def _get_compilation_state(self): "intensity"} # Prune subcomponents (which are enabled by type rather than a list) # that should be omitted - blacklist = { "objective_mechanism", "agent_rep", "projections"} + blacklist = { "objective_mechanism", "agent_rep", "projections", "shadow_inputs"} # Only mechanisms use "value" state, can execute 'until finished', # and need to track executions From 26cd5708a72d93c1bb87d0a2ecaabfddbe7b50ea Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sun, 31 Dec 2023 22:40:28 -0500 Subject: [PATCH 08/65] llvm, Component: Drop 'weight' and 'exponent' from compiled parameters Not used, but present in ports and projections. 
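A minimal sketch of the pruning pattern (illustrative only; the real blacklist lives in Component._get_compilation_params as a plain collection of names): parameters on the blacklist are simply skipped when the compiled parameter structure is assembled.

    # Hypothetical helper showing the intent of the blacklist:
    UNUSED_IN_COMPILED = {"weight", "exponent"}

    def compiled_param_names(all_param_names):
        # keep only parameters that compiled code actually reads
        return [name for name in all_param_names if name not in UNUSED_IN_COMPILED]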
Signed-off-by: Jan Vesely --- psyneulink/core/components/component.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/psyneulink/core/components/component.py b/psyneulink/core/components/component.py index e53ccca766d..dc491d2e6fe 100644 --- a/psyneulink/core/components/component.py +++ b/psyneulink/core/components/component.py @@ -1426,7 +1426,7 @@ def _get_compilation_params(self): "randomization_dimension", "save_values", "save_samples", "max_iterations", "duplicate_keys", "search_termination_function", "state_feature_function", - "search_function", + "search_function", "weight", "exponent", # not used in compiled learning "learning_results", "learning_signal", "learning_signals", "error_matrix", "error_signal", "activation_input", From 7be9eb3c78e991e3524a939466c6bdb8d76b93aa Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sun, 31 Dec 2023 22:41:29 -0500 Subject: [PATCH 09/65] llvm, Port: Use checked API to get params/state Signed-off-by: Jan Vesely --- psyneulink/core/components/ports/port.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/psyneulink/core/components/ports/port.py b/psyneulink/core/components/ports/port.py index 28588a87c0f..dc797799d53 100644 --- a/psyneulink/core/components/ports/port.py +++ b/psyneulink/core/components/ports/port.py @@ -2360,8 +2360,11 @@ def _get_input_struct_type(self, ctx): def _gen_llvm_function_body(self, ctx, builder, params, state, arg_in, arg_out, *, tags:frozenset): port_f = ctx.import_llvm_function(self.function) - base_params = pnlvm.helpers.get_param_ptr(builder, self, params, - "function") + base_params, f_state = ctx.get_param_or_state_ptr(builder, + self, + "function", + param_struct_ptr=params, + state_struct_ptr=state) if any(a.sender.modulation != OVERRIDE for a in self.mod_afferents): # Create a local copy of the function parameters only if @@ -2426,12 +2429,13 @@ def _gen_llvm_function_body(self, ctx, builder, params, state, arg_in, arg_out, if arg_out.type != port_f.args[3].type: assert len(arg_out.type.pointee) == 1 arg_out = builder.gep(arg_out, [ctx.int32_ty(0), ctx.int32_ty(0)]) + # Extract the data part of input if len(self.mod_afferents) == 0: f_input = arg_in else: f_input = builder.gep(arg_in, [ctx.int32_ty(0), ctx.int32_ty(0)]) - f_state = pnlvm.helpers.get_state_ptr(builder, self, state, "function") + builder.call(port_f, [f_params, f_state, f_input, arg_out]) return builder From 6669977439c283251f10e9c2b471620e2747ede8 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Mon, 1 Jan 2024 01:10:05 -0500 Subject: [PATCH 10/65] llvm, Component: Drop 'gating_signal_params' from compiled parameters Not used in compiled execution. 
Signed-off-by: Jan Vesely --- psyneulink/core/components/component.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/psyneulink/core/components/component.py b/psyneulink/core/components/component.py index dc491d2e6fe..6e715788778 100644 --- a/psyneulink/core/components/component.py +++ b/psyneulink/core/components/component.py @@ -1426,7 +1426,7 @@ def _get_compilation_params(self): "randomization_dimension", "save_values", "save_samples", "max_iterations", "duplicate_keys", "search_termination_function", "state_feature_function", - "search_function", "weight", "exponent", + "search_function", "weight", "exponent", "gating_signal_params", # not used in compiled learning "learning_results", "learning_signal", "learning_signals", "error_matrix", "error_signal", "activation_input", From 9f94acc429373225f1c3ee529b92cf5064b57df5 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Mon, 1 Jan 2024 01:11:04 -0500 Subject: [PATCH 11/65] llvm, Projection: Use checked API to get params/state Signed-off-by: Jan Vesely --- psyneulink/core/components/projections/projection.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/psyneulink/core/components/projections/projection.py b/psyneulink/core/components/projections/projection.py index 8ae97232406..cdf46687532 100644 --- a/psyneulink/core/components/projections/projection.py +++ b/psyneulink/core/components/projections/projection.py @@ -1117,8 +1117,11 @@ def _gen_llvm_function_body(self, ctx, builder, params, state, arg_in, arg_out, builder.store(builder.load(arg_in), arg_out) return builder - mf_state = pnlvm.helpers.get_state_ptr(builder, self, state, self.parameters.function.name) - mf_params = pnlvm.helpers.get_param_ptr(builder, self, params, self.parameters.function.name) + mf_params, mf_state = ctx.get_param_or_state_ptr(builder, + self, + self.parameters.function, + param_struct_ptr=params, + state_struct_ptr=state) main_function = ctx.import_llvm_function(self.function) builder.call(main_function, [mf_params, mf_state, arg_in, arg_out]) From f0f90b8e2e02ee3e8d3bfd3bee305a981deb3e83 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Tue, 2 Jan 2024 16:46:44 -0500 Subject: [PATCH 12/65] llvm, Component: Drop 'retain_old_simulation_data' from compiled structures Not used. 
Signed-off-by: Jan Vesely --- psyneulink/core/components/component.py | 1 + 1 file changed, 1 insertion(+) diff --git a/psyneulink/core/components/component.py b/psyneulink/core/components/component.py index 6e715788778..224ac4d2aed 100644 --- a/psyneulink/core/components/component.py +++ b/psyneulink/core/components/component.py @@ -1427,6 +1427,7 @@ def _get_compilation_params(self): "max_iterations", "duplicate_keys", "search_termination_function", "state_feature_function", "search_function", "weight", "exponent", "gating_signal_params", + "retain_old_simulation_data", # not used in compiled learning "learning_results", "learning_signal", "learning_signals", "error_matrix", "error_signal", "activation_input", From cdd50d0739e4a840e527e4281b93b28e9d88e19e Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Tue, 2 Jan 2024 17:08:55 -0500 Subject: [PATCH 13/65] llvm: Codestyle Signed-off-by: Jan Vesely --- psyneulink/core/llvm/codegen.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/psyneulink/core/llvm/codegen.py b/psyneulink/core/llvm/codegen.py index e7f84c62e9e..dd5a384d481 100644 --- a/psyneulink/core/llvm/codegen.py +++ b/psyneulink/core/llvm/codegen.py @@ -820,7 +820,8 @@ def gen_composition_exec(ctx, composition, *, tags:frozenset): for idx, node in enumerate(composition._all_nodes): node_state = builder.gep(nodes_states, [ctx.int32_ty(0), ctx.int32_ty(idx)]) - num_exec_locs[node] = helpers.get_state_ptr(builder, node, + num_exec_locs[node] = helpers.get_state_ptr(builder, + node, node_state, "num_executions") @@ -1056,7 +1057,7 @@ def gen_composition_run(ctx, composition, *, tags:frozenset): node_state = builder.gep(state, [ctx.int32_ty(0), ctx.int32_ty(0), ctx.int32_ty(idx)]) num_executions_ptr = helpers.get_state_ptr(builder, node, node_state, "num_executions") num_exec_time_ptr = builder.gep(num_executions_ptr, [ctx.int32_ty(0), ctx.int32_ty(TimeScale.RUN.value)]) - builder.store(num_exec_time_ptr.type.pointee(0), num_exec_time_ptr) + builder.store(num_exec_time_ptr.type.pointee(None), num_exec_time_ptr) # Allocate and initialize condition structure cond_gen = helpers.ConditionGenerator(ctx, composition) From c14c59605a0d6bbba1c2b4893b527c52d6acfd07 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Tue, 2 Jan 2024 00:55:10 -0500 Subject: [PATCH 14/65] llvm, ControlSignal: Drop unused cost calculation code Costs are combined in 'net_outcome' function of OCM. 
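For context, a simplified sketch of the relationship this relies on (not the exact PsyNeuLink code; the real computation is the OCM's compute_net_outcome): by default net_outcome subtracts the already-combined costs from the outcome, so a separate per-signal compiled 'costs' entry point has no caller.

    # Hedged sketch of how the OCM consumes costs:
    def net_outcome(outcome, combined_costs):
        return outcome - combined_costs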
Signed-off-by: Jan Vesely --- .../ports/modulatorysignals/controlsignal.py | 84 ------------------- 1 file changed, 84 deletions(-) diff --git a/psyneulink/core/components/ports/modulatorysignals/controlsignal.py b/psyneulink/core/components/ports/modulatorysignals/controlsignal.py index fe8f069bae6..2c3f401707b 100644 --- a/psyneulink/core/components/ports/modulatorysignals/controlsignal.py +++ b/psyneulink/core/components/ports/modulatorysignals/controlsignal.py @@ -1114,87 +1114,3 @@ def compute_costs(self, intensity, context=None): combined_cost = self.combine_costs_function(all_costs, context=context).astype(float) return max(0.0, combined_cost) - - def _gen_llvm_function(self, *, ctx:pnlvm.LLVMBuilderContext, - extra_args=[], tags:frozenset): - if "costs" in tags: - assert len(extra_args) == 0 - return self._gen_llvm_costs(ctx=ctx, tags=tags) - - return super()._gen_llvm_function(ctx=ctx, extra_args=extra_args, tags=tags) - - def _gen_llvm_costs(self, *, ctx:pnlvm.LLVMBuilderContext, tags:frozenset): - args = [ctx.get_param_struct_type(self).as_pointer(), - ctx.get_state_struct_type(self).as_pointer(), - ctx.get_input_struct_type(self).as_pointer()] - - assert "costs" in tags - builder = ctx.create_llvm_function(args, self, str(self) + "_costs", - tags=tags, - return_type=ctx.float_ty) - - params, state, arg_in = builder.function.args - - func_params = pnlvm.helpers.get_param_ptr(builder, self, params, - "function") - func_state = pnlvm.helpers.get_state_ptr(builder, self, state, - "function") - - # FIXME: This allows INTENSITY and NONE - assert self.cost_options & ~CostFunctions.INTENSITY == CostFunctions.NONE - - cfunc = ctx.import_llvm_function(self.function.combine_costs_fct) - cfunc_in = builder.alloca(cfunc.args[2].type.pointee, - name="combine_costs_func_in") - - # Set to 0 by default - builder.store(cfunc_in.type.pointee(None), cfunc_in) - - cost_funcs = 0 - if self.cost_options & CostFunctions.INTENSITY: - ifunc = ctx.import_llvm_function(self.function.intensity_cost_fct) - - ifunc_params = pnlvm.helpers.get_param_ptr(builder, self.function, - func_params, - "intensity_cost_fct") - ifunc_state = pnlvm.helpers.get_state_ptr(builder, self.function, - func_state, - "intensity_cost_fct") - # Port input is struct { data input, modulations } if there are modulations, - # otherwise it's just data_input - if len(self.mod_afferents) > 0: - ifunc_in = builder.gep(arg_in, [ctx.int32_ty(0), ctx.int32_ty(0)]) - else: - ifunc_in = arg_in - # point output to the proper slot in comb func input - assert cost_funcs == 0, "Intensity should be the first cost function!" 
- ifunc_out = builder.gep(cfunc_in, [ctx.int32_ty(0), ctx.int32_ty(cost_funcs)]) - if ifunc_out.type != ifunc.args[3].type: - warnings.warn("Shape mismatch: {} element of combination func input ({}) doesn't match INTENSITY cost output ({})".format( - cost_funcs, self.function.combine_costs_fct.defaults.variable, - self.function.intensity_cost_fct.defaults.value), - pnlvm.PNLCompilerWarning) - assert self.cost_options == CostFunctions.INTENSITY - ifunc_out = cfunc_in - - builder.call(ifunc, [ifunc_params, ifunc_state, ifunc_in, ifunc_out]) - - cost_funcs += 1 - - - # Call combination function - cfunc_params = pnlvm.helpers.get_param_ptr(builder, self.function, - func_params, - "combine_costs_fct") - cfunc_state = pnlvm.helpers.get_state_ptr(builder, self.function, - func_state, - "combine_costs_fct") - cfunc_out = builder.alloca(cfunc.args[3].type.pointee, - name="combine_costs_func_out") - builder.call(cfunc, [cfunc_params, cfunc_state, cfunc_in, cfunc_out]) - - - ret_val = pnlvm.helpers.load_extract_scalar_array_one(builder, cfunc_out) - builder.ret(ret_val) - - return builder.function From f64e302c28b3c42ae24aa199458722b561ea1945 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Tue, 2 Jan 2024 23:11:22 -0500 Subject: [PATCH 15/65] llvm, OCM: Use tracking API to access OCM params in evaluate Signed-off-by: Jan Vesely --- .../control/optimizationcontrolmechanism.py | 34 +++++++++++-------- 1 file changed, 19 insertions(+), 15 deletions(-) diff --git a/psyneulink/core/components/mechanisms/modulatory/control/optimizationcontrolmechanism.py b/psyneulink/core/components/mechanisms/modulatory/control/optimizationcontrolmechanism.py index 2c0559a93f4..3fd505d122d 100644 --- a/psyneulink/core/components/mechanisms/modulatory/control/optimizationcontrolmechanism.py +++ b/psyneulink/core/components/mechanisms/modulatory/control/optimizationcontrolmechanism.py @@ -3358,13 +3358,15 @@ def _gen_llvm_evaluate_alloc_range_function(self, *, ctx:pnlvm.LLVMBuilderContex nodes_params = pnlvm.helpers.get_param_ptr(builder, self.composition, params, "nodes") - my_idx = self.composition._get_node_index(self) - my_params = builder.gep(nodes_params, [ctx.int32_ty(0), - ctx.int32_ty(my_idx)]) - num_trials_per_estimate_ptr = pnlvm.helpers.get_param_ptr(builder, self, - my_params, "num_trials_per_estimate") + controller_idx = self.composition._get_node_index(self) + controller_params = builder.gep(nodes_params, + [ctx.int32_ty(0), ctx.int32_ty(controller_idx)]) + num_trials_per_estimate_ptr = ctx.get_param_or_state_ptr(builder, + self, + "num_trials_per_estimate", + param_struct_ptr=controller_params) func_params = pnlvm.helpers.get_param_ptr(builder, self, - my_params, "function") + controller_params, "function") search_space = pnlvm.helpers.get_param_ptr(builder, self.function, func_params, "search_space") @@ -3428,7 +3430,7 @@ def _gen_llvm_evaluate_function(self, *, ctx:pnlvm.LLVMBuilderContext, tags=froz assert self.composition.controller is self assert self.composition is self.agent_rep nodes_states = pnlvm.helpers.get_state_ptr(builder, self.composition, - comp_state, "nodes", None) + comp_state, "nodes") nodes_params = pnlvm.helpers.get_param_ptr(builder, self.composition, comp_params, "nodes") @@ -3442,15 +3444,16 @@ def _gen_llvm_evaluate_function(self, *, ctx:pnlvm.LLVMBuilderContext, tags=froz assert len(self.output_ports) == len(allocation_sample.type.pointee) controller_out = builder.gep(comp_data, [ctx.int32_ty(0), ctx.int32_ty(0), ctx.int32_ty(controller_idx)]) - all_op_state = 
pnlvm.helpers.get_state_ptr(builder, self, - controller_state, "output_ports") - all_op_params = pnlvm.helpers.get_param_ptr(builder, self, - controller_params, "output_ports") + all_op_params, all_op_states = ctx.get_param_or_state_ptr(builder, + self, + "output_ports", + param_struct_ptr=controller_params, + state_struct_ptr=controller_state) for i, op in enumerate(self.output_ports): op_idx = ctx.int32_ty(i) op_f = ctx.import_llvm_function(op, tags=frozenset({"simulation"})) - op_state = builder.gep(all_op_state, [ctx.int32_ty(0), op_idx]) + op_state = builder.gep(all_op_states, [ctx.int32_ty(0), op_idx]) op_params = builder.gep(all_op_params, [ctx.int32_ty(0), op_idx]) op_in = builder.alloca(op_f.args[2].type.pointee) op_out = builder.gep(controller_out, [ctx.int32_ty(0), op_idx]) @@ -3483,9 +3486,10 @@ def _gen_llvm_evaluate_function(self, *, ctx:pnlvm.LLVMBuilderContext, tags=froz # Determine simulation counts - num_trials_per_estimate_ptr = pnlvm.helpers.get_param_ptr(builder, self, - controller_params, - "num_trials_per_estimate") + num_trials_per_estimate_ptr = ctx.get_param_or_state_ptr(builder, + self, + "num_trials_per_estimate", + param_struct_ptr=controller_params) num_trials_per_estimate = builder.load(num_trials_per_estimate_ptr, "num_trials_per_estimate") From 716f43427e40689fceba58cafab9a14c843ba60a Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Wed, 3 Jan 2024 01:25:18 -0500 Subject: [PATCH 16/65] llvm, AutodiffComposition: Pass state pointer to all places that access learned matrices Signed-off-by: Jan Vesely --- .../library/compositions/compiledoptimizer.py | 10 ++++++---- .../library/compositions/pytorchwrappers.py | 15 +++++++-------- 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/psyneulink/library/compositions/compiledoptimizer.py b/psyneulink/library/compositions/compiledoptimizer.py index aea8d4ebd45..20aa5e673ca 100644 --- a/psyneulink/library/compositions/compiledoptimizer.py +++ b/psyneulink/library/compositions/compiledoptimizer.py @@ -90,10 +90,11 @@ def step(self, ctx): name = self._composition.name + "_ADAM_STEP" args = [self._get_optimizer_struct_type(ctx).as_pointer(), + ctx.get_state_struct_type(self._composition).as_pointer(), ctx.get_param_struct_type(self._composition).as_pointer()] builder = ctx.create_llvm_function(args, self, name) llvm_func = builder.function - optim_struct, params = llvm_func.args + optim_struct, state, params = llvm_func.args # setup values zero = ctx.int32_ty(0) @@ -182,7 +183,7 @@ def step(self, ctx): pnlvm.helpers.printf_float_matrix(builder, delta_w_ptr, prefix=f"grad val: {proj.sender._mechanism} -> {proj.receiver._mechanism}\n", override_debug=False) # this is messy - #TODO - cleanup this - weights_llvmlite = proj._extract_llvm_matrix(ctx, builder, params) + weights_llvmlite = proj._extract_llvm_matrix(ctx, builder, state, params) dim_x, dim_y = proj.matrix.shape weight_row = None @@ -233,10 +234,11 @@ def step(self, ctx): name = self._composition.name + "_SGD_STEP" args = [self._get_optimizer_struct_type(ctx).as_pointer(), + ctx.get_state_struct_type(self._composition).as_pointer(), ctx.get_param_struct_type(self._composition).as_pointer()] builder = ctx.create_llvm_function(args, self, name) llvm_func = builder.function - optim_struct, params = llvm_func.args + optim_struct, state, params = llvm_func.args zero = ctx.int32_ty(0) delta_w = builder.gep(optim_struct, [zero, ctx.int32_ty(self._DELTA_W_NUM)]) @@ -246,7 +248,7 @@ def step(self, ctx): # update weights for idx, proj in 
enumerate(self._pytorch_model.projection_wrappers): delta_w_ptr = builder.gep(delta_w, [zero, ctx.int32_ty(idx)]) - weights_llvmlite = proj._extract_llvm_matrix(ctx, builder, params) + weights_llvmlite = proj._extract_llvm_matrix(ctx, builder, state, params) multiplied_delta_w = gen_inject_mat_scalar_mult(ctx, builder, delta_w_ptr, lr) gen_inject_mat_sub(ctx, builder, weights_llvmlite, multiplied_delta_w, weights_llvmlite) diff --git a/psyneulink/library/compositions/pytorchwrappers.py b/psyneulink/library/compositions/pytorchwrappers.py index 7bf3f8f7579..3fa91f24034 100644 --- a/psyneulink/library/compositions/pytorchwrappers.py +++ b/psyneulink/library/compositions/pytorchwrappers.py @@ -377,7 +377,7 @@ def _gen_llvm_training_backprop(self, ctx, optimizer, loss): efferent_node = proj.receiver efferent_node_error = error_dict[efferent_node] - weights_llvmlite = proj._extract_llvm_matrix(ctx, builder, params) + weights_llvmlite = proj._extract_llvm_matrix(ctx, builder, state, params) if proj_idx == 0: gen_inject_vxm_transposed( @@ -406,7 +406,7 @@ def _gen_llvm_training_backprop(self, ctx, optimizer, loss): afferent_node_activation = builder.gep(model_output, [ctx.int32_ty(0), ctx.int32_ty(0), ctx.int32_ty(proj.sender._idx), ctx.int32_ty(0)]) # get dimensions of weight matrix - weights_llvmlite = proj._extract_llvm_matrix(ctx, builder, params) + weights_llvmlite = proj._extract_llvm_matrix(ctx, builder, state, params) pnlvm.helpers.printf_float_matrix(builder, weights_llvmlite, prefix= f"{proj.sender._mechanism} -> {proj.receiver._mechanism}\n", override_debug=False) # update delta_W node_delta_w = builder.gep(delta_w, [ctx.int32_ty(0), ctx.int32_ty(proj._idx)]) @@ -454,7 +454,7 @@ def _gen_llvm_training_function_body(self, ctx, builder, state, params, data): builder.call(optimizer_zero_grad, [optimizer_struct]) builder.call(backprop, [state, params, data, optimizer_struct]) - builder.call(optimizer_step_f, [optimizer_struct, params]) + builder.call(optimizer_step_f, [optimizer_struct, state, params]) def _get_compiled_optimizer(self): # setup optimizer @@ -770,10 +770,9 @@ def log_matrix(self): self._projection.parameters.matrix._set(detached_matrix, context=self._context) self._projection.parameter_ports['matrix'].parameters.value._set(detached_matrix, context=self._context) - def _extract_llvm_matrix(self, ctx, builder, params): - proj_params = builder.gep(params, [ctx.int32_ty(0), - ctx.int32_ty(1), - ctx.int32_ty(self._idx)]) + def _extract_llvm_matrix(self, ctx, builder, state, params): + proj_params = builder.gep(params, [ctx.int32_ty(0), ctx.int32_ty(1), ctx.int32_ty(self._idx)]) + proj_state = builder.gep(state, [ctx.int32_ty(0), ctx.int32_ty(1), ctx.int32_ty(self._idx)]) dim_x, dim_y = self.matrix.detach().numpy().shape proj_func = pnlvm.helpers.get_param_ptr(builder, self._projection, proj_params, "function") @@ -784,7 +783,7 @@ def _extract_llvm_matrix(self, ctx, builder, params): return proj_matrix def _gen_llvm_execute(self, ctx, builder, state, params, data): - proj_matrix = self._extract_llvm_matrix(ctx, builder, params) + proj_matrix = self._extract_llvm_matrix(ctx, builder, state, params) input_vec = builder.gep(data, [ctx.int32_ty(0), ctx.int32_ty(0), From 7c65b0ebc8911670804fa902f135f8cbd3c8f0ad Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Wed, 3 Jan 2024 02:08:13 -0500 Subject: [PATCH 17/65] llvm, AutodiffComposition: Use universal/tracked API to access parameter/state structures Signed-off-by: Jan Vesely --- .../library/compositions/pytorchwrappers.py | 25 
+++++++++++++++---- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/psyneulink/library/compositions/pytorchwrappers.py b/psyneulink/library/compositions/pytorchwrappers.py index 3fa91f24034..f94b017e6dd 100644 --- a/psyneulink/library/compositions/pytorchwrappers.py +++ b/psyneulink/library/compositions/pytorchwrappers.py @@ -690,10 +690,14 @@ def _gen_llvm_execute_derivative_func(self, ctx, builder, state, params, arg_in) ctx.int32_ty(0), ctx.int32_ty(self._idx)]) - f_params_ptr = pnlvm.helpers.get_param_ptr(builder, self._mechanism, mech_params, "function") + f_params, f_state = ctx.get_param_or_state_ptr(builder, + self._mechanism, + "function", + param_struct_ptr=mech_params, + state_struct_ptr=mech_state) + f_params, builder = self._mechanism._gen_llvm_param_ports_for_obj( - self._mechanism.function, f_params_ptr, ctx, builder, mech_params, mech_state, mech_input) - f_state = pnlvm.helpers.get_state_ptr(builder, self._mechanism, mech_state, "function") + self._mechanism.function, f_params, ctx, builder, mech_params, mech_state, mech_input) output, _ = self._mechanism._gen_llvm_invoke_function(ctx, builder, self._mechanism.function, f_params, f_state, mech_input, None, @@ -775,8 +779,19 @@ def _extract_llvm_matrix(self, ctx, builder, state, params): proj_state = builder.gep(state, [ctx.int32_ty(0), ctx.int32_ty(1), ctx.int32_ty(self._idx)]) dim_x, dim_y = self.matrix.detach().numpy().shape - proj_func = pnlvm.helpers.get_param_ptr(builder, self._projection, proj_params, "function") - proj_matrix = pnlvm.helpers.get_param_ptr(builder, self._projection.function, proj_func, "matrix") + + func_p, func_s = ctx.get_param_or_state_ptr(builder, + self._projection, + self._projection.parameters.function, + param_struct_ptr=proj_params, + state_struct_ptr=proj_state) + + proj_matrix = ctx.get_param_or_state_ptr(builder, + self._projection.function, + self._projection.function.parameters.matrix, + param_struct_ptr=func_p, + state_struct_ptr=func_s) + proj_matrix = builder.bitcast(proj_matrix, pnlvm.ir.types.ArrayType( pnlvm.ir.types.ArrayType(ctx.float_ty, dim_y), dim_x).as_pointer()) From 772f26503357e083d2ee1b7da9b67733adf4f4ec Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 3 Jan 2024 14:43:42 +0000 Subject: [PATCH 18/65] requirements: update pillow requirement from <10.2.0 to <10.3.0 (#2872) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index c2a42c9b4da..d9e4a43687f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -14,7 +14,7 @@ numpy>=1.21.0, <1.24.5 optuna<3.4.0 packaging<24.0 pandas<2.1.5 -pillow<10.2.0 +pillow<10.3.0 pint<0.22.0 protobuf<3.20.4 rich>=10.1, <10.13 From 4fffbabaf72ce737444dfc38ea2ba04c24e13c05 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Thu, 4 Jan 2024 12:54:40 -0500 Subject: [PATCH 19/65] PECOptimizationFunction: Use opt_func instead of self.method to construct optuna study They currently refer to the same object but 'opt_func' was passed explicitly to the '_fit_optuna' method and can theoretically be different. 
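For illustration, a minimal standalone version of the corrected call (the objective and surrounding names are invented; only the optuna usage mirrors the fix):

    import optuna

    def fit(opt_func, direction="minimize"):
        # Use the sampler that was passed in ('opt_func'), not self.method,
        # even though the two currently alias the same object.
        study = optuna.create_study(sampler=opt_func, direction=direction)
        study.optimize(lambda trial: trial.suggest_float("x", -1.0, 1.0) ** 2,
                       n_trials=10)
        return study.best_params

    fit(optuna.samplers.RandomSampler(seed=0))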
Signed-off-by: Jan Vesely --- .../core/components/functions/nonstateful/fitfunctions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/psyneulink/core/components/functions/nonstateful/fitfunctions.py b/psyneulink/core/components/functions/nonstateful/fitfunctions.py index 86c5523d786..b54af944c10 100644 --- a/psyneulink/core/components/functions/nonstateful/fitfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/fitfunctions.py @@ -806,7 +806,7 @@ def progress_callback(study, trial): optuna.logging.set_verbosity(optuna.logging.WARNING) study = optuna.create_study( - sampler=self.method, direction=self.direction + sampler=opt_func, direction=self.direction ) study.optimize( objfunc_wrapper_wrapper, From 760e380baebc860c4eee39e5fd62990d47be14c9 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Thu, 4 Jan 2024 13:55:07 -0500 Subject: [PATCH 20/65] tests/ParameterEstimationComposition: Use fixed seed for optuna sampler instances The test is using instantiated samplers so it needs to provide fixed seeds to guarantee deterministic behaviour. Passes 100 iterations of test_parameter_optimization_ddm[LLVM-optuna_cmaes_sampler] without failures. Bug: https://github.com/PrincetonUniversity/PsyNeuLink/issues/2874 Signed-off-by: Jan Vesely --- tests/composition/test_parameterestimationcomposition.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/composition/test_parameterestimationcomposition.py b/tests/composition/test_parameterestimationcomposition.py index 0dcde7a5562..0fd45cb4bc3 100644 --- a/tests/composition/test_parameterestimationcomposition.py +++ b/tests/composition/test_parameterestimationcomposition.py @@ -128,8 +128,8 @@ def test_pec_run_input_formats(inputs_dict, error_msg): "opt_method, result", [ ("differential_evolution", [0.010363518438648106]), - (optuna.samplers.RandomSampler(), [0.01]), - (optuna.samplers.CmaEsSampler(), [0.01]), + (optuna.samplers.RandomSampler(seed=0), [0.01]), + (optuna.samplers.CmaEsSampler(seed=0), [0.01]), ], ids=["differential_evolultion", "optuna_random_sampler", "optuna_cmaes_sampler"], ) From 3de156c2ccd2f5cd9e361bceeb45e52e56f19522 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sun, 31 Dec 2023 15:23:38 -0500 Subject: [PATCH 21/65] tests/Composition: Check that running/executing with no inputs produces a warning Fixes 2 instances of: UserWarning: No inputs provided in call to ... 
Signed-off-by: Jan Vesely --- tests/composition/test_composition.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/tests/composition/test_composition.py b/tests/composition/test_composition.py index 40ce2f5beea..6f2381f3772 100644 --- a/tests/composition/test_composition.py +++ b/tests/composition/test_composition.py @@ -3833,7 +3833,10 @@ def test_execute_no_inputs(self, mode): inner_comp = Composition(pathways=[m_inner]) m_outer = ProcessingMechanism(size=2) outer_comp = Composition(pathways=[m_outer, inner_comp]) - result = outer_comp.run(execution_mode=mode) + + with pytest.warns(UserWarning, match="No inputs provided in call"): + result = outer_comp.run(execution_mode=mode) + np.testing.assert_allclose(result, [[0.0, 0.0]]) @pytest.mark.composition @@ -3842,7 +3845,10 @@ def test_run_no_inputs(self, comp_mode): inner_comp = Composition(pathways=[m_inner]) m_outer = ProcessingMechanism(size=2) outer_comp = Composition(pathways=[m_outer, inner_comp]) - result = outer_comp.run(execution_mode=comp_mode) + + with pytest.warns(UserWarning, match="No inputs provided in call"): + result = outer_comp.run(execution_mode=comp_mode) + np.testing.assert_allclose(result, [[0.0, 0.0]]) def test_lpp_invalid_matrix_keyword(self): From 41b041ce0f62f8e4303eeb571915c3fac69b784c Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sun, 31 Dec 2023 17:24:48 -0500 Subject: [PATCH 22/65] tests/learning: Enable node_spec_types test. Simplify parametrization ids. Fixes: SyntaxWarning: "is" with a literal. Did you mean "=="? Signed-off-by: Jan Vesely --- tests/composition/test_learning.py | 26 ++++++++++---------------- 1 file changed, 10 insertions(+), 16 deletions(-) diff --git a/tests/composition/test_learning.py b/tests/composition/test_learning.py index f46b7472c2e..2cd856be390 100644 --- a/tests/composition/test_learning.py +++ b/tests/composition/test_learning.py @@ -36,8 +36,9 @@ def xor_network(): matrix=np.full((10,1), 0.1), sender=hidden_layer, receiver=output_layer) - inputs = np.array([[0, 0],[0, 1],[1, 0],[1, 1]]) - targets = np.array([[0],[1],[1],[0]]) + inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]]) + targets = np.array([[0], [1], [1], [0]]) + def _get_comp_type(comp_type, comp_learning_rate, pathway_learning_rate): if comp_type == 'composition': xor = Composition(learning_rate=comp_learning_rate) @@ -65,18 +66,15 @@ def _get_comp_type(comp_type, comp_learning_rate, pathway_learning_rate): class TestInputAndTargetSpecs: @pytest.mark.pytorch - @pytest.mark.parametrize('input_type', ['dict', 'func', 'gen', 'gen_func'], - ids=['dict', 'func', 'gen', 'gen_func']) + @pytest.mark.parametrize('input_type', ['dict', 'func', 'gen', 'gen_func']) @pytest.mark.parametrize('exec_mode', [pnl.ExecutionMode.PyTorch, pnl.ExecutionMode.LLVMRun, - pnl.ExecutionMode.Python], - ids=['PyTorch', 'LLVM', 'Python']) - @pytest.mark.parametrize('comp_type', ['composition', 'autodiff'], - ids=['composition', 'autodiff']) - def node_spec_types(self, xor_network, comp_type, input_type, exec_mode): + pnl.ExecutionMode.Python]) + @pytest.mark.parametrize('comp_type', ['composition', 'autodiff']) + def test_node_spec_types(self, xor_network, comp_type, input_type, exec_mode): if comp_type == 'composition' and exec_mode != pnl.ExecutionMode.Python: - pytest.skip(f"Execution mode {exec_mode} not relevant for Composition") + pytest.skip(f"Execution mode {exec_mode} not relevant for Composition learn") comp, input_layer, hidden_layer, output_layer, target_mechanism, stims, targets =\ 
xor_network(comp_type, 0.001, None) @@ -113,12 +111,8 @@ def get_inputs_gen(): else: assert False, f"Unrecognized input_type: {input_type}" - expected_results = [[0.6341436044849351]] - if comp_type is 'composition': - results = comp.learn(inputs=inputs) - else: - results = comp.learn(inputs=inputs, execution_mode=exec_mode) - np.testing.assert_allclose(results, expected_results) + results = comp.learn(inputs=inputs, execution_mode=exec_mode) + np.testing.assert_allclose(results, [[0.6341436044849351]]) @pytest.mark.composition @pytest.mark.pytorch From f83218ded3351a6d92f06bae527fee4e7a66c946 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Wed, 3 Jan 2024 15:34:54 -0500 Subject: [PATCH 23/65] tests/LCA: Add invocation wrapper to use in benchmark fixture Return both result and num_executions_before_finished. Fixes: PytestBenchmarkWarning: Benchmark fixture was not used at all in this test! Signed-off-by: Jan Vesely --- tests/mechanisms/test_lca.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/tests/mechanisms/test_lca.py b/tests/mechanisms/test_lca.py index 7374f1e0679..9996dca42d6 100644 --- a/tests/mechanisms/test_lca.py +++ b/tests/mechanisms/test_lca.py @@ -185,12 +185,14 @@ def test_LCAMechanism_threshold_with_convergence(self, benchmark, comp_mode): comp = Composition() comp.add_node(lca) - result = comp.run(inputs={lca:[0,1,2]}, execution_mode=comp_mode) - np.testing.assert_allclose(result, [[0.19153799, 0.5, 0.80846201]]) + def func(*args, **kwargs): + res = comp.run(*args, **kwargs) + return (res, lca.num_executions_before_finished) + + results = benchmark(func, inputs={lca:[0,1,2]}, execution_mode=comp_mode) + np.testing.assert_allclose(results[0], [[0.19153799, 0.5, 0.80846201]]) if comp_mode is pnl.ExecutionMode.Python: - assert lca.num_executions_before_finished == 18 - if benchmark.enabled: - benchmark(comp.run, inputs={lca:[0,1,2]}, execution_mode=comp_mode) + assert results[1] == 18 @pytest.mark.composition @pytest.mark.lca_mechanism From a6242aa515f1340cbe90e24f4edac3ee4b277cc6 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Wed, 3 Jan 2024 16:38:10 -0500 Subject: [PATCH 24/65] tests/control: Only consider first 10 values in checks Makes the test benchmark agnostic. Remove explicit benchmark re-run from the test. Fixes: PytestBenchmarkWarning: Benchmark fixture was not used at all in this test! 
Signed-off-by: Jan Vesely --- tests/composition/test_control.py | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/tests/composition/test_control.py b/tests/composition/test_control.py index bda69ad4353..68f96ae8fed 100644 --- a/tests/composition/test_control.py +++ b/tests/composition/test_control.py @@ -3710,22 +3710,17 @@ def test_grid_search_random_selection(self, comp_mode, benchmark): inputs = {A: [[[1.0]]]} - comp.run(inputs=inputs, num_trials=10, context='outer_comp', execution_mode=comp_mode) - np.testing.assert_allclose(comp.results, [[[0.7310585786300049]], [[0.999999694097773]], [[0.999999694097773]], [[0.9999999979388463]], [[0.9999999979388463]], [[0.999999694097773]], [[0.9999999979388463]], [[0.999999999986112]], [[0.999999694097773]], [[0.9999999999999993]]]) + benchmark(comp.run, inputs=inputs, num_trials=10, context='outer_comp', execution_mode=comp_mode) + np.testing.assert_allclose(comp.results[:10], + [[[0.7310585786300049]], [[0.999999694097773]], [[0.999999694097773]], [[0.9999999979388463]], [[0.9999999979388463]], + [[0.999999694097773]], [[0.9999999979388463]], [[0.999999999986112]], [[0.999999694097773]], [[0.9999999999999993]]]) # control signal value (mod slope) is chosen randomly from all of the control signal values # that correspond to a net outcome of 1 if comp_mode is pnl.ExecutionMode.Python: log_arr = A.log.nparray_dictionary() np.testing.assert_allclose([[1.], [15.], [15.], [20.], [20.], [15.], [20.], [25.], [15.], [35.]], - log_arr['outer_comp']['mod_slope']) - - if benchmark.enabled: - # Disable logging for the benchmark run - A.log.set_log_conditions(items="mod_slope", log_condition=LogCondition.OFF) - A.log.clear_entries() - benchmark(comp.run, inputs=inputs, num_trials=10, context='bench_outer_comp', execution_mode=comp_mode) - assert len(A.log.get_logged_entries()) == 0 + log_arr['outer_comp']['mod_slope'][:10]) def test_input_CIM_assignment(self, comp_mode): From 52c89dca508e545f6a2519cba9270775bba5cf6e Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sat, 6 Jan 2024 15:07:19 -0500 Subject: [PATCH 25/65] setup: Remove stale warning filter. Having multiple parameter ports for parameters of the same name is now an error. There is no difference in emitted warnings with or without this filter. Signed-off-by: Jan Vesely --- setup.cfg | 1 - 1 file changed, 1 deletion(-) diff --git a/setup.cfg b/setup.cfg index 36b832f292d..318879c8f93 100644 --- a/setup.cfg +++ b/setup.cfg @@ -69,7 +69,6 @@ xfail_strict = True filterwarnings = error:Creating an ndarray from ragged nested sequences \(which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes\) is deprecated.*:numpy.VisibleDeprecationWarning error:Invalid escape sequence - ignore:Multiple ParameterPorts:UserWarning [pycodestyle] # for code explanation see https://pep8.readthedocs.io/en/latest/intro.html#error-codes From fc75cd2248efa3d6534f54167dffad61dceeb52b Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sat, 6 Jan 2024 15:14:04 -0500 Subject: [PATCH 26/65] tests/Composition: Remove spurious return statement Tests should not return anything. Fixes: PytestReturnNotNoneWarning: Expected None, but tests/composition/test_composition.py::TestNestedCompositions::test_invalid_projection_deletion_when_nesting_comps returned (Composition ocomp), which will be an error in a future version of pytest. Did you mean to use `assert` instead of `return`? 
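The general shape of the fix, as a self-contained illustration (names invented):

    def build_composition():
        return object()                  # stand-in for the real setup

    def test_bad():
        return build_composition()       # non-None return triggers the warning

    def test_good():
        assert build_composition() is not None   # assert instead of return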
Signed-off-by: Jan Vesely --- tests/composition/test_composition.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/composition/test_composition.py b/tests/composition/test_composition.py index 6f2381f3772..e8178ae6e52 100644 --- a/tests/composition/test_composition.py +++ b/tests/composition/test_composition.py @@ -4928,7 +4928,7 @@ def test_invalid_projection_deletion_when_nesting_comps(self): allocation_samples=pnl.SampleSpec(start=1.0, stop=5.0, num=5))]) ) assert not ocomp._check_for_existing_projections(sender=ib, receiver=ocomp_objective_mechanism) - return ocomp + # # Does not work yet due to initialize_cycle_values bug that causes first recurrent projection to pass different values # # to TranfserMechanism version vs Logistic fn + AdaptiveIntegrator fn version # def test_recurrent_transfer_mechanism_composition(self): From dbb30cb326df7e49b365a464c48f73a10b257c81 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sat, 6 Jan 2024 20:33:47 -0500 Subject: [PATCH 27/65] tests: Don't use deprecated pytest.warns(None) The construct has been deprecated in pytest warns about it: PytestRemovedIn8Warning: Passing None has been deprecated. See https://docs.pytest.org/en/latest/how-to/capture-warnings.html#additional-use-cases-of-warnings-in-tests for alternatives in common use cases. Address three different situations in tests in a way that does not hide unrelated warnings. * test_composition.py::*_subset_duplicate_warnings: capture all warnings and search them for the expected message if verbosity == True assert that there are no warnings if verbosity == False * test_projection_specifications.py::test_no_warning_when_matrix_specified: Use filterwarnings mark to change the undesired warning into an error * test_control.py::test_model_based_num_estimates Use null context or pytest.warns context based on whether warning is expected Signed-off-by: Jan Vesely --- tests/composition/test_composition.py | 46 +++++++++++-------- tests/composition/test_control.py | 22 ++++----- .../test_projection_specifications.py | 32 ++++++------- 3 files changed, 53 insertions(+), 47 deletions(-) diff --git a/tests/composition/test_composition.py b/tests/composition/test_composition.py index e8178ae6e52..29c1af6658d 100644 --- a/tests/composition/test_composition.py +++ b/tests/composition/test_composition.py @@ -1185,19 +1185,22 @@ def test_add_processing_pathway_subset_duplicate_warning(self, verbosity): C = TransferMechanism() comp = Composition() - comp.add_linear_processing_pathway(pathway=[A,B,C]) + comp.add_linear_processing_pathway(pathway=[A, B, C]) + comp.verbosePref = PreferenceEntry(verbosity, PreferenceLevel.INSTANCE) + + with warnings.catch_warnings(record=True) as msgs: + comp.add_linear_processing_pathway(pathway=[A, B]) - # Test for warning if verbosePref is set to True if verbosity: - regexp = f"Pathway specified in 'pathway' arg for add_linear_processing_pathway method of '{comp.name}' " \ - f"has a subset of nodes in a Pathway already in '{comp.name}': Pathway-0; the latter will be used." - with pytest.warns(UserWarning, match=regexp): - comp.verbosePref = PreferenceEntry(True, PreferenceLevel.INSTANCE) - comp.add_linear_processing_pathway(pathway=[A,B]) + # Test for warning if verbosePref is set to True + warning = f"Pathway specified in 'pathway' arg for add_linear_processing_pathway method of '{comp.name}' " \ + f"has a subset of nodes in a Pathway already in '{comp.name}': Pathway-0; the latter will be used." 
+ + # The above issues 2 warnings, but we only test for one of them here + assert any(str(m.message) == warning for m in msgs), list(str(m.message) for m in msgs) else: - # Test for suppression of warning if verbosePref not set - with pytest.warns(None): - comp.add_linear_processing_pathway(pathway=[A,B]) + # Test for suppression of warning if verbosePref is not set + assert len(msgs) == 0 def test_add_backpropagation_pathway_exact_duplicate_warning(self): A = TransferMechanism() @@ -1230,19 +1233,24 @@ def test_add_backpropagation_pathway_contiguous_subset_duplicate_warning(self, v B = TransferMechanism() C = TransferMechanism() comp = Composition() - comp.add_backpropagation_learning_pathway(pathway=[A,B,C]) + comp.add_backpropagation_learning_pathway(pathway=[A, B, C]) + + comp.verbosePref = PreferenceEntry(verbosity, PreferenceLevel.INSTANCE) + + with warnings.catch_warnings(record=True) as msgs: + comp.add_backpropagation_learning_pathway(pathway=[A, B]) - # Test for warning if verbosePref is set to True if verbosity: - regexp = f"Pathway specified in 'pathway' arg for add_backpropagation_learning_pathway method of '{comp.name}'" \ - f" has a subset of nodes in a Pathway already in '{comp.name}':.*; the latter will be used." - with pytest.warns(UserWarning, match=regexp): - comp.verbosePref = PreferenceEntry(True, PreferenceLevel.INSTANCE) - comp.add_backpropagation_learning_pathway(pathway=[A,B]) + # Test for warning if verbosePref is set to True + warning = f"Pathway specified in 'pathway' arg for add_backpropagation_learning_pathway method of '{comp.name}'" \ + f" has a subset of nodes in a Pathway already in '{comp.name}': Pathway-0; the latter will be used." + + # The above issues 2 warnings, but we only test for one of them here + assert any(str(m.message) == warning for m in msgs), list(str(m.message) for m in msgs) else: # Test for suppression of warning if verbosePref is not set - with pytest.warns(None): - comp.add_backpropagation_learning_pathway(pathway=[A,B]) + assert len(msgs) == 0 + def test_add_processing_pathway_non_contiguous_subset_is_OK(self): A = TransferMechanism() diff --git a/tests/composition/test_control.py b/tests/composition/test_control.py index 68f96ae8fed..0533cd934c2 100644 --- a/tests/composition/test_control.py +++ b/tests/composition/test_control.py @@ -1,5 +1,5 @@ +import contextlib import re - import numpy as np import pytest @@ -3587,27 +3587,27 @@ def test_model_based_num_estimates(self, num_estimates, rand_var): intensity_cost_function=pnl.Linear(slope=0.)) objective_mech = pnl.ObjectiveMechanism(monitor=[B]) - warning_type = None + warning_msg = f"'OptimizationControlMechanism-0' has 'num_estimates = {num_estimates}' specified, " \ + f"but its 'agent_rep' \\('comp'\\) has no random variables: " \ + f"'RANDOMIZATION_CONTROL_SIGNAL' will not be created, and num_estimates set to None." 
+ if num_estimates and not rand_var: - warning_type = UserWarning - warning_msg = f'"\'OptimizationControlMechanism-0\' has \'num_estimates = {num_estimates}\' specified, ' \ - f'but its \'agent_rep\' (\'comp\') has no random variables: ' \ - f'\'RANDOMIZATION_CONTROL_SIGNAL\' will not be created, and num_estimates set to None."' - with pytest.warns(warning_type) as warnings: + warning_context = pytest.warns(UserWarning, match=warning_msg) + else: + warning_context = contextlib.nullcontext() + + with warning_context: ocm = pnl.OptimizationControlMechanism(agent_rep=comp, state_features=[A.input_port], objective_mechanism=objective_mech, function=pnl.GridSearch(), num_estimates=num_estimates, control_signals=[control_signal]) - if warning_type: - assert any(warning_msg == repr(w.message.args[0]) for w in warnings) comp.add_controller(ocm) inputs = {A: [[[1.0]]]} - comp.run(inputs=inputs, - num_trials=2) + comp.run(inputs=inputs, num_trials=2) if not num_estimates or not rand_var: assert pnl.RANDOMIZATION_CONTROL_SIGNAL not in comp.controller.control_signals # Confirm no estimates diff --git a/tests/projections/test_projection_specifications.py b/tests/projections/test_projection_specifications.py index b1602f131fe..adee0838155 100644 --- a/tests/projections/test_projection_specifications.py +++ b/tests/projections/test_projection_specifications.py @@ -462,25 +462,23 @@ def test_formats_for_gating_specification_of_input_and_output_ports(self, input_ # assert 'Primary OutputPort of ControlMechanism-1 (ControlSignal-0) ' \ # 'cannot be used as a sender of a Projection to OutputPort of T2' in error_text.value.args[0] + @pytest.mark.filterwarnings("error:elementwise comparison failed; returning scalar instead:") def test_no_warning_when_matrix_specified(self): - with pytest.warns(None) as w: - c = pnl.Composition() - m0 = pnl.ProcessingMechanism( - default_variable=[0, 0, 0, 0] - ) - p0 = pnl.MappingProjection( - matrix=[[0, 0, 0, 0], - [0, 0, 0, 0], - [0, 0, 0, 0], - [0, 0, 0, 0]] - ) - m1 = pnl.TransferMechanism( - default_variable=[0, 0, 0, 0] - ) - c.add_linear_processing_pathway([m0, p0, m1]) - for warn in w: - assert 'elementwise comparison failed; returning scalar instead' not in warn.message.args[0] + c = pnl.Composition() + m0 = pnl.ProcessingMechanism( + default_variable=[0, 0, 0, 0] + ) + p0 = pnl.MappingProjection( + matrix=[[0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0]] + ) + m1 = pnl.TransferMechanism( + default_variable=[0, 0, 0, 0] + ) + c.add_linear_processing_pathway([m0, p0, m1]) # KDM: this is a good candidate for pytest.parametrize def test_masked_mapping_projection(self): From a532afb7147dd7449719dcc10172671415979691 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sun, 7 Jan 2024 09:59:41 -0500 Subject: [PATCH 28/65] tests/MemoryFunctions: Escape special characters in regexp Square brackets '[' denote a match set and need to be escaped. Fixes: FutureWarning: Possible nested set at position ... 
Signed-off-by: Jan Vesely --- tests/functions/test_memory.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/functions/test_memory.py b/tests/functions/test_memory.py index 2ccb11e032a..c4cc72707d5 100644 --- a/tests/functions/test_memory.py +++ b/tests/functions/test_memory.py @@ -444,7 +444,7 @@ def test_DictionaryMemory_without_assoc(self): def test_DictionaryMemory_with_duplicate_entry_in_initializer_warning(self): - regexp = r'Attempt to initialize memory of DictionaryMemory with an entry \([[1 2 3]' + regexp = r'Attempt to initialize memory of DictionaryMemory with an entry \(\[\[1 2 3\]' with pytest.warns(UserWarning, match=regexp): em = EpisodicMemoryMechanism( name='EPISODIC MEMORY MECH', @@ -1034,7 +1034,7 @@ def test_ContentAddressableMemory_without_initializer_and_diff_field_sizes(self) def test_ContentAddressableMemory_with_duplicate_entry_in_initializer_warning(self): - regexp = r'Attempt to initialize memory of ContentAddressableMemory with an entry \([[1 2 3]' + regexp = r'Attempt to initialize memory of ContentAddressableMemory with an entry \(\[\[1 2 3\]' with pytest.warns(UserWarning, match=regexp): c = ContentAddressableMemory( initializer=np.array([[[1,2,3], [4,5,6]], From 58b7f4569127b65465342820e507f4b20e465ce6 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Thu, 11 Jan 2024 23:12:26 +0000 Subject: [PATCH 29/65] MDF: write_mdf_file: clarify documentation on fmt arg --- psyneulink/core/globals/mdf.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/psyneulink/core/globals/mdf.py b/psyneulink/core/globals/mdf.py index d898bb8394a..51928584f1a 100644 --- a/psyneulink/core/globals/mdf.py +++ b/psyneulink/core/globals/mdf.py @@ -1556,7 +1556,9 @@ def write_mdf_file(compositions, filename: str, path: str = None, fmt: str = Non not specified then the current directory is used. fmt : str - specifies file format of output. Current options ('json', 'yml'/'yaml') + specifies file format of output. Auto-detect based on + **filename** extension if None. + Current options: 'json', 'yml'/'yaml' simple_edge_format : bool specifies use of From 1fea7a97f78fd279a21c56294ab61807aaef5f1a Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Thu, 11 Jan 2024 23:12:48 +0000 Subject: [PATCH 30/65] MDF: write_mdf_file: fix bug in detecting filename extension --- psyneulink/core/globals/mdf.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/psyneulink/core/globals/mdf.py b/psyneulink/core/globals/mdf.py index 51928584f1a..bfe11388124 100644 --- a/psyneulink/core/globals/mdf.py +++ b/psyneulink/core/globals/mdf.py @@ -1569,8 +1569,8 @@ def write_mdf_file(compositions, filename: str, path: str = None, fmt: str = Non if fmt is None: try: - fmt = re.match(r'(.*)\.(.*)$', filename).groups(1) - except AttributeError: + fmt = re.match(r'(.*)\.(.*)$', filename).groups()[1] + except (AttributeError, IndexError): fmt = 'json' if path is not None: From bc061140f28a9d596cc56d85ed431326990df74f Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Tue, 9 Jan 2024 08:02:45 +0000 Subject: [PATCH 31/65] MDF: add NodeRole.LEARNING as an exclusion for mechanisms LearningMechanisms were already excluded, but auto-generated target mechanisms (as ProcessingMechanism) were not.
This affected result shapes --- psyneulink/core/globals/mdf.py | 48 +++++++++++++++++++++------------- 1 file changed, 30 insertions(+), 18 deletions(-) diff --git a/psyneulink/core/globals/mdf.py b/psyneulink/core/globals/mdf.py index bfe11388124..1a1d9f7e333 100644 --- a/psyneulink/core/globals/mdf.py +++ b/psyneulink/core/globals/mdf.py @@ -954,6 +954,10 @@ def _generate_composition_string(graph, component_identifiers): psyneulink.LearningMechanism, psyneulink.LearningProjection, ) + implicit_roles = ( + psyneulink.NodeRole.LEARNING, + ) + output = [] comp_identifer = parse_valid_identifier(graph.id) @@ -1090,6 +1094,22 @@ def alphabetical_order(items): control_mechanisms = [] implicit_mechanisms = [] + try: + node_roles = { + parse_valid_identifier(node): role for (node, role) in + graph.metadata['required_node_roles'] + } + except KeyError: + node_roles = [] + + try: + excluded_node_roles = { + parse_valid_identifier(node): role for (node, role) in + graph.metadata['excluded_node_roles'] + } + except KeyError: + excluded_node_roles = [] + # add nested compositions and mechanisms in order they were added # to this composition for node in sorted( @@ -1104,10 +1124,19 @@ def alphabetical_order(items): except (AttributeError, KeyError): component_type = default_node_type identifier = parse_valid_identifier(node.id) + + try: + node_role = eval(_parse_parameter_value(node_roles[identifier])) + except (KeyError, TypeError): + node_role = None + if issubclass(component_type, control_mechanism_types): control_mechanisms.append(node) component_identifiers[identifier] = True - elif issubclass(component_type, implicit_types): + elif ( + issubclass(component_type, implicit_types) + or node_role in implicit_roles + ): implicit_mechanisms.append(node) else: mechanisms.append(node) @@ -1166,23 +1195,6 @@ def alphabetical_order(items): if len(compositions) > 0: output.append('') - # generate string to add the nodes to this Composition - try: - node_roles = { - parse_valid_identifier(node): role for (node, role) in - graph.metadata['required_node_roles'] - } - except KeyError: - node_roles = [] - - try: - excluded_node_roles = { - parse_valid_identifier(node): role for (node, role) in - graph.metadata['excluded_node_roles'] - } - except KeyError: - excluded_node_roles = [] - # do not add the controller as a normal node try: controller_name = graph.metadata['controller']['id'] From 55eff7f6ae828e30179febe7e8001b4b2cfaa68e Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Sat, 6 Jan 2024 05:43:23 +0000 Subject: [PATCH 32/65] MDF: fix bug in detecting imported modules in script --- psyneulink/core/globals/mdf.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/psyneulink/core/globals/mdf.py b/psyneulink/core/globals/mdf.py index 1a1d9f7e333..a422db02620 100644 --- a/psyneulink/core/globals/mdf.py +++ b/psyneulink/core/globals/mdf.py @@ -1395,10 +1395,11 @@ def get_declared_identifiers(model): for i in range(len(comp_strs)): # greedy and non-greedy for cs in comp_strs[i]: - potential_module_names = set([ + cs_potential_names = set([ *re.findall(r'([A-Za-z_\.]+)\.', cs), *re.findall(r'([A-Za-z_\.]+?)\.', cs) ]) + potential_module_names.update(cs_potential_names) for module in potential_module_names: if module not in component_identifiers: From 12cffacce160510f30f652646faf323dfa338545 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sun, 7 Jan 2024 11:23:45 -0500 Subject: [PATCH 33/65] Functions/MemoryFunctions: Do not use comparisons to empty list Convert tested value to np.array and 
check its size. Fixes: DeprecationWarning: The truth value of an empty array is ambiguous. ... DeprecationWarning: elementwise comparison failed; ... Signed-off-by: Jan Vesely --- .../core/components/functions/stateful/memoryfunctions.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/psyneulink/core/components/functions/stateful/memoryfunctions.py b/psyneulink/core/components/functions/stateful/memoryfunctions.py index bdd7c81d369..c87886b5c52 100644 --- a/psyneulink/core/components/functions/stateful/memoryfunctions.py +++ b/psyneulink/core/components/functions/stateful/memoryfunctions.py @@ -301,7 +301,7 @@ def reset(self, previous_value=None, context=None): if previous_value is None: previous_value = self._get_current_parameter_value("initializer", context) - if previous_value is None or previous_value == []: + if previous_value is None or np.asarray(previous_value).size == 0: self.parameters.previous_value._get(context).clear() value = deque([], maxlen=self.parameters.history.get(context)) @@ -1752,7 +1752,7 @@ def _get_distance(self, cue:Union[list, np.ndarray], field_weights = self._get_current_parameter_value('distance_field_weights', context) # Set any items in field_weights to None if they are None or an empty list: field_weights = np.atleast_1d([None if - fw is None or fw == [] or isinstance(fw, np.ndarray) and fw.tolist()==[] + fw is None or np.asarray(fw).size == 0 else fw for fw in field_weights]) if granularity == 'per_field': @@ -1763,7 +1763,7 @@ def _get_distance(self, cue:Union[list, np.ndarray], if len(field_weights)==1: field_weights = np.full(num_fields, field_weights[0]) for i in range(num_fields): - if not any([item is None or item == [] or isinstance(item, np.ndarray) and item.tolist() == [] + if not any([item is None or np.asarray(item).size == 0 for item in [cue[i], candidate[i], field_weights[i]]]): distances_by_field[i] = distance_fct([cue[i], candidate[i]]) * field_weights[i] return list(distances_by_field) @@ -2623,7 +2623,7 @@ def reset(self, previous_value=None, context=None): if previous_value is None: previous_value = self._get_current_parameter_value("initializer", context) - if previous_value == []: + if np.asarray(previous_value).size == 0: value = np.ndarray(shape=(2, 0, len(self.defaults.variable[0]))) self.parameters.previous_value._set(copy.deepcopy(value), context) From fcd842c13812382cff4808b34d3a2efb02bb3657 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Thu, 11 Jan 2024 13:58:39 -0500 Subject: [PATCH 34/65] Distance, LinearCombination: Only compute np.log if needed The expression replaces calculated np.log values with 0 if both v1 and v2 values at a given index are 0. Skip the computation of np.log(v2) in this case to avoid invalid values. 
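Restated outside the diff, the masked-log pattern adopted in the hunks below is:

    import numpy as np

    v1 = np.array([0.0, 1.0, 2.0])
    v2 = np.array([0.0, 0.5, 1.0])
    both_zero = np.logical_and(v1 == 0, v2 == 0)

    # np.log evaluates only where the mask is True and leaves the masked-out
    # slots uninitialized, so np.where must supply their value (0.0 here)
    # before anything reads them.
    log_v2 = np.log(v2, where=np.logical_not(both_zero))
    result = v1 * np.where(both_zero, 0.0, log_v2)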
Fixes: RuntimeWarning: divide by zero encountered in log RuntimeWarning: invalid value encountered in multiply Signed-off-by: Jan Vesely --- .../components/functions/nonstateful/combinationfunctions.py | 3 ++- .../components/functions/nonstateful/objectivefunctions.py | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/psyneulink/core/components/functions/nonstateful/combinationfunctions.py b/psyneulink/core/components/functions/nonstateful/combinationfunctions.py index 35d955ee435..564dcc6a73d 100644 --- a/psyneulink/core/components/functions/nonstateful/combinationfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/combinationfunctions.py @@ -1442,7 +1442,8 @@ def _function(self, elif operation == CROSS_ENTROPY: v1 = variable[0] v2 = variable[1] - combination = np.where(np.logical_and(v1 == 0, v2 == 0), 0.0, v1 * np.log(v2)) + both_zero = np.logical_and(v1 == 0, v2 == 0) + combination = v1 * np.where(both_zero, 0.0, np.log(v2, where=np.logical_not(both_zero))) else: raise FunctionError("Unrecognized operator ({0}) for LinearCombination function". format(operation.self.Operation.SUM)) diff --git a/psyneulink/core/components/functions/nonstateful/objectivefunctions.py b/psyneulink/core/components/functions/nonstateful/objectivefunctions.py index 190c0b764e4..bdb5d072c18 100644 --- a/psyneulink/core/components/functions/nonstateful/objectivefunctions.py +++ b/psyneulink/core/components/functions/nonstateful/objectivefunctions.py @@ -1207,7 +1207,8 @@ def _function(self, # MODIFIED CW 3/20/18: avoid divide by zero error by plugging in two zeros # FIX: unsure about desired behavior when v2 = 0 and v1 != 0 # JDC: returns [inf]; leave, and let it generate a warning or error message for user - result = -np.sum(np.where(np.logical_and(v1 == 0, v2 == 0), 0.0, v1 * np.log(v2))) + both_zero = np.logical_and(v1 == 0, v2 == 0) + result = -np.sum(v1 * np.where(both_zero, 0.0, np.log(v2, where=np.logical_not(both_zero)))) # Energy elif self.metric == ENERGY: From 0e960c1e051e7422a2de09ca6d453c876d7dff64 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Thu, 11 Jan 2024 18:45:32 -0500 Subject: [PATCH 35/65] Port/_parse_port_spec: Convert value to string to compare to keyword Otherwise they might end up compared as containers. Fixes: FutureWarning: elementwise comparison failed; ... 
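A short sketch of why the cast below helps (exact numpy behavior varies by version; the array value is only illustrative):

    import numpy as np

    FEEDBACK = 'feedback'

    # A non-string second tuple element is compared elementwise, or raises
    # "FutureWarning: elementwise comparison failed", depending on numpy version:
    np.array([1.0, 2.0]) == FEEDBACK

    # Casting to str first makes it an ordinary scalar comparison:
    str(np.array([1.0, 2.0])) == FEEDBACK  # False
    str(FEEDBACK) == FEEDBACK              # True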
Signed-off-by: Jan Vesely --- psyneulink/core/components/ports/port.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/psyneulink/core/components/ports/port.py b/psyneulink/core/components/ports/port.py index dc797799d53..8f5b6db03fd 100644 --- a/psyneulink/core/components/ports/port.py +++ b/psyneulink/core/components/ports/port.py @@ -3002,7 +3002,7 @@ def _parse_port_spec(port_type=None, port_type_name = port_type.__name__ proj_is_feedback = False - if isinstance(port_specification, tuple) and port_specification[1] == FEEDBACK: + if isinstance(port_specification, tuple) and str(port_specification[1]) == FEEDBACK: port_specification = port_specification[0] proj_is_feedback = True From 61e1a4d2ea1c29acef7f4ba05de1782a0fe0951b Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Sat, 6 Jan 2024 05:03:17 +0000 Subject: [PATCH 36/65] tests: MDF: correct implicit use of locals in exec --- tests/mdf/test_mdf.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/tests/mdf/test_mdf.py b/tests/mdf/test_mdf.py index a8ae9b7ddde..fa86bf3ad9f 100644 --- a/tests/mdf/test_mdf.py +++ b/tests/mdf/test_mdf.py @@ -125,7 +125,7 @@ def test_write_json_file( # Save json_summary of Composition to file and read back in. json_filename = filename.replace('.py','.json') - exec(f'pnl.write_json_file({composition_name}, json_filename, simple_edge_format=simple_edge_format)') + exec(f'pnl.write_json_file({composition_name}, "{json_filename}", simple_edge_format={simple_edge_format})') exec(pnl.generate_script_from_json(json_filename)) # exec(f'{composition_name}.run(inputs={input_dict_str})') exec(f'pnl.get_compositions()[0].run(inputs={input_dict_str})') @@ -165,7 +165,7 @@ def test_write_json_file_multiple_comps( # Save json_summary of Composition to file and read back in. 
json_filename = filename.replace('.py', '.json') - exec(f'pnl.write_json_file([{",".join(input_dict_strs)}], json_filename)') + exec(f'pnl.write_json_file([{",".join(input_dict_strs)}], "{json_filename}")') exec(pnl.generate_script_from_json(json_filename)) for composition_name in input_dict_strs: @@ -316,15 +316,20 @@ def test_mdf_equivalence_individual_functions(mech_type, function, runtime_param assert pnl.safe_equals(comp.results, _get_mdf_model_results(eg)) -@pytest.mark.parametrize('filename', ['model_basic.py']) +@pytest.mark.parametrize( + 'filename, composition_name', + [ + ('model_basic.py', 'comp'), + ] +) @pytest.mark.parametrize('fmt', ['json', 'yml']) -def test_generate_script_from_mdf(filename, fmt): +def test_generate_script_from_mdf(filename, composition_name, fmt): filename = os.path.join(os.path.dirname(__file__), filename) outfi = filename.replace('.py', f'.{fmt}') with open(filename, 'r') as orig_file: exec(orig_file.read()) - serialized = eval(f'pnl.get_mdf_serialized(comp, fmt="{fmt}")') + serialized = eval(f'pnl.get_mdf_serialized({composition_name}, fmt="{fmt}")') with open(outfi, 'w') as f: f.write(serialized) From 6a298c28c9d8a9fe876bce6ff2467974daeb4581 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Thu, 20 Jul 2023 02:43:37 +0000 Subject: [PATCH 37/65] tests: MDF: test equivalence using np assert_array_equal --- tests/mdf/test_mdf.py | 41 +++++++++++++++++++++++++++++++++-------- 1 file changed, 33 insertions(+), 8 deletions(-) diff --git a/tests/mdf/test_mdf.py b/tests/mdf/test_mdf.py index fa86bf3ad9f..e97846030b6 100644 --- a/tests/mdf/test_mdf.py +++ b/tests/mdf/test_mdf.py @@ -174,11 +174,35 @@ def test_write_json_file_multiple_comps( assert orig_results[composition_name] == final_results, f'{composition_name}:' -def _get_mdf_model_results(evaluable_graph): - return [ - [eo.curr_value for _, eo in evaluable_graph.enodes[node.id].evaluable_outputs.items()] - for node in evaluable_graph.scheduler.consideration_queue[-1] - ] +def _get_mdf_model_results(evaluable_graph, composition=None): + """ + Returns psyneulink-style output for **evaluable_graph**, optionally + casting outputs to their equivalent node's shape in **composition** + """ + if composition is not None: + node_output_shapes = { + # NOTE: would use defaults.value here, but it doesn't always + # match the shape of value (specifically here, + # FitzHughNagumoIntegrator EULER) + pnl.parse_valid_identifier(node.name): node.value.shape + for node in composition.get_nodes_by_role(pnl.NodeRole.OUTPUT) + } + else: + node_output_shapes = {} + + res = [] + for node in evaluable_graph.scheduler.consideration_queue[-1]: + next_res_elem = [ + eo.curr_value for eo in evaluable_graph.enodes[node.id].evaluable_outputs.values() + ] + try: + next_res_elem = np.reshape(next_res_elem, node_output_shapes[node.id]) + except KeyError: + pass + + res.append(next_res_elem) + + return pnl.convert_to_np_array(res) # These runtime_params are necessary because noise seeding is not @@ -240,13 +264,14 @@ def test_mdf_equivalence(filename, composition_name, input_dict, simple_edge_for # Save json_summary of Composition to file and read back in. 
json_filename = filename.replace('.py', '.json') - pnl.write_json_file(eval(composition_name), json_filename, simple_edge_format=simple_edge_format) + composition = eval(composition_name) + pnl.write_json_file(composition, json_filename, simple_edge_format=simple_edge_format) m = load_mdf(json_filename) eg = ee.EvaluableGraph(m.graphs[0], verbose=True) eg.evaluate(initializer={f'{node}_InputPort_0': i for node, i in input_dict.items()}) - assert pnl.safe_equals(orig_results, _get_mdf_model_results(eg)) + np.testing.assert_array_equal(orig_results, _get_mdf_model_results(eg, composition)) ddi_termination_conds = [ @@ -313,7 +338,7 @@ def test_mdf_equivalence_individual_functions(mech_type, function, runtime_param eg = ee.EvaluableGraph(model.graphs[0], verbose=True) eg.evaluate(initializer={'A_InputPort_0': 1.0}) - assert pnl.safe_equals(comp.results, _get_mdf_model_results(eg)) + np.testing.assert_array_equal(comp.results, _get_mdf_model_results(eg, comp)) @pytest.mark.parametrize( From 513895cbeb097cca78193dd8630bcbba279cce8a Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Sat, 6 Jan 2024 05:39:26 +0000 Subject: [PATCH 38/65] tests: MDF: standardize model_backprop object naming Not specifying the variable name to the Component name caused the reproduced script to name the equivalent Components differently. This resulted, in conjunction with polluted exec globals/locals, with false passing results comparison tests --- tests/mdf/model_backprop.py | 10 +++++----- tests/mdf/test_mdf.py | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/mdf/model_backprop.py b/tests/mdf/model_backprop.py index 2639a433c42..d21ff428e35 100644 --- a/tests/mdf/model_backprop.py +++ b/tests/mdf/model_backprop.py @@ -1,10 +1,10 @@ import psyneulink as pnl -a = pnl.TransferMechanism() -b = pnl.TransferMechanism() -c = pnl.TransferMechanism() +A = pnl.TransferMechanism(name='A') +B = pnl.TransferMechanism(name='B') +C = pnl.TransferMechanism(name='C') -p = pnl.Pathway(pathway=[a, b, c]) +p = pnl.Pathway(pathway=[A, B, C]) -comp = pnl.Composition() +comp = pnl.Composition(name='comp') comp.add_backpropagation_learning_pathway(pathway=p) diff --git a/tests/mdf/test_mdf.py b/tests/mdf/test_mdf.py index e97846030b6..2addfed7cb6 100644 --- a/tests/mdf/test_mdf.py +++ b/tests/mdf/test_mdf.py @@ -72,7 +72,7 @@ def get_onnx_fixed_noise_str(onnx_op, **kwargs): str(stroop_stimuli).replace("'", ''), False ), - ('model_backprop.py', 'comp', '{a: [1, 2, 3]}', False), + ('model_backprop.py', 'comp', '{A: [1, 2, 3]}', False), ] From 20bae8c356eea1cb39258a7da5733ca39cf86e1d Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Fri, 5 Jan 2024 05:08:09 +0000 Subject: [PATCH 39/65] tests: MDF: rework. share code. 
correct shared state globals/locals state was incorrectly shared between reference and test model output due to use of eval/exec --- tests/mdf/test_mdf.py | 157 ++++++++++++++++++++++++++++-------------- 1 file changed, 106 insertions(+), 51 deletions(-) diff --git a/tests/mdf/test_mdf.py b/tests/mdf/test_mdf.py index 2addfed7cb6..2ffa4241a29 100644 --- a/tests/mdf/test_mdf.py +++ b/tests/mdf/test_mdf.py @@ -76,6 +76,57 @@ def get_onnx_fixed_noise_str(onnx_op, **kwargs): ] +def read_defined_model_script(filename): + filename = os.path.join(os.path.dirname(__file__), filename) + + with open(filename, 'r') as orig_file: + model_input = orig_file.read() + + return model_input + + +def get_loaded_model_state(model_input: str): + _globals = copy.copy(globals()) + _locals = copy.copy(locals()) + + exec(model_input, _globals, _locals) + + return _globals, _locals + + +def run_compositions_in_state( + composition_input_strs, _globals, _locals, extra_run_args_str='' +): + results = {} + + for comp_name, inputs in composition_input_strs.items(): + exec(f'{comp_name}.run(inputs={inputs}, {extra_run_args_str})', _globals, _locals) + results[comp_name] = eval(f'{comp_name}.results', _globals, _locals) + + return results, _globals, _locals + + +def get_model_results_and_state( + model_input: str, composition_input_strs, extra_run_args_str='' +): + _globals, _locals = get_loaded_model_state(model_input) + return run_compositions_in_state( + composition_input_strs, _globals, _locals, extra_run_args_str + ) + + +def assert_result_equality(orig_results, new_results): + # compositions + assert orig_results.keys() == new_results.keys() + + for comp_name in orig_results: + np.testing.assert_allclose( + orig_results[comp_name], + new_results[comp_name], + err_msg=f"Results for composition '{comp_name}' are not equal:" + ) + + @pytest.mark.parametrize( 'filename, composition_name, input_dict_str, simple_edge_format', json_results_parametrization @@ -86,21 +137,25 @@ def test_json_results_equivalence( input_dict_str, simple_edge_format, ): + comp_inputs = {composition_name: input_dict_str} + # Get python script from file and execute - filename = os.path.join(os.path.dirname(__file__), filename) - with open(filename, 'r') as orig_file: - exec(orig_file.read()) - exec(f'{composition_name}.run(inputs={input_dict_str})') - orig_results = eval(f'{composition_name}.results') + orig_script = read_defined_model_script(filename) + orig_results, orig_globals, orig_locals = get_model_results_and_state( + orig_script, comp_inputs + ) # reset random seed pnl.core.globals.utilities.set_global_seed(0) # Generate python script from JSON summary of composition and execute - json_summary = pnl.generate_json(eval(f'{composition_name}'), simple_edge_format=simple_edge_format) - exec(pnl.generate_script_from_json(json_summary)) - exec(f'{composition_name}.run(inputs={input_dict_str})') - new_results = eval(f'{composition_name}.results') - assert pnl.safe_equals(orig_results, new_results) + json_summary = pnl.generate_json( + eval(f'{composition_name}', orig_globals, orig_locals), + simple_edge_format=simple_edge_format + ) + new_script = pnl.generate_script_from_json(json_summary) + new_results, _, _ = get_model_results_and_state(new_script, comp_inputs) + + assert_result_equality(orig_results, new_results) @pytest.mark.parametrize( @@ -113,24 +168,29 @@ def test_write_json_file( input_dict_str, simple_edge_format, ): + comp_inputs = {composition_name: input_dict_str} + # Get python script from file and execute - filename = 
os.path.join(os.path.dirname(__file__), filename) - with open(filename, 'r') as orig_file: - exec(orig_file.read()) - exec(f'{composition_name}.run(inputs={input_dict_str})') - orig_results = eval(f'{composition_name}.results') + orig_script = read_defined_model_script(filename) + orig_results, orig_globals, orig_locals = get_model_results_and_state( + orig_script, comp_inputs + ) # reset random seed pnl.core.globals.utilities.set_global_seed(0) # Save json_summary of Composition to file and read back in. json_filename = filename.replace('.py','.json') - exec(f'pnl.write_json_file({composition_name}, "{json_filename}", simple_edge_format={simple_edge_format})') - exec(pnl.generate_script_from_json(json_filename)) - # exec(f'{composition_name}.run(inputs={input_dict_str})') - exec(f'pnl.get_compositions()[0].run(inputs={input_dict_str})') - final_results = eval(f'{composition_name}.results') - assert pnl.safe_equals(orig_results, final_results) + exec( + f'pnl.write_json_file({composition_name}, "{json_filename}", simple_edge_format={simple_edge_format})', + orig_globals, + orig_locals, + ) + + new_script = pnl.generate_script_from_json(json_filename) + new_results, _, _ = get_model_results_and_state(new_script, comp_inputs) + + assert_result_equality(orig_results, new_results) @pytest.mark.parametrize( @@ -148,30 +208,27 @@ def test_write_json_file_multiple_comps( filename, input_dict_strs, ): - orig_results = {} - # Get python script from file and execute - filename = os.path.join(os.path.dirname(__file__), filename) - with open(filename, 'r') as orig_file: - exec(orig_file.read()) - - for composition_name in input_dict_strs: - exec(f'{composition_name}.run(inputs={input_dict_strs[composition_name]})') - orig_results[composition_name] = eval(f'{composition_name}.results') - + orig_script = read_defined_model_script(filename) + orig_results, orig_globals, orig_locals = get_model_results_and_state( + orig_script, input_dict_strs + ) # reset random seed pnl.core.globals.utilities.set_global_seed(0) # Save json_summary of Composition to file and read back in. 
json_filename = filename.replace('.py', '.json') - exec(f'pnl.write_json_file([{",".join(input_dict_strs)}], "{json_filename}")') - exec(pnl.generate_script_from_json(json_filename)) + exec( + f'pnl.write_json_file([{",".join(input_dict_strs)}], "{json_filename}")', + orig_globals, + orig_locals + ) + + new_script = pnl.generate_script_from_json(json_filename) + new_results, _, _ = get_model_results_and_state(new_script, input_dict_strs) - for composition_name in input_dict_strs: - exec(f'{composition_name}.run(inputs={input_dict_strs[composition_name]})') - final_results = eval(f'{composition_name}.results') - assert orig_results[composition_name] == final_results, f'{composition_name}:' + assert_result_equality(orig_results, new_results) def _get_mdf_model_results(evaluable_graph, composition=None): @@ -254,24 +311,24 @@ def test_mdf_equivalence(filename, composition_name, input_dict, simple_edge_for from modeci_mdf.utils import load_mdf import modeci_mdf.execution_engine as ee + comp_inputs = {composition_name: input_dict} + # Get python script from file and execute - filename = os.path.join(os.path.dirname(__file__), filename) - with open(filename, 'r') as orig_file: - exec(orig_file.read()) - inputs_str = str(input_dict).replace("'", '') - exec(f'{composition_name}.run(inputs={inputs_str}, {run_args})') - orig_results = eval(f'{composition_name}.results') + orig_script = read_defined_model_script(filename) + orig_results, orig_globals, orig_locals = get_model_results_and_state( + orig_script, comp_inputs, run_args + ) # Save json_summary of Composition to file and read back in. json_filename = filename.replace('.py', '.json') - composition = eval(composition_name) + composition = eval(composition_name, orig_globals, orig_locals) pnl.write_json_file(composition, json_filename, simple_edge_format=simple_edge_format) m = load_mdf(json_filename) eg = ee.EvaluableGraph(m.graphs[0], verbose=True) eg.evaluate(initializer={f'{node}_InputPort_0': i for node, i in input_dict.items()}) - np.testing.assert_array_equal(orig_results, _get_mdf_model_results(eg, composition)) + assert_result_equality(orig_results, {composition_name: _get_mdf_model_results(eg, composition)}) ddi_termination_conds = [ @@ -349,13 +406,11 @@ def test_mdf_equivalence_individual_functions(mech_type, function, runtime_param ) @pytest.mark.parametrize('fmt', ['json', 'yml']) def test_generate_script_from_mdf(filename, composition_name, fmt): - filename = os.path.join(os.path.dirname(__file__), filename) - outfi = filename.replace('.py', f'.{fmt}') - - with open(filename, 'r') as orig_file: - exec(orig_file.read()) - serialized = eval(f'pnl.get_mdf_serialized({composition_name}, fmt="{fmt}")') + orig_file = read_defined_model_script(filename) + exec(orig_file) + serialized = eval(f'pnl.get_mdf_serialized({composition_name}, fmt="{fmt}")') + outfi = filename.replace('.py', f'.{fmt}') with open(outfi, 'w') as f: f.write(serialized) From 6f3e968f3e3e108faa83830df3ec2b656d01d463 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Thu, 11 Jan 2024 23:14:20 +0000 Subject: [PATCH 40/65] tests: MDF: replace use of json-named functions with mdf-named --- tests/mdf/test_mdf.py | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/tests/mdf/test_mdf.py b/tests/mdf/test_mdf.py index 2ffa4241a29..c0a42a903b5 100644 --- a/tests/mdf/test_mdf.py +++ b/tests/mdf/test_mdf.py @@ -147,12 +147,12 @@ def test_json_results_equivalence( # reset random seed pnl.core.globals.utilities.set_global_seed(0) - 
# Generate python script from JSON summary of composition and execute - json_summary = pnl.generate_json( + # Generate python script from MDF serialization of composition and execute + mdf_data = pnl.get_mdf_serialized( eval(f'{composition_name}', orig_globals, orig_locals), simple_edge_format=simple_edge_format ) - new_script = pnl.generate_script_from_json(json_summary) + new_script = pnl.generate_script_from_mdf(mdf_data) new_results, _, _ = get_model_results_and_state(new_script, comp_inputs) assert_result_equality(orig_results, new_results) @@ -179,15 +179,15 @@ def test_write_json_file( # reset random seed pnl.core.globals.utilities.set_global_seed(0) - # Save json_summary of Composition to file and read back in. - json_filename = filename.replace('.py','.json') + # Save MDF serialization of Composition to file and read back in. + mdf_fname = filename.replace('.py', '.json') exec( - f'pnl.write_json_file({composition_name}, "{json_filename}", simple_edge_format={simple_edge_format})', + f'pnl.write_mdf_file({composition_name}, "{mdf_fname}", simple_edge_format={simple_edge_format})', orig_globals, orig_locals, ) - new_script = pnl.generate_script_from_json(json_filename) + new_script = pnl.generate_script_from_mdf(mdf_fname) new_results, _, _ = get_model_results_and_state(new_script, comp_inputs) assert_result_equality(orig_results, new_results) @@ -216,16 +216,16 @@ def test_write_json_file_multiple_comps( # reset random seed pnl.core.globals.utilities.set_global_seed(0) - # Save json_summary of Composition to file and read back in. - json_filename = filename.replace('.py', '.json') + # Save MDF serialization of Composition to file and read back in. + mdf_fname = filename.replace('.py', '.json') exec( - f'pnl.write_json_file([{",".join(input_dict_strs)}], "{json_filename}")', + f'pnl.write_mdf_file([{",".join(input_dict_strs)}], "{mdf_fname}")', orig_globals, orig_locals ) - new_script = pnl.generate_script_from_json(json_filename) + new_script = pnl.generate_script_from_mdf(mdf_fname) new_results, _, _ = get_model_results_and_state(new_script, input_dict_strs) assert_result_equality(orig_results, new_results) @@ -319,12 +319,12 @@ def test_mdf_equivalence(filename, composition_name, input_dict, simple_edge_for orig_script, comp_inputs, run_args ) - # Save json_summary of Composition to file and read back in. - json_filename = filename.replace('.py', '.json') + # Save MDF serialization of Composition to file and read back in. 
+ mdf_fname = filename.replace('.py', '.json') composition = eval(composition_name, orig_globals, orig_locals) - pnl.write_json_file(composition, json_filename, simple_edge_format=simple_edge_format) + pnl.write_mdf_file(composition, mdf_fname, simple_edge_format=simple_edge_format) - m = load_mdf(json_filename) + m = load_mdf(mdf_fname) eg = ee.EvaluableGraph(m.graphs[0], verbose=True) eg.evaluate(initializer={f'{node}_InputPort_0': i for node, i in input_dict.items()}) From d33eb104186f787397c31c2f0d0601064f12c329 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Thu, 11 Jan 2024 23:28:19 +0000 Subject: [PATCH 41/65] tests: MDF: use tempfile instead of manually named files --- tests/mdf/test_mdf.py | 48 +++++++++++++++++++++++++++++++------------ 1 file changed, 35 insertions(+), 13 deletions(-) diff --git a/tests/mdf/test_mdf.py b/tests/mdf/test_mdf.py index c0a42a903b5..faa60bb8e27 100644 --- a/tests/mdf/test_mdf.py +++ b/tests/mdf/test_mdf.py @@ -1,6 +1,7 @@ import copy import numpy as np import os +import sys import psyneulink as pnl import pytest @@ -76,6 +77,28 @@ def get_onnx_fixed_noise_str(onnx_op, **kwargs): ] +def get_mdf_output_file(orig_filename, tmp_path, format='json'): + """ + Returns: + tuple(pathlib.Path, str, str): + - a pytest tmp_path temp file using **orig_filename** and + **format** + - the full path to the temp file + - the full path to the temp file formatted so that it can be + used in an exec/eval string + """ + mdf_file = tmp_path / orig_filename.replace('.py', f'.{format}') + mdf_fname = str(mdf_file.absolute()) + + # need to escape backslash to use a filename in exec on windows + if sys.platform.startswith('win'): + mdf_exec_fname = mdf_fname.replace('\\', '\\\\') + else: + mdf_exec_fname = mdf_fname + + return mdf_file, mdf_fname, mdf_exec_fname + + def read_defined_model_script(filename): filename = os.path.join(os.path.dirname(__file__), filename) @@ -167,6 +190,7 @@ def test_write_json_file( composition_name, input_dict_str, simple_edge_format, + tmp_path, ): comp_inputs = {composition_name: input_dict_str} @@ -180,9 +204,9 @@ def test_write_json_file( pnl.core.globals.utilities.set_global_seed(0) # Save MDF serialization of Composition to file and read back in. - mdf_fname = filename.replace('.py', '.json') + _, mdf_fname, mdf_exec_fname = get_mdf_output_file(filename, tmp_path) exec( - f'pnl.write_mdf_file({composition_name}, "{mdf_fname}", simple_edge_format={simple_edge_format})', + f'pnl.write_mdf_file({composition_name}, "{mdf_exec_fname}", simple_edge_format={simple_edge_format})', orig_globals, orig_locals, ) @@ -207,6 +231,7 @@ def test_write_json_file( def test_write_json_file_multiple_comps( filename, input_dict_strs, + tmp_path, ): # Get python script from file and execute orig_script = read_defined_model_script(filename) @@ -217,10 +242,9 @@ def test_write_json_file_multiple_comps( pnl.core.globals.utilities.set_global_seed(0) # Save MDF serialization of Composition to file and read back in. 
- mdf_fname = filename.replace('.py', '.json') - + _, mdf_fname, mdf_exec_fname = get_mdf_output_file(filename, tmp_path) exec( - f'pnl.write_mdf_file([{",".join(input_dict_strs)}], "{mdf_fname}")', + f'pnl.write_mdf_file([{",".join(input_dict_strs)}], "{mdf_exec_fname}")', orig_globals, orig_locals ) @@ -307,7 +331,7 @@ def _get_mdf_model_results(evaluable_graph, composition=None): ('model_integrators.py', 'comp', {'A': 1.0}, False, integrators_runtime_params), ] ) -def test_mdf_equivalence(filename, composition_name, input_dict, simple_edge_format, run_args): +def test_mdf_equivalence(filename, composition_name, input_dict, simple_edge_format, run_args, tmp_path): from modeci_mdf.utils import load_mdf import modeci_mdf.execution_engine as ee @@ -320,7 +344,7 @@ def test_mdf_equivalence(filename, composition_name, input_dict, simple_edge_for ) # Save MDF serialization of Composition to file and read back in. - mdf_fname = filename.replace('.py', '.json') + _, mdf_fname, _ = get_mdf_output_file(filename, tmp_path) composition = eval(composition_name, orig_globals, orig_locals) pnl.write_mdf_file(composition, mdf_fname, simple_edge_format=simple_edge_format) @@ -405,14 +429,12 @@ def test_mdf_equivalence_individual_functions(mech_type, function, runtime_param ] ) @pytest.mark.parametrize('fmt', ['json', 'yml']) -def test_generate_script_from_mdf(filename, composition_name, fmt): +def test_generate_script_from_mdf(filename, composition_name, fmt, tmp_path): orig_file = read_defined_model_script(filename) exec(orig_file) serialized = eval(f'pnl.get_mdf_serialized({composition_name}, fmt="{fmt}")') - outfi = filename.replace('.py', f'.{fmt}') - with open(outfi, 'w') as f: - f.write(serialized) + mdf_file, mdf_fname, _ = get_mdf_output_file(filename, tmp_path, fmt) + mdf_file.write_text(serialized) - with open(outfi, 'r') as f: - assert pnl.generate_script_from_mdf(f.read()) == pnl.generate_script_from_mdf(outfi) + assert pnl.generate_script_from_mdf(mdf_file.read_text()) == pnl.generate_script_from_mdf(mdf_fname) From 64caf838e1674dc050cd455d6c7324f9e14fff12 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Thu, 11 Jan 2024 23:36:50 +0000 Subject: [PATCH 42/65] tests: MDF: rename --- tests/mdf/test_mdf.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/tests/mdf/test_mdf.py b/tests/mdf/test_mdf.py index faa60bb8e27..e66df63ae60 100644 --- a/tests/mdf/test_mdf.py +++ b/tests/mdf/test_mdf.py @@ -36,7 +36,7 @@ def get_onnx_fixed_noise_str(onnx_op, **kwargs): } -json_results_parametrization = [ +pnl_mdf_results_parametrization = [ ('model_basic.py', 'comp', '{A: 1}', True), ('model_basic.py', 'comp', '{A: 1}', False), ('model_basic_non_identity.py', 'comp', '{A: 1}', True), @@ -152,9 +152,9 @@ def assert_result_equality(orig_results, new_results): @pytest.mark.parametrize( 'filename, composition_name, input_dict_str, simple_edge_format', - json_results_parametrization + pnl_mdf_results_parametrization ) -def test_json_results_equivalence( +def test_get_mdf_serialized_results_equivalence_pnl_only( filename, composition_name, input_dict_str, @@ -183,9 +183,9 @@ def test_json_results_equivalence( @pytest.mark.parametrize( 'filename, composition_name, input_dict_str, simple_edge_format', - json_results_parametrization + pnl_mdf_results_parametrization ) -def test_write_json_file( +def test_write_mdf_file_results_equivalence_pnl_only( filename, composition_name, input_dict_str, @@ -228,7 +228,7 @@ def test_write_json_file( ('model_with_two_disjoint_comps.py', 
{'comp': '{A: 1}', 'comp2': '{C: 1}'}), ] ) -def test_write_json_file_multiple_comps( +def test_write_mdf_file_results_equivalence_pnl_only_multiple_comps( filename, input_dict_strs, tmp_path, @@ -331,7 +331,7 @@ def _get_mdf_model_results(evaluable_graph, composition=None): ('model_integrators.py', 'comp', {'A': 1.0}, False, integrators_runtime_params), ] ) -def test_mdf_equivalence(filename, composition_name, input_dict, simple_edge_format, run_args, tmp_path): +def test_mdf_pnl_results_equivalence(filename, composition_name, input_dict, simple_edge_format, run_args, tmp_path): from modeci_mdf.utils import load_mdf import modeci_mdf.execution_engine as ee @@ -399,7 +399,7 @@ def test_mdf_equivalence(filename, composition_name, input_dict, simple_edge_for *individual_functions_fhn_test_data, ] ) -def test_mdf_equivalence_individual_functions(mech_type, function, runtime_params, trial_termination_cond): +def test_mdf_pnl_results_equivalence_individual_functions(mech_type, function, runtime_params, trial_termination_cond): import modeci_mdf.execution_engine as ee A = mech_type(name='A', function=copy.deepcopy(function)) From 6d58d3c85662cce8258a379d8df6753151920414 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Fri, 19 Jan 2024 12:23:41 -0500 Subject: [PATCH 43/65] broken_trans_deps: Block sphinxcontrib-applehelp>=1.0.8 Needs sphinx>=5 without indicating it in the deps. Signed-off-by: Jan Vesely --- broken_trans_deps.txt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/broken_trans_deps.txt b/broken_trans_deps.txt index e72e65d7a52..795c4c29295 100644 --- a/broken_trans_deps.txt +++ b/broken_trans_deps.txt @@ -29,3 +29,6 @@ cattrs != 23.1.1; python_version < '3.8' # cattrs==23.2.{1,2} breaks json serialization # https://github.com/python-attrs/cattrs/issues/453 cattrs != 23.2.1, != 23.2.2 + +# sphinxcontrib-applehelp >=1.0.8 needs at least sphinx-5 +sphinxcontrib-applehelp <1.0.8 From ede6372d39e5e89795f7c7d2c21d50287be79064 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Fri, 19 Jan 2024 14:50:50 -0500 Subject: [PATCH 44/65] broken_trans_deps: Block sphinxcontrib-devhelp>=1.0.6 Needs sphinx>=5 without indicating it in the deps. Signed-off-by: Jan Vesely --- broken_trans_deps.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/broken_trans_deps.txt b/broken_trans_deps.txt index 795c4c29295..54e16c8033d 100644 --- a/broken_trans_deps.txt +++ b/broken_trans_deps.txt @@ -30,5 +30,6 @@ cattrs != 23.1.1; python_version < '3.8' # https://github.com/python-attrs/cattrs/issues/453 cattrs != 23.2.1, != 23.2.2 -# sphinxcontrib-applehelp >=1.0.8 needs at least sphinx-5 +# sphinxcontrib-applehelp >=1.0.8, sphinxcontrib-devhelp >=1.0.6 needs at least sphinx-5 sphinxcontrib-applehelp <1.0.8 +sphinxcontrib-devhelp <1.0.6 From 31efb6774c3581d631cff844535188978ae3d5dc Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Fri, 19 Jan 2024 15:05:04 -0500 Subject: [PATCH 45/65] broken_trans_deps: Block sphinxcontrib-htmlhelp>=2.0.5 Needs sphinx>=5 without indicating it in the deps. 
Signed-off-by: Jan Vesely --- broken_trans_deps.txt | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/broken_trans_deps.txt b/broken_trans_deps.txt index 54e16c8033d..c4d50f3f718 100644 --- a/broken_trans_deps.txt +++ b/broken_trans_deps.txt @@ -30,6 +30,10 @@ cattrs != 23.1.1; python_version < '3.8' # https://github.com/python-attrs/cattrs/issues/453 cattrs != 23.2.1, != 23.2.2 -# sphinxcontrib-applehelp >=1.0.8, sphinxcontrib-devhelp >=1.0.6 needs at least sphinx-5 +# The following need at least sphinx-5 without indicating it in dependencies: +# * sphinxcontrib-applehelp >=1.0.8, +# * sphinxcontrib-devhelp >=1.0.6, +# * sphinxcontrib-htmlhelp >=2.0.5, sphinxcontrib-applehelp <1.0.8 sphinxcontrib-devhelp <1.0.6 +sphinxcontrib-htmlhelp <2.0.5 From 0267cbb229b45575d0a3a994dd136634b08722fb Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Fri, 19 Jan 2024 15:13:17 -0500 Subject: [PATCH 46/65] broken_trans_deps: Block sphinxcontrib-serializinghtml>=1.1.10 Needs sphinx>=5 without indicating it in the deps. Signed-off-by: Jan Vesely --- broken_trans_deps.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/broken_trans_deps.txt b/broken_trans_deps.txt index c4d50f3f718..1e1553fa646 100644 --- a/broken_trans_deps.txt +++ b/broken_trans_deps.txt @@ -34,6 +34,8 @@ cattrs != 23.2.1, != 23.2.2 # * sphinxcontrib-applehelp >=1.0.8, # * sphinxcontrib-devhelp >=1.0.6, # * sphinxcontrib-htmlhelp >=2.0.5, +# * sphinxcontrib-serializinghtml >=1.1.10 sphinxcontrib-applehelp <1.0.8 sphinxcontrib-devhelp <1.0.6 sphinxcontrib-htmlhelp <2.0.5 +sphinxcontrib-serializinghtml <1.1.10 From ae84d45a84fc7bd9f8831bdaf0bea21f0a61c731 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Fri, 19 Jan 2024 15:21:11 -0500 Subject: [PATCH 47/65] broken_trans_deps: Block sphinxcontrib-qthelp>=1.0.7 Needs sphinx>=5 without indicating it in the deps. 
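Taken together, these pins act as version caps on packages that are not direct dependencies. A hypothetical invocation, assuming broken_trans_deps.txt is consumed as a pip constraints file (the extras name is illustrative):

    # Constraints limit which sphinxcontrib-* releases the resolver may pick,
    # without adding them as direct dependencies.
    pip install -e .[doc] -c broken_trans_deps.txt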
Signed-off-by: Jan Vesely --- broken_trans_deps.txt | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/broken_trans_deps.txt b/broken_trans_deps.txt index 1e1553fa646..eb6372f61a2 100644 --- a/broken_trans_deps.txt +++ b/broken_trans_deps.txt @@ -34,8 +34,10 @@ cattrs != 23.2.1, != 23.2.2 # * sphinxcontrib-applehelp >=1.0.8, # * sphinxcontrib-devhelp >=1.0.6, # * sphinxcontrib-htmlhelp >=2.0.5, -# * sphinxcontrib-serializinghtml >=1.1.10 +# * sphinxcontrib-serializinghtml >=1.1.10, +# * sphinxcontrib-qthelp >=1.0.7 sphinxcontrib-applehelp <1.0.8 sphinxcontrib-devhelp <1.0.6 sphinxcontrib-htmlhelp <2.0.5 sphinxcontrib-serializinghtml <1.1.10 +sphinxcontrib-qthelp <1.0.7 From d40210bc8f993710fda1fa415c8b2ae92e84fba3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 20 Jan 2024 18:29:57 +0000 Subject: [PATCH 48/65] github-actions(deps): bump actions/cache from 3 to 4 (#2879) --- .github/workflows/pnl-ci-docs.yml | 2 +- .github/workflows/pnl-ci.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/pnl-ci-docs.yml b/.github/workflows/pnl-ci-docs.yml index 0bca607ab25..2b37af516a1 100644 --- a/.github/workflows/pnl-ci-docs.yml +++ b/.github/workflows/pnl-ci-docs.yml @@ -92,7 +92,7 @@ jobs: echo "pip_cache_dir=$(python -m pip cache dir)" | tee -a $GITHUB_OUTPUT - name: Wheels cache - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: ${{ steps.pip_cache.outputs.pip_cache_dir }}/wheels key: ${{ runner.os }}-python-${{ matrix.python-version }}-${{ matrix.python-architecture }}-pip-wheels-${{ hashFiles('requirements.txt', 'doc_requirements.txt') }}-${{ github.sha }} diff --git a/.github/workflows/pnl-ci.yml b/.github/workflows/pnl-ci.yml index 34dd87b598e..236bbc7eb8f 100644 --- a/.github/workflows/pnl-ci.yml +++ b/.github/workflows/pnl-ci.yml @@ -143,7 +143,7 @@ jobs: echo "pip_cache_dir=$(python -m pip cache dir)" | tee -a $GITHUB_OUTPUT - name: Wheels cache - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: ${{ steps.pip_cache.outputs.pip_cache_dir }}/wheels key: ${{ runner.os }}-python-${{ matrix.python-version }}-${{ matrix.python-architecture }}-pip-wheels-${{ hashFiles('requirements.txt', 'dev_requirements.txt') }}-${{ github.sha }} From 2a3f89eef92c2004b76820d51e7e6e779edfc079 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sun, 21 Jan 2024 14:08:59 -0500 Subject: [PATCH 49/65] treewide: Assign values to ".base" attribute of a modulated Parameter Fixes 3 instances of: FutureWarning: Setting parameter values directly using dot notation may be removed in a future release. Signed-off-by: Jan Vesely --- .../core/components/mechanisms/processing/transfermechanism.py | 2 +- tests/mechanisms/test_ddm_mechanism.py | 2 +- tests/mechanisms/test_mechanisms.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/psyneulink/core/components/mechanisms/processing/transfermechanism.py b/psyneulink/core/components/mechanisms/processing/transfermechanism.py index 0298ff4f733..221eac63850 100644 --- a/psyneulink/core/components/mechanisms/processing/transfermechanism.py +++ b/psyneulink/core/components/mechanisms/processing/transfermechanism.py @@ -443,7 +443,7 @@ **noise** (it must be the same length as the Mechanism's `variable `), in which case each element is applied Hadamard (elementwise) to the result, as shown here:: - >>> my_linear_tm.noise = [1.0,1.2,.9] + >>> my_linear_tm.noise.base = [1.0,1.2,.9] >>> my_linear_tm.execute([1.0, 1.0, 1.0]) array([[2. 
, 2.2, 1.9]]) diff --git a/tests/mechanisms/test_ddm_mechanism.py b/tests/mechanisms/test_ddm_mechanism.py index c9e463a4bf2..e3f306b0f81 100644 --- a/tests/mechanisms/test_ddm_mechanism.py +++ b/tests/mechanisms/test_ddm_mechanism.py @@ -77,7 +77,7 @@ def test_valid(self): # reset only decision variable D.function.initializer = 1.0 - D.function.non_decision_time = 0.0 + D.function.non_decision_time.base = 0.0 D.reset() np.testing.assert_allclose(D.function.value[0], 1.0) np.testing.assert_allclose(D.function.parameters.previous_value.get(), 1.0) diff --git a/tests/mechanisms/test_mechanisms.py b/tests/mechanisms/test_mechanisms.py index fd403991f6e..2835140936f 100644 --- a/tests/mechanisms/test_mechanisms.py +++ b/tests/mechanisms/test_mechanisms.py @@ -50,7 +50,7 @@ def test_noise_assignment_equivalence(self, noise): t2 = pnl.TransferMechanism(name='t2', size=2) t2.integrator_function.parameters.noise.set(noise()) - t1.integrator_function.noise.seed = 0 + t1.integrator_function.noise.seed.base = 0 t2.integrator_function.noise.base.seed = 0 for _ in range(5): From b2a5cb733504b570d99fa23cbd10c3c4e4218605 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sun, 21 Jan 2024 16:03:04 -0500 Subject: [PATCH 50/65] requirements: Add scipy to requirements file PECOptimizationFunction requires scipy. Restrict version to <1.12 to avoid failures in differential_evolution. Signed-off-by: Jan Vesely --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index d9e4a43687f..fe7e904f2f7 100644 --- a/requirements.txt +++ b/requirements.txt @@ -18,5 +18,6 @@ pillow<10.3.0 pint<0.22.0 protobuf<3.20.4 rich>=10.1, <10.13 +scipy<1.12 toposort<1.11 torch>=1.10.0, <2.2.0; (platform_machine == 'AMD64' or platform_machine == 'x86_64' or platform_machine == 'arm64' or platform_machine == 'aarch64') and platform_python_implementation == 'CPython' and implementation_name == 'cpython' From 8ce059a380b6c4bf2ee89675d4c4a4499283a8b8 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sun, 21 Jan 2024 16:10:30 -0500 Subject: [PATCH 51/65] Functions/UniformToNormalDist: Do not check for scipy at runtime It's now a required dependency. 
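erfinv is what drives the uniform-to-normal mapping; a sketch of the standard inverse-CDF transform it enables (the exact sampling code inside UniformToNormalDist may differ):

    import numpy as np
    from scipy.special import erfinv

    mean, sd = 0.0, 1.0
    rng = np.random.default_rng(22)

    u = rng.uniform(size=4)                                # U(0, 1) draws
    x = mean + sd * np.sqrt(2.0) * erfinv(2.0 * u - 1.0)   # ~ N(mean, sd**2)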
Signed-off-by: Jan Vesely --- .../nonstateful/distributionfunctions.py | 6 +-- tests/mechanisms/test_transfer_mechanism.py | 38 ++++++------------- 2 files changed, 13 insertions(+), 31 deletions(-) diff --git a/psyneulink/core/components/functions/nonstateful/distributionfunctions.py b/psyneulink/core/components/functions/nonstateful/distributionfunctions.py index b0d2145028b..96ae2c45292 100644 --- a/psyneulink/core/components/functions/nonstateful/distributionfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/distributionfunctions.py @@ -26,6 +26,7 @@ import numpy as np from beartype import beartype +from scipy.special import erfinv from psyneulink._typing import Optional @@ -371,11 +372,6 @@ def _function(self, params=None, ): - try: - from scipy.special import erfinv - except: - raise FunctionError("The UniformToNormalDist function requires the SciPy package.") - mean = self._get_current_parameter_value(DIST_MEAN, context) standard_deviation = self._get_current_parameter_value(STANDARD_DEVIATION, context) random_state = self.parameters.random_state._get(context) diff --git a/tests/mechanisms/test_transfer_mechanism.py b/tests/mechanisms/test_transfer_mechanism.py index 85814d3388e..1873e5dcae7 100644 --- a/tests/mechanisms/test_transfer_mechanism.py +++ b/tests/mechanisms/test_transfer_mechanism.py @@ -299,33 +299,19 @@ def test_transfer_mech_exponential_noise(self): @pytest.mark.mechanism @pytest.mark.transfer_mechanism def test_transfer_mech_uniform_to_normal_noise(self): - try: - import scipy - except ModuleNotFoundError: - with pytest.raises(FunctionError) as error_text: - T = TransferMechanism( - name='T', - default_variable=[0, 0, 0, 0], - function=Linear(), - noise=UniformToNormalDist(), - integration_rate=1.0 - ) - assert "The UniformToNormalDist function requires the SciPy package." in str(error_text.value) - else: - T = TransferMechanism( - name='T', - default_variable=[0, 0, 0, 0], - function=Linear(), - noise=UniformToNormalDist(), - integration_rate=1.0 - ) - # This is equivalent to - # T.noise.base.parameters.random_state.get(None).seed([22]) - T.noise.parameters.seed.set(22, None) - val = T.execute([0, 0, 0, 0]) - np.testing.assert_allclose(val, [[1.73027452, -1.07866481, -1.98421126, 2.99564032]]) - + T = TransferMechanism( + name='T', + default_variable=[0, 0, 0, 0], + function=Linear(), + noise=UniformToNormalDist(), + integration_rate=1.0 + ) + # This is equivalent to + # T.noise.base.parameters.random_state.get(None).seed([22]) + T.noise.parameters.seed.set(22, None) + val = T.execute([0, 0, 0, 0]) + np.testing.assert_allclose(val, [[1.73027452, -1.07866481, -1.98421126, 2.99564032]]) @pytest.mark.mechanism @pytest.mark.transfer_mechanism From 15fcba7fe1d27b82b6bedc0514acb80eeacd8d19 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sun, 21 Jan 2024 16:13:17 -0500 Subject: [PATCH 52/65] tests/RecurrentTransferMechanism: Use matrix.modulated or get_mod_matrix() instead of mod_matrix The latter is deprecated. 
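In shorthand, the replacement reads as follows (proj stands for the recurrent MappingProjection and c for an execution context, as in the hunks below):

    proj.mod_matrix          # deprecated accessor
    proj.matrix.modulated    # modulated value in the current context
    proj.get_mod_matrix(c)   # modulated value for an explicit context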
Signed-off-by: Jan Vesely --- tests/mechanisms/test_recurrent_transfer_mechanism.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/mechanisms/test_recurrent_transfer_mechanism.py b/tests/mechanisms/test_recurrent_transfer_mechanism.py index 9fc8a95c4b7..d4fa9ff75bd 100644 --- a/tests/mechanisms/test_recurrent_transfer_mechanism.py +++ b/tests/mechanisms/test_recurrent_transfer_mechanism.py @@ -820,7 +820,7 @@ def test_recurrent_mech_with_learning(self): ) # Test that all of these are the same: np.testing.assert_allclose( - R.recurrent_projection.mod_matrix, + R.recurrent_projection.matrix.modulated, [ [0.1, 0.1, 0.1, 0.1], [0.1, 0.1, 0.1, 0.1], @@ -880,8 +880,8 @@ def test_recurrent_mech_change_learning_rate(self): [1.1, 0., 1.1, 1.1], [1.1, 1.1, 0., 1.1], [1.1, 1.1, 1.1, 0.]] - np.testing.assert_allclose(R.recurrent_projection.mod_matrix, matrix_1) - print(R.recurrent_projection.mod_matrix) + np.testing.assert_allclose(R.recurrent_projection.get_mod_matrix(c), matrix_1) + print(R.recurrent_projection.get_mod_matrix(c)) R.learning_rate.base = 0.9 assert R.learning_rate.base == 0.9 @@ -892,8 +892,8 @@ def test_recurrent_mech_change_learning_rate(self): [1.911125, 0., 1.911125, 1.911125], [1.911125, 1.911125, 0., 1.911125], [1.911125, 1.911125, 1.911125, 0.]] - # np.testing.assert_allclose(R.recurrent_projection.mod_matrix, matrix_2) - print(R.recurrent_projection.mod_matrix) + # np.testing.assert_allclose(R.recurrent_projection.get_mod_matrix(c), matrix_2) + print(R.recurrent_projection.get_mod_matrix(c)) def test_learning_of_orthognal_inputs(self): size=4 From 16c70ce85073b30266a2fb0ae9e6ff6411139bd2 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sun, 21 Jan 2024 16:14:20 -0500 Subject: [PATCH 53/65] setup: Error on SyntaxWarnings Signed-off-by: Jan Vesely --- setup.cfg | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.cfg b/setup.cfg index 318879c8f93..141ba999200 100644 --- a/setup.cfg +++ b/setup.cfg @@ -67,6 +67,7 @@ required_plugins = pytest-benchmark pytest-cov pytest-helpers-namespace pytest-p xfail_strict = True filterwarnings = + error::SyntaxWarning error:Creating an ndarray from ragged nested sequences \(which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes\) is deprecated.*:numpy.VisibleDeprecationWarning error:Invalid escape sequence From f9705ca3917bdaefe27b998d8a34be2a5ed51251 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Thu, 25 Jan 2024 09:57:53 -0500 Subject: [PATCH 54/65] codeql: Only purge python cache if CODEQL_PYTHON is set (#2888) The environment variable will point to the python installation that installed dependencies if dependencies were installed.[0] [0] https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/customizing-your-advanced-setup-for-code-scanning?learn=code_security_actions&learnProduct=code-security#analyzing-python-dependencies Signed-off-by: Jan Vesely --- .github/workflows/codeql.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 944cef64f5f..3cb3a4d5668 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -46,8 +46,11 @@ jobs: - name: Autobuild uses: github/codeql-action/autobuild@v3 - - name: Cache cleanup + - name: Pip cache cleanup shell: bash + # CODEQL_PYTHON is only defined if dependencies were installed [0] + # [0] 
https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/customizing-your-advanced-setup-for-code-scanning?learn=code_security_actions&learnProduct=code-security#analyzing-python-dependencies + if: ${{ env.CODEQL_PYTHON != '' }} run: | $CODEQL_PYTHON -m pip cache info $CODEQL_PYTHON -m pip cache purge From 76d4bf960b973d88428a87f375fdeb62a865969b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 26 Jan 2024 17:57:38 +0000 Subject: [PATCH 55/65] requirements: update beartype requirement from <0.16.0 to <0.17.0 (#2804) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index fe7e904f2f7..21f45f32640 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ autograd<1.7 -beartype<0.16.0 +beartype<0.18.0 dill<0.3.8 fastkde>=1.0.24, <1.0.31 graph-scheduler>=1.1.1, <1.3.0 From a84bfff34e54eb86ecb9be3762dadf57c7dd921a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 27 Jan 2024 01:33:17 +0000 Subject: [PATCH 56/65] requirements: update pandas requirement from <2.1.5 to <2.2.1 (#2890) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 21f45f32640..f0140eba236 100644 --- a/requirements.txt +++ b/requirements.txt @@ -13,7 +13,7 @@ networkx<3.3 numpy>=1.21.0, <1.24.5 optuna<3.4.0 packaging<24.0 -pandas<2.1.5 +pandas<2.2.1 pillow<10.3.0 pint<0.22.0 protobuf<3.20.4 From 492cff1ed9e9c3457839a93953c74ef1d2cd1a79 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Mon, 29 Jan 2024 12:22:06 -0500 Subject: [PATCH 57/65] ci/ga: Upload pr_number only once Instead of each job on PR uploading and overwriting the artifact, upload it only in the 'base' job. Fixes: bb9dc17440e6cf95c11ebee6d2cf7e36a81ef29b ("ci: fix missing PR number (#2109)") Signed-off-by: Jan Vesely --- .github/workflows/pnl-ci-docs.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/pnl-ci-docs.yml b/.github/workflows/pnl-ci-docs.yml index 2b37af516a1..14e4d938b34 100644 --- a/.github/workflows/pnl-ci-docs.yml +++ b/.github/workflows/pnl-ci-docs.yml @@ -131,11 +131,13 @@ jobs: path: docs/build/html - name: Store PR number - if: ${{ github.event_name == 'pull_request' }} + # The 'base' variant runs only on pull requests and has only one job + if: ${{ matrix.pnl-version == 'base' }} run: echo ${{ github.event.pull_request.number }} > ./pr_number.txt - name: Upload PR number for other workflows - if: ${{ github.event_name == 'pull_request' }} + # The 'base' variant runs only on pull requests and has only one job + if: ${{ matrix.pnl-version == 'base' }} uses: actions/upload-artifact@v3 with: name: pr_number From 8197593e9c9e916e73d93c2a4cd805d35e221ae5 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Mon, 29 Jan 2024 15:39:06 -0500 Subject: [PATCH 58/65] ci/ga: Do not upload distfile for version restricted run The package is identical to the vanilla run but for the fixed minimum versions of requirements. 
Fixes: 76eeef504e5f758dabc79e09be61cb24149a3754 ("ci/ga: Add a CI run with version restricted dependencies") Signed-off-by: Jan Vesely --- .github/workflows/pnl-ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/pnl-ci.yml b/.github/workflows/pnl-ci.yml index 236bbc7eb8f..9df8300c2ce 100644 --- a/.github/workflows/pnl-ci.yml +++ b/.github/workflows/pnl-ci.yml @@ -203,6 +203,7 @@ jobs: - name: Upload dist packages uses: actions/upload-artifact@v3 + if: matrix.version-restrict == '' with: name: dist-${{ matrix.os }}-${{ matrix.python-version }}-${{ matrix.python-architecture }} path: dist/ From 7f687452ca93b5c945314e068ce4ae2dc7cef36f Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Mon, 29 Jan 2024 15:40:35 -0500 Subject: [PATCH 59/65] ci/ga: Add version restriction to test artifact name Fixes: 76eeef504e5f758dabc79e09be61cb24149a3754 ("ci/ga: Add a CI run with version restricted dependencies") Signed-off-by: Jan Vesely --- .github/workflows/pnl-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pnl-ci.yml b/.github/workflows/pnl-ci.yml index 9df8300c2ce..4c7407b562f 100644 --- a/.github/workflows/pnl-ci.yml +++ b/.github/workflows/pnl-ci.yml @@ -178,7 +178,7 @@ jobs: - name: Upload test results uses: actions/upload-artifact@v3 with: - name: test-results-${{ matrix.os }}-${{ matrix.python-version }}-${{ matrix.python-architecture }} + name: test-results-${{ matrix.os }}-${{ matrix.python-version }}-${{ matrix.python-architecture }}-${{ matrix.version-restrict }} path: tests_out.xml retention-days: 5 if: (success() || failure()) && ! contains(matrix.extra-args, 'forked') From 9f2f9b8ee11d5b4b985cbc8ffdaaa44ca49ba089 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Tue, 30 Jan 2024 10:23:27 -0500 Subject: [PATCH 60/65] ci/ga: Bump versions of actions/{download,upload}-artifact (#2893) * github-actions(deps): bump actions/download-artifact from 3 to 4 Bumps [actions/download-artifact](https://github.com/actions/download-artifact) from 3 to 4. - [Release notes](https://github.com/actions/download-artifact/releases) - [Commits](https://github.com/actions/download-artifact/compare/v3...v4) --- updated-dependencies: - dependency-name: actions/download-artifact dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] * github-actions(deps): bump actions/upload-artifact from 3 to 4 Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 3 to 4. - [Release notes](https://github.com/actions/upload-artifact/releases) - [Commits](https://github.com/actions/upload-artifact/compare/v3...v4) --- updated-dependencies: - dependency-name: actions/upload-artifact dependency-type: direct:production update-type: version-update:semver-major ... 
From 9f2f9b8ee11d5b4b985cbc8ffdaaa44ca49ba089 Mon Sep 17 00:00:00 2001
From: Jan Vesely
Date: Tue, 30 Jan 2024 10:23:27 -0500
Subject: [PATCH 60/65] ci/ga: Bump versions of actions/{download,upload}-artifact (#2893)

* github-actions(deps): bump actions/download-artifact from 3 to 4

Bumps [actions/download-artifact](https://github.com/actions/download-artifact) from 3 to 4.
- [Release notes](https://github.com/actions/download-artifact/releases)
- [Commits](https://github.com/actions/download-artifact/compare/v3...v4)

---
updated-dependencies:
- dependency-name: actions/download-artifact
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot]

* github-actions(deps): bump actions/upload-artifact from 3 to 4

Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 3 to 4.
- [Release notes](https://github.com/actions/upload-artifact/releases)
- [Commits](https://github.com/actions/upload-artifact/compare/v3...v4)

---
updated-dependencies:
- dependency-name: actions/upload-artifact
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot]

---------

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 .github/workflows/pnl-ci-docs.yml  |  8 ++++----
 .github/workflows/pnl-ci.yml       |  4 ++--
 .github/workflows/test-release.yml | 10 +++++-----
 3 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/.github/workflows/pnl-ci-docs.yml b/.github/workflows/pnl-ci-docs.yml
index 14e4d938b34..2e5f305ca51 100644
--- a/.github/workflows/pnl-ci-docs.yml
+++ b/.github/workflows/pnl-ci-docs.yml
@@ -124,7 +124,7 @@ jobs:
         run: git tag -d 'v0.0.0.0'

       - name: Upload Documentation
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: Documentation-${{matrix.pnl-version}}-${{ matrix.os }}-${{ matrix.python-version }}-${{ matrix.python-architecture }}
           retention-days: 1
@@ -138,7 +138,7 @@ jobs:
       - name: Upload PR number for other workflows
         # The 'base' variant runs only on pull requests and has only one job
         if: ${{ matrix.pnl-version == 'base' }}
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: pr_number
           path: ./pr_number.txt
@@ -170,7 +170,7 @@ jobs:
           ref: gh-pages

       - name: Download branch docs
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@v4
         with:
           name: Documentation-head-${{ matrix.os }}-${{ matrix.python-version }}-x64
           path: _built_docs/${{ github.ref }}
@@ -187,7 +187,7 @@ jobs:
         if: github.ref == 'refs/heads/master' || github.ref == 'refs/heads/devel' || github.ref == 'refs/heads/docs'

       - name: Download main docs
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@v4
         with:
           name: Documentation-head-${{ matrix.os }}-${{ matrix.python-version }}-x64
           # This overwrites files in current directory

diff --git a/.github/workflows/pnl-ci.yml b/.github/workflows/pnl-ci.yml
index 4c7407b562f..98452639a94 100644
--- a/.github/workflows/pnl-ci.yml
+++ b/.github/workflows/pnl-ci.yml
@@ -176,7 +176,7 @@ jobs:
         run: pytest --junit-xml=tests_out.xml --verbosity=0 -n auto ${{ matrix.extra-args }}

       - name: Upload test results
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: test-results-${{ matrix.os }}-${{ matrix.python-version }}-${{ matrix.python-architecture }}-${{ matrix.version-restrict }}
           path: tests_out.xml
@@ -202,7 +202,7 @@ jobs:
           python setup.py sdist bdist_wheel

       - name: Upload dist packages
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         if: matrix.version-restrict == ''
         with:
           name: dist-${{ matrix.os }}-${{ matrix.python-version }}-${{ matrix.python-architecture }}
           path: dist/

diff --git a/.github/workflows/test-release.yml b/.github/workflows/test-release.yml
index 45cacf39c88..8df8c2bfef2 100644
--- a/.github/workflows/test-release.yml
+++ b/.github/workflows/test-release.yml
@@ -38,7 +38,7 @@ jobs:
           echo "wheel=$(ls *.whl)" >> $GITHUB_OUTPUT

       - name: Upload Python dist files
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: Python-dist-files
           path: dist/
@@ -78,7 +78,7 @@ jobs:
     steps:

       - name: Download dist files
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@v4
         with:
           name: Python-dist-files
           path: dist/
@@ -126,7 +126,7 @@ jobs:
           pytest --junit-xml=tests_out.xml --verbosity=0 -n auto tests

       - name: Upload test results
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: test-results-${{ matrix.os }}-${{ matrix.python-version }}
           path: tests_out.xml
@@ -141,7 +141,7 @@ jobs:
     steps:

       - name: Download dist files
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@v4
         with:
           name: Python-dist-files
           path: dist/
@@ -175,7 +175,7 @@ jobs:
     steps:

       - name: Download dist files
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@v4
         with:
           name: Python-dist-files
           path: dist/

From 113467326b85e9165c85bda24547592494a54c53 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 30 Jan 2024 21:24:03 +0000
Subject: [PATCH 61/65] requirements: update dill requirement from <0.3.8 to <0.3.9 (#2891)

---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index f0140eba236..d4c5ad265a1 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,6 +1,6 @@
 autograd<1.7
 beartype<0.18.0
-dill<0.3.8
+dill<0.3.9
 fastkde>=1.0.24, <1.0.31
 graph-scheduler>=1.1.1, <1.3.0
 graphviz<0.21.0

From 34ee885229d588232af8593e3661bf50d1c20030 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 31 Jan 2024 03:50:57 +0000
Subject: [PATCH 62/65] requirements: update pytest requirement from <7.4.5 to <8.0.1 (#2896)

---
 dev_requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dev_requirements.txt b/dev_requirements.txt
index a229217a04f..3683bd99fa2 100644
--- a/dev_requirements.txt
+++ b/dev_requirements.txt
@@ -1,6 +1,6 @@
 jupyter<1.0.1
 packaging<24.0
-pytest<7.4.5
+pytest<8.0.1
 pytest-benchmark<4.0.1
 pytest-cov<4.1.1
 pytest-forked<1.7.0

From 1be0b898df2d4ad07a442604d319028861561610 Mon Sep 17 00:00:00 2001
From: Jan Vesely
Date: Wed, 31 Jan 2024 19:55:09 -0500
Subject: [PATCH 63/65] ci/ga: Print additional CPU and memory info on all runners (#2898)

Split numpy info from OS machine info.

Signed-off-by: Jan Vesely
---
 .github/workflows/pnl-ci.yml | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/pnl-ci.yml b/.github/workflows/pnl-ci.yml
index 98452639a94..40a94795212 100644
--- a/.github/workflows/pnl-ci.yml
+++ b/.github/workflows/pnl-ci.yml
@@ -163,12 +163,18 @@ jobs:
           # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
           flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics

-      - name: Print test machine/env info
+      - name: Print numpy info
        shell: bash
        run: |
          python -c "import numpy; numpy.show_config()"
+
+      - name: Print machine info
+        shell: bash
+        run: |
          case "$RUNNER_OS" in
-            Linux*) lscpu;;
+            Linux*) lscpu; lsmem;;
+            macOS*) sysctl -a | grep '^hw' ;;
+            Windows*) wmic cpu get description,currentclockspeed,NumberOfCores,NumberOfEnabledCore,NumberOfLogicalProcessors; wmic memorychip get capacity,speed,status,manufacturer ;;
          esac

       - name: Test with pytest

From eb348417d20624b97e8f4094e5f53935298ce428 Mon Sep 17 00:00:00 2001
From: Jan Vesely
Date: Thu, 1 Feb 2024 10:08:30 -0500
Subject: [PATCH 64/65] ci/ga: Use all logical cores to run tests (#2900)

Runners execute in a VM, and test workers spread across all vCPUs may
perform better than bare-metal threads, depending on the hypervisor's
vCPU scheduling.

Signed-off-by: Jan Vesely
---
 .github/workflows/pnl-ci.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/pnl-ci.yml b/.github/workflows/pnl-ci.yml
index 40a94795212..83834f73be4 100644
--- a/.github/workflows/pnl-ci.yml
+++ b/.github/workflows/pnl-ci.yml
@@ -179,7 +179,7 @@ jobs:

       - name: Test with pytest
         timeout-minutes: 180
-        run: pytest --junit-xml=tests_out.xml --verbosity=0 -n auto ${{ matrix.extra-args }}
+        run: pytest --junit-xml=tests_out.xml --verbosity=0 -n logical ${{ matrix.extra-args }}

       - name: Upload test results
         uses: actions/upload-artifact@v4
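The switch from `-n auto` to `-n logical` above is a pytest-xdist knob: as of recent pytest-xdist releases, `auto` resolves to one worker per physical core when psutil is installed, while `logical` also counts SMT/hyperthreaded cores, so a 2-core/4-thread runner gets four workers instead of two. A sketch of the step with the distinction spelled out (the counts in the comments are illustrative):

    - name: Test with pytest
      timeout-minutes: 180
      # -n auto    -> one worker per physical core (when psutil is installed)
      # -n logical -> one worker per logical core, counting SMT threads
      run: pytest --junit-xml=tests_out.xml -n logical

On a VM-backed runner the extra workers are cheap, because the hypervisor already time-slices the vCPUs.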
From 47a59ec5fe036ebec6efccbd98b0baf0515b9e72 Mon Sep 17 00:00:00 2001
From: Jan Vesely
Date: Thu, 1 Feb 2024 17:28:16 -0500
Subject: [PATCH 65/65] tests/learning: Do not apply marks to a fixture (#2901)

Marks applied to a fixture do not transfer to the tests that use it
(they have no effect there). Apply marks to parameters instead.

Simplify model construction. Instead of a fixture that returns a
constructor function, just use the constructor function directly.

Signed-off-by: Jan Vesely
---
 tests/composition/test_learning.py | 60 ++++++++++++++----------
 1 file changed, 28 insertions(+), 32 deletions(-)

diff --git a/tests/composition/test_learning.py b/tests/composition/test_learning.py
index 2cd856be390..69c48556e3b 100644
--- a/tests/composition/test_learning.py
+++ b/tests/composition/test_learning.py
@@ -12,10 +12,7 @@
 from psyneulink.core.globals.keywords import Loss
 # from psyneulink.library.components.mechanisms.processing.objective.comparatormechanism import SSE, MSE, L0

-@pytest.mark.pytorch
-@pytest.mark.composition
-@pytest.fixture
-def xor_network():
+def xor_network(comp_type, comp_learning_rate, pathway_learning_rate):
     """Create simple sample network for testing learning specifications
     Returns a function that takes a Composition type and learning_rate specifications
     and returns an instantiated Composition and its components
@@ -39,39 +36,38 @@ def xor_network():
     inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
     targets = np.array([[0], [1], [1], [0]])

-    def _get_comp_type(comp_type, comp_learning_rate, pathway_learning_rate):
-        if comp_type == 'composition':
-            xor = Composition(learning_rate=comp_learning_rate)
-            # Note: uses Projections specified above by inference
-            pathway = xor.add_backpropagation_learning_pathway(pathway=[input_layer,hidden_layer,output_layer],
-                                                               learning_rate=pathway_learning_rate)
-            target_mechanism = pathway.learning_components[pnl.TARGET_MECHANISM]
-        elif comp_type == 'autodiff':
-            # FIX: the format commented out below doesn't work for LLVM:
-            # xor = pnl.AutodiffComposition(nodes=[input_layer,hidden_layer,output_layer])
-            # xor.add_projections([input_to_hidden_wts, hidden_to_output_wts])
-            xor = pnl.AutodiffComposition()
-            xor.add_node(input_layer)
-            xor.add_node(hidden_layer)
-            xor.add_node(output_layer)
-            xor.add_projection(sender=input_layer, projection=input_to_hidden_wts, receiver=hidden_layer)
-            xor.add_projection(sender=hidden_layer, projection=hidden_to_output_wts, receiver=output_layer)
-            target_mechanism = None
-        else:
-            assert False, f"Bad composition type parameter passed to xor_net fixture"
-        return xor, input_layer, hidden_layer, output_layer, target_mechanism, inputs, targets,
-    return _get_comp_type
+    if comp_type == 'composition':
+        xor = Composition(learning_rate=comp_learning_rate)
+        # Note: uses Projections specified above by inference
+        pathway = xor.add_backpropagation_learning_pathway(pathway=[input_layer,hidden_layer,output_layer],
+                                                           learning_rate=pathway_learning_rate)
+        target_mechanism = pathway.learning_components[pnl.TARGET_MECHANISM]
+    elif comp_type == 'autodiff':
+        # FIX: the format commented out below doesn't work for LLVM:
+        # xor = pnl.AutodiffComposition(nodes=[input_layer,hidden_layer,output_layer])
+        # xor.add_projections([input_to_hidden_wts, hidden_to_output_wts])
+        xor = pnl.AutodiffComposition()
+        xor.add_node(input_layer)
+        xor.add_node(hidden_layer)
+        xor.add_node(output_layer)
+        xor.add_projection(sender=input_layer, projection=input_to_hidden_wts, receiver=hidden_layer)
+        xor.add_projection(sender=hidden_layer, projection=hidden_to_output_wts, receiver=output_layer)
+        target_mechanism = None
+    else:
+        assert False, f"Bad composition type parameter passed to xor_net fixture"
+    return xor, input_layer, hidden_layer, output_layer, target_mechanism, inputs, targets,

 class TestInputAndTargetSpecs:

-    @pytest.mark.pytorch
+    @pytest.mark.composition
     @pytest.mark.parametrize('input_type', ['dict', 'func', 'gen', 'gen_func'])
-    @pytest.mark.parametrize('exec_mode', [pnl.ExecutionMode.PyTorch,
-                                           pnl.ExecutionMode.LLVMRun,
+    @pytest.mark.parametrize('exec_mode', [pytest.param(pnl.ExecutionMode.PyTorch, marks=pytest.mark.pytorch),
+                                           pytest.param(pnl.ExecutionMode.LLVMRun, marks=pytest.mark.llvm),
                                            pnl.ExecutionMode.Python])
-    @pytest.mark.parametrize('comp_type', ['composition', 'autodiff'])
-    def test_node_spec_types(self, xor_network, comp_type, input_type, exec_mode):
+    @pytest.mark.parametrize('comp_type', ['composition',
+                                           pytest.param('autodiff', marks=pytest.mark.pytorch)])
+    def test_node_spec_types(self, comp_type, input_type, exec_mode):
         if comp_type == 'composition' and exec_mode != pnl.ExecutionMode.Python:
             pytest.skip(f"Execution mode {exec_mode} not relevant for Composition learn")
@@ -1898,7 +1894,7 @@ def test_matrix_spec_and_learning_rate(self):
         ('learning_mech', .01, .02, .03, .04, [[0.63458688]]),
     ]
     @pytest.mark.parametrize('spec_types', spec_types, ids=[x[0] for x in spec_types])
-    def test_different_learning_rate_specs_for_comp(self, xor_network, spec_types):
+    def test_different_learning_rate_specs_for_comp(self, spec_types):
         learning_mech_learning_rate = spec_types[1]
         learning_pathway_learning_rate = spec_types[2]
         composition_learning_rate = spec_types[3]
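The fixture change in this last patch follows from how pytest treats marks: a mark decorator on a fixture function is ignored and never reaches the tests that request the fixture, whereas marks attached to individual parametrize values via pytest.param are applied per test case. A minimal standalone illustration of the two patterns (the mark names below are placeholders and would need to be registered under `markers` in the project's pytest configuration):

    import pytest

    # Ineffective: pytest ignores marks placed on fixtures, so this guards nothing.
    #
    #     @pytest.mark.gpu        # has no effect on tests using the fixture
    #     @pytest.fixture
    #     def device():
    #         return "cuda"

    # Effective: mark only the parameter values that need the guard.
    @pytest.mark.parametrize(
        "backend",
        [
            "interpreter",                                     # always collected
            pytest.param("compiled", marks=pytest.mark.llvm),  # deselect with -m "not llvm"
            pytest.param("torch", marks=pytest.mark.gpu),      # deselect with -m "not gpu"
        ],
    )
    def test_backend_name_is_known(backend):
        assert backend in ("interpreter", "compiled", "torch")

This mirrors the diff above: the blanket pytorch mark is dropped from the test, and only the ExecutionMode.PyTorch and 'autodiff' parameters carry it.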