Merge pull request #2099 from PrincetonUniversity/devel
Devel
kmantel committed Aug 10, 2021
2 parents 5eb2ead + 8eed471 commit cf381ac
Showing 27 changed files with 499 additions and 3,412 deletions.
4 changes: 2 additions & 2 deletions Scripts/Examples/Multilayer-Learning.py
@@ -72,8 +72,8 @@
)


def print_header(comp):
print("\n\n**** Time: ", comp.scheduler.clock.simple_time)
def print_header(comp, context):
print("\n\n**** Time: ", comp.scheduler.get_clock(context).simple_time)


def show_target(comp):
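The example-script callbacks now take the execution context, because a scheduler keeps a separate clock per context rather than a single `clock` attribute. A minimal sketch of the updated idiom, assuming the callback is wired up with functools.partial as in these scripts and that run() invokes callbacks with the execution context (the mechanism and composition here are illustrative):

    import functools
    import psyneulink as pnl

    mech = pnl.TransferMechanism(name='mech')
    comp = pnl.Composition(pathways=[mech], name='comp')

    def print_header(comp, context):
        # get_clock(context) replaces the old comp.scheduler.clock
        print("\n\n**** Time: ", comp.scheduler.get_clock(context).simple_time)

    comp.run(inputs={mech: [[0.5]]},
             call_before_trial=functools.partial(print_header, comp))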
4 changes: 2 additions & 2 deletions Scripts/Examples/_Gating-Mechanism.py
@@ -111,8 +111,8 @@
}


def print_header(system):
print("\n\n**** Time: ", system.scheduler.clock.simple_time)
def print_header(system, context):
print("\n\n**** Time: ", system.scheduler.get_clock(context).simple_time)


def show_target():
4 changes: 2 additions & 2 deletions doc_requirements.txt
@@ -1,3 +1,3 @@
psyneulink-sphinx-theme<=1.2.1.7
sphinx<3.3.2
psyneulink-sphinx-theme<1.2.2.2
sphinx<4.1.3
sphinx_autodoc_typehints<1.13.0
27 changes: 13 additions & 14 deletions psyneulink/core/components/component.py
@@ -505,6 +505,7 @@
from enum import Enum, IntEnum

import dill
import graph_scheduler
import numpy as np

from psyneulink.core import llvm as pnlvm
@@ -1325,7 +1326,7 @@ def _convert(p):
# Skip first element of random state (id string)
val = pnlvm._tupleize(x.get_state()[1:])
elif isinstance(x, Time):
val = tuple(getattr(x, Time._time_scale_attr_map[t]) for t in TimeScale)
val = tuple(getattr(x, graph_scheduler.time._time_scale_to_attr_str(t)) for t in TimeScale)
elif isinstance(x, Component):
return x._get_state_initializer(context)
elif isinstance(x, ContentAddressableList):
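For reference, a minimal sketch of what the rewritten Time branch computes, assuming graph_scheduler exposes Time and TimeScale at the top level and that _time_scale_to_attr_str maps, e.g., TimeScale.PASS to the attribute name 'pass_':

    import graph_scheduler
    from graph_scheduler import TimeScale

    # Serialize a Time object's counters in TimeScale order, mirroring
    # the state-initializer branch above.
    t = graph_scheduler.Time()
    t.run, t.trial, t.time_step = 1, 2, 3
    vals = tuple(
        getattr(t, graph_scheduler.time._time_scale_to_attr_str(ts))
        for ts in TimeScale
    )
    # With TimeScale ordered TIME_STEP, PASS, TRIAL, RUN, LIFE, this
    # would yield (3, 0, 2, 1, 0).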
@@ -3087,19 +3088,7 @@ def _execute(self, variable=None, context=None, runtime_params=None, **kwargs):
pass

self.most_recent_context = context

# Restore runtime_params to previous value
if runtime_params:
for param in runtime_params:
try:
prev_val = getattr(self.parameters, param).get_previous(context)
self._set_parameter_value(param, prev_val, context)
except AttributeError:
try:
prev_val = getattr(self.function.parameters, param).get_previous(context)
self.function._set_parameter_value(param, prev_val, context)
except:
pass
self._reset_runtime_parameters(context)

return value

@@ -3166,6 +3155,16 @@ def _get_current_parameter_value(self, parameter, context=None):

return parameter._get(context)

def _reset_runtime_parameters(self, context):
if context.execution_id in self._runtime_params_reset:
for key in self._runtime_params_reset[context.execution_id]:
self._set_parameter_value(
key,
self._runtime_params_reset[context.execution_id][key],
context
)
self._runtime_params_reset[context.execution_id] = {}

def _try_execute_param(self, param, var, context=None):
def fill_recursively(arr, value, indices=()):
if arr.ndim == 0:
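The per-node restore logic removed from Composition.execute (see the composition.py diff below) now lives in this single helper. An illustrative standalone sketch of the pattern, not the PsyNeuLink implementation: overrides are recorded per execution_id and rolled back after the execution that used them.

    class RuntimeParamsMixin:
        # Hypothetical mixin illustrating the reset pattern.
        def __init__(self):
            # execution_id -> {param_name: original_value}
            self._runtime_params_reset = {}

        def set_runtime(self, param, value, execution_id):
            store = self._runtime_params_reset.setdefault(execution_id, {})
            store.setdefault(param, getattr(self, param))  # remember original once
            setattr(self, param, value)

        def _reset_runtime_parameters(self, execution_id):
            for param, original in self._runtime_params_reset.get(execution_id, {}).items():
                setattr(self, param, original)
            self._runtime_params_reset[execution_id] = {}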
1 change: 1 addition & 0 deletions psyneulink/core/components/functions/function.py
@@ -647,6 +647,7 @@ def function(self,
raise FunctionError(err_msg)
self.most_recent_context = context
self.parameters.value._set(value, context=context)
self._reset_runtime_parameters(context)
return value

@abc.abstractmethod
3 changes: 2 additions & 1 deletion psyneulink/core/components/mechanisms/mechanism.py
@@ -1109,7 +1109,8 @@
from psyneulink.core.globals.utilities import \
ContentAddressableList, append_type_to_name, convert_all_elements_to_np_array, convert_to_np_array, \
iscompatible, kwCompatibilityNumeric, convert_to_list
from psyneulink.core.scheduling.condition import Condition, TimeScale
from psyneulink.core.scheduling.condition import Condition
from psyneulink.core.scheduling.time import TimeScale

__all__ = [
'Mechanism_Base', 'MechanismError', 'MechanismRegistry'
(file name not shown)
@@ -852,7 +852,7 @@
from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel
from psyneulink.core.globals.utilities import \
all_within_range, append_type_to_name, iscompatible, is_comparison_operator, convert_to_np_array, safe_equals
from psyneulink.core.scheduling.condition import TimeScale
from psyneulink.core.scheduling.time import TimeScale

__all__ = [
'INITIAL_VALUE', 'CLIP', 'INTEGRATOR_FUNCTION', 'INTEGRATION_RATE',
49 changes: 32 additions & 17 deletions psyneulink/core/compositions/composition.py
@@ -2361,6 +2361,7 @@ def input_function(env, result):
from copy import deepcopy, copy
from inspect import isgenerator, isgeneratorfunction

import graph_scheduler
import networkx
import numpy as np
import pint
@@ -3417,7 +3418,7 @@ def scheduler(self):
"""
if self.needs_update_scheduler or not isinstance(self._scheduler, Scheduler):
old_scheduler = self._scheduler
self._scheduler = Scheduler(graph=self.graph_processing, default_execution_id=self.default_execution_id)
self._scheduler = Scheduler(composition=self)

if old_scheduler is not None:
self._scheduler.add_condition_set(old_scheduler.conditions)
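Since scheduling is now delegated to the external graph_scheduler package, a Scheduler is built directly from the Composition. A hedged usage sketch contrasting the two constructor forms (the composition= keyword is taken from the diff above; the composition itself is illustrative):

    import psyneulink as pnl

    comp = pnl.Composition(name='comp')

    # New form: the Scheduler derives the graph and default execution id itself
    sched = pnl.Scheduler(composition=comp)

    # Old form, replaced above:
    # sched = pnl.Scheduler(graph=comp.graph_processing,
    #                       default_execution_id=comp.default_execution_id)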
@@ -5509,6 +5510,31 @@ def _check_for_unnecessary_feedback_projections(self):
)
)

def _check_for_nesting_with_absolute_conditions(self, scheduler, termination_conds=None):
if any(isinstance(n, Composition) for n in self.nodes):
interval_conds = set()
fixed_point_conds = set()
for _, cond in scheduler.get_absolute_conditions(termination_conds).items():
if len(cond.absolute_intervals) > 0:
interval_conds.add(cond)
if scheduler.mode == SchedulingMode.EXACT_TIME:
if len(cond.absolute_fixed_points) > 0:
fixed_point_conds.add(cond)

warn_str = f'{self} contains a nested Composition, which may cause unexpected behavior in absolute time conditions or failure to terminate execution.'
warn = False
if len(interval_conds) > 0:
warn_str += '\nFor repeating intervals:\n\t'
warn_str += '\n\t'.join([f'{cond.owner}: {cond}\n\t\tintervals: {cond.absolute_intervals}' for cond in interval_conds])
warn = True
if len(fixed_point_conds) > 0:
warn_str += '\nIn EXACT_TIME SchedulingMode, strict time points:\n\t'
warn_str += '\n\t'.join([f'{cond.owner}: {cond}\n\t\tstrict time points: {cond.absolute_fixed_points}' for cond in fixed_point_conds])
warn = True

if warn:
warnings.warn(warn_str)

# ******************************************************************************************************************
# PATHWAYS
# ******************************************************************************************************************
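A hedged sketch of the situation the new check flags: an absolute-time condition attached to a node of a Composition that contains a nested Composition. It assumes TimeInterval is among the absolute conditions re-exported from graph_scheduler and that bare numbers default to milliseconds; all names here are illustrative.

    import psyneulink as pnl

    inner_mech = pnl.TransferMechanism(name='inner_mech')
    inner = pnl.Composition(pathways=[inner_mech], name='inner')
    outer_mech = pnl.TransferMechanism(name='outer_mech')
    outer = pnl.Composition(pathways=[[inner, outer_mech]], name='outer')

    # Repeating absolute interval: run outer_mech every 100 (assumed ms)
    outer.scheduler.add_condition(outer_mech, pnl.TimeInterval(repeat=100))

    # Expected to emit the nesting warning constructed above
    outer.run(inputs={inner: [[0.0]]})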
@@ -8316,6 +8342,7 @@ def run(
self._analyze_graph(context=context)

self._check_for_unnecessary_feedback_projections()
self._check_for_nesting_with_absolute_conditions(scheduler, termination_processing)

# set auto logging if it's not already set, and if log argument is True
if log:
@@ -8934,7 +8961,10 @@ def execute(
# TODO: scheduler counts and clocks were not expected to be
# used prior to Scheduler.run calls. Remove this hack when
# accommodation is written
execution_scheduler._init_counts(context.execution_id, base_context.execution_id)
try:
execution_scheduler._init_counts(context.execution_id, base_context.execution_id)
except graph_scheduler.SchedulerError:
execution_scheduler._init_counts(context.execution_id)

# If execute method is called directly, need to create Report object for reporting
if not (context.source & ContextFlags.COMPOSITION) or report_num is None:
@@ -9399,21 +9429,6 @@ def execute(
report_num=report_num,
runtime_params=execution_runtime_params,
)
# Reset runtim_params
# Reset any specified for Mechanism
if context.execution_id in node._runtime_params_reset:
for key in node._runtime_params_reset[context.execution_id]:
node._set_parameter_value(key, node._runtime_params_reset[context.execution_id][key],
context)
node._runtime_params_reset[context.execution_id] = {}
# Reset any specified for Mechanism's function
if context.execution_id in node.function._runtime_params_reset:
for key in node.function._runtime_params_reset[context.execution_id]:
node.function._set_parameter_value(
key,
node.function._runtime_params_reset[context.execution_id][key],
context)
node.function._runtime_params_reset[context.execution_id] = {}

# Set execution_phase for node's context back to IDLE
if self._is_learning(context):
18 changes: 9 additions & 9 deletions psyneulink/core/globals/log.py
@@ -1886,9 +1886,9 @@ def _log_trials_and_runs(composition, curr_condition: tc.enum(LogCondition.TRIAL
for mech in composition.mechanisms:
for component in mech.log.loggable_components:
if component.logPref & curr_condition:
# value = LogEntry((composition.scheduler.clock.time.run,
# composition.scheduler.clock.time.trial,
# composition.scheduler.clock.time.time_step),
# value = LogEntry((composition.scheduler.get_clock(context).time.run,
# composition.scheduler.get_clock(context).time.trial,
# composition.scheduler.get_clock(context).time.time_step),
# # context,
# curr_condition,
# component.value)
@@ -1898,9 +1898,9 @@ def _log_trials_and_runs(composition, curr_condition: tc.enum(LogCondition.TRIAL
for proj in mech.afferents:
for component in proj.log.loggable_components:
if component.logPref & curr_condition:
# value = LogEntry((composition.scheduler.clock.time.run,
# composition.scheduler.clock.time.trial,
# composition.scheduler.clock.time.time_step),
# value = LogEntry((composition.scheduler.get_clock(context).time.run,
# composition.scheduler.get_clock(context).time.trial,
# composition.scheduler.get_clock(context).time.time_step),
# context,
# component.value)
# component.log._log_value(value, context)
@@ -1910,9 +1910,9 @@ def _log_trials_and_runs(composition, curr_condition: tc.enum(LogCondition.TRIAL
# for proj in composition.projections:
# for component in proj.log.loggable_components:
# if component.logPref & curr_condition:
# value = LogEntry((composition.scheduler.clock.time.run,
# composition.scheduler.clock.time.trial,
# composition.scheduler.clock.time.time_step),
# value = LogEntry((composition.scheduler.get_clock(context).time.run,
# composition.scheduler.get_clock(context).time.trial,
# composition.scheduler.get_clock(context).time.time_step),
# context,
# component.value)
# component.log._log_value(value, context)
13 changes: 13 additions & 0 deletions psyneulink/core/llvm/__init__.py
@@ -12,6 +12,7 @@
import enum
import functools
import numpy as np
import time
from typing import Set

from llvmlite import ir
@@ -89,9 +90,16 @@ def __init__(self, name: str):
f = _find_llvm_function(self.name, _compiled_modules() | _staged_modules())

# Create ctype function instance
start = time.perf_counter()
return_type = _convert_llvm_ir_to_ctype(f.return_value.type)
params = [_convert_llvm_ir_to_ctype(a.type) for a in f.args]
middle = time.perf_counter()
self.__c_func_type = ctypes.CFUNCTYPE(return_type, *params)
finish = time.perf_counter()

if "time_stat" in debug_env:
print("Time to create ctype function '{}': {} ({} to create types)".format(
name, finish - start, middle - start))

self.byref_arg_types = [p._type_ for p in params]

@@ -157,6 +165,7 @@ def get_multi_run(self):

# Initialize builtins
def init_builtins():
start = time.perf_counter()
with LLVMBuilderContext.get_global() as ctx:
# Numeric
builtins.setup_pnl_intrinsics(ctx)
@@ -182,6 +191,10 @@ def init_builtins():
builtins.setup_mat_scalar_mult(ctx)
builtins.setup_mat_scalar_add(ctx)

finish = time.perf_counter()

if "time_stat" in debug_env:
print("Time to setup PNL builtins: {}".format(finish - start))

def cleanup():
_cpu_engine.clean_module()
4 changes: 2 additions & 2 deletions psyneulink/core/llvm/builder_context.py
@@ -19,7 +19,7 @@
import re
from typing import Set
import weakref
from psyneulink.core.scheduling.time import Time
from psyneulink.core.scheduling.time import Time, TimeScale
from psyneulink.core.globals.sampleiterator import SampleIterator
from psyneulink.core.globals.utilities import ContentAddressableList
from psyneulink.core import llvm as pnlvm
@@ -337,7 +337,7 @@ def convert_python_struct_to_llvm_ir(self, t):
elif isinstance(t, np.random.RandomState):
return pnlvm.builtins.get_mersenne_twister_state_struct(self)
elif isinstance(t, Time):
return ir.ArrayType(self.int32_ty, len(Time._time_scale_attr_map))
return ir.ArrayType(self.int32_ty, len(TimeScale))
elif isinstance(t, SampleIterator):
if isinstance(t.generator, list):
return ir.ArrayType(self.float_ty, len(t.generator))
1 change: 1 addition & 0 deletions psyneulink/core/llvm/debug.py
@@ -18,6 +18,7 @@
Increased debug output:
* "compile" -- prints information messages when modules are compiled
* "stat" -- prints code generation and compilation statistics
* "time_stat" -- print compilation and code generation times
* "cuda_data" -- print data upload/download statistic (to GPU VRAM)
* "comp_node_debug" -- print intermediate results after execution composition node wrapper.
* "print_values" -- Enabled printfs in llvm code (from ctx printf helper)
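The new flag is used like the existing ones. A usage sketch, assuming the flags are read from the PNL_LLVM_DEBUG environment variable at import time and are semicolon-separated, and that bin_execute=True requests compiled execution in this release; the model is illustrative:

    import os
    os.environ['PNL_LLVM_DEBUG'] = 'time_stat;stat'  # combine flags with ';'

    import psyneulink as pnl

    mech = pnl.TransferMechanism(name='mech')
    comp = pnl.Composition(pathways=[mech], name='comp')

    # Timing lines print as modules are parsed, optimized, finalized,
    # and wrapped into ctypes functions.
    comp.run(inputs={mech: [[1.0]]}, bin_execute=True)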
4 changes: 2 additions & 2 deletions psyneulink/core/llvm/helpers.py
@@ -14,9 +14,9 @@
from llvmlite import ir

from .debug import debug_env
from ..scheduling.condition import All, AllHaveRun, Always, Any, AtPass, AtTrial, BeforeNCalls, AtNCalls, AfterNCalls, \
from psyneulink.core.scheduling.condition import All, AllHaveRun, Always, Any, AtPass, AtTrial, BeforeNCalls, AtNCalls, AfterNCalls, \
EveryNCalls, Never, Not, WhenFinished, WhenFinishedAny, WhenFinishedAll
from ..scheduling.time import TimeScale
from psyneulink.core.scheduling.time import TimeScale


@contextmanager
27 changes: 27 additions & 0 deletions psyneulink/core/llvm/jit_engine.py
@@ -9,6 +9,7 @@
# ********************************************* LLVM bindings **************************************************************

from llvmlite import binding
import time
import warnings

from .builder_context import LLVMBuilderContext, _find_llvm_function, _gen_cuda_kernel_wrapper_module
@@ -165,7 +166,13 @@ def __del__(self):
print("Total parsed modules in '{}': {}".format(s, self.__parsed_modules))

def opt_and_add_bin_module(self, module):
start = time.perf_counter()
self._pass_manager.run(module)
finish = time.perf_counter()

if "time_stat" in debug_env:
print("Time to optimize LLVM module bundle '{}': {}".format(module.name, finish - start))

if "opt" in self.__debug_env:
with open(self.__class__.__name__ + '-' + str(self.__optimized_modules) + '.opt.ll', 'w') as dump_file:
dump_file.write(str(module))
@@ -175,8 +182,12 @@ def opt_and_add_bin_module(self, module):
with open(self.__class__.__name__ + '-' + str(self.__optimized_modules) + '.S', 'w') as dump_file:
dump_file.write(self._target_machine.emit_assembly(module))

start = time.perf_counter()
self._engine.add_module(module)
self._engine.finalize_object()
finish = time.perf_counter()
if "time_stat" in debug_env:
print("Time to finalize LLVM module bundle '{}': {}".format(module.name, finish - start))
self.__optimized_modules += 1

def _remove_bin_module(self, module):
@@ -228,7 +239,14 @@ def compile_staged(self):
mod_bundle = binding.parse_assembly("")
while self.staged_modules:
m = self.staged_modules.pop()

start = time.perf_counter()
new_mod = _try_parse_module(m)
finish = time.perf_counter()

if "time_stat" in debug_env:
print("Time to parse LLVM modules '{}': {}".format(m.name, finish - start))

self.__parsed_modules += 1
if new_mod is not None:
mod_bundle.link_in(new_mod)
@@ -276,11 +294,20 @@ def set_object_cache(cache):
def add_module(self, module):
try:
# LLVM can't produce CUBIN for some reason
start_time = time.perf_counter()
ptx = self._target_machine.emit_assembly(module)
ptx_time = time.perf_counter()
mod = pycuda.compiler.DynamicModule()
mod.add_data(self._generated_builtins, pycuda.driver.jit_input_type.CUBIN, "builtins.cubin")
mod.add_data(ptx.encode(), pycuda.driver.jit_input_type.PTX, module.name + ".ptx")
module_time = time.perf_counter()
ptx_mod = mod.link()
finish_time = time.perf_counter()
if "time_stat" in debug_env:
print("Time to emit PTX module bundle '{}'({} lines): {}".format(module.name, len(ptx.splitlines()), ptx_time - start_time))
print("Time to add PTX module bundle '{}': {}".format(module.name, module_time - ptx_time))
print("Time to link PTX module bundle '{}': {}".format(module.name, finish_time - module_time))
print("Total time to process PTX module bundle '{}': {}".format(module.name, finish_time - start_time))

except Exception as e:
print("FAILED to generate PTX module:", e)
(diffs for the remaining 13 files not shown)
