diff --git a/.github/actions/install-pnl/action.yml b/.github/actions/install-pnl/action.yml index 8571a3293b0..cd4dca7dbe9 100644 --- a/.github/actions/install-pnl/action.yml +++ b/.github/actions/install-pnl/action.yml @@ -48,6 +48,17 @@ runs: [[ ${{ runner.os }} = Windows* ]] && pip install "pywinpty<1" "terminado<0.10" fi + - name: Install updated package + if: ${{ startsWith(github.head_ref, 'dependabot/pip') && matrix.pnl-version != 'base' }} + shell: bash + id: new_package + run: | + python -m pip install --upgrade pip wheel + export NEW_PACKAGE=`echo '${{ github.head_ref }}' | cut -f 4 -d/ | sed 's/-gt.*//' | sed 's/-lt.*//'` + echo "::set-output name=new_package::$NEW_PACKAGE" + pip install "`echo $NEW_PACKAGE | sed 's/[-_]/./g' | xargs grep *requirements.txt -h -e | head -n1`" + pip show "$NEW_PACKAGE" | grep 'Version' | tee new_version.deps + - name: Python dependencies shell: bash run: | @@ -66,3 +77,11 @@ runs: pip cache remove -v $P || true fi done + + - name: Check updated package + if: ${{ startsWith(github.head_ref, 'dependabot/pip') && matrix.pnl-version != 'base' }} + shell: bash + run: | + pip show ${{ steps.new_package.outputs.new_package }} | grep 'Version' | tee installed_version.deps + cmp -s new_version.deps installed_version.deps || echo "::error::Package version restricted by dependencies: ${{ steps.new_package.outputs.new_package }}" + diff new_version.deps installed_version.deps diff --git a/.github/workflows/compare-comment.yml b/.github/workflows/compare-comment.yml index 61bf6896a5d..15f5e85cf6d 100644 --- a/.github/workflows/compare-comment.yml +++ b/.github/workflows/compare-comment.yml @@ -18,7 +18,7 @@ jobs: steps: - name: 'Download docs artifacts' id: docs-artifacts - uses: actions/github-script@v5 + uses: actions/github-script@v6 with: script: | var artifacts = await github.rest.actions.listWorkflowRunArtifacts({ @@ -70,7 +70,7 @@ jobs: (diff -r docs-base docs-head && echo 'No differences!' 
|| true) | tee ./result.diff - name: Post comment with docs diff - uses: actions/github-script@v5 + uses: actions/github-script@v6 with: script: | var fs = require('fs'); diff --git a/.github/workflows/pnl-ci-docs.yml b/.github/workflows/pnl-ci-docs.yml index f2396ef7a04..a37c9e7a250 100644 --- a/.github/workflows/pnl-ci-docs.yml +++ b/.github/workflows/pnl-ci-docs.yml @@ -65,7 +65,7 @@ jobs: branch: master - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v3 + uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} architecture: ${{ matrix.python-architecture }} @@ -94,17 +94,21 @@ jobs: - name: Add git tag # The generated docs include PNL version, # set it to a fixed value to prevent polluting the diff + # This needs to be done after installing PNL + # to not interfere with dependency resolution + id: add_zero_tag if: github.event_name == 'pull_request' - run: git tag --force 'v999.999.999.999' + run: git tag --force 'v0.0.0.0' - name: Build Documentation run: make -C docs/ html -e SPHINXOPTS="-aE -j auto" - name: Remove git tag # The generated docs include PNL version, - # This was set to a fixed value to prevent polluting the diff - if: github.event_name == 'pull_request' && always() - run: git tag -d 'v999.999.999.999' + # A special tag was set to a fixed value + # to prevent polluting the diff + if: steps.add_zero_tag.outcome != 'skipped' + run: git tag -d 'v0.0.0.0' - name: Upload Documentation uses: actions/upload-artifact@v3 @@ -151,7 +155,7 @@ jobs: ref: gh-pages - name: Download branch docs - uses: actions/download-artifact@v2 + uses: actions/download-artifact@v3 with: name: Documentation-head-${{ matrix.os }}-${{ matrix.python-version }}-x64 path: _built_docs/${{ github.ref }} @@ -168,7 +172,7 @@ jobs: if: github.ref == 'refs/heads/master' || github.ref == 'refs/heads/devel' || github.ref == 'refs/heads/docs' - name: Download main docs - uses: actions/download-artifact@v2 + uses: actions/download-artifact@v3 with: name: Documentation-head-${{ matrix.os }}-${{ matrix.python-version }}-x64 # This overwrites files in current directory diff --git a/.github/workflows/pnl-ci.yml b/.github/workflows/pnl-ci.yml index 25227973e1d..b97eaa55ecf 100644 --- a/.github/workflows/pnl-ci.yml +++ b/.github/workflows/pnl-ci.yml @@ -22,10 +22,6 @@ jobs: extra-args: [''] os: [ubuntu-latest, macos-latest, windows-latest] include: - # 3.7 is broken on macos-11, https://github.com/actions/virtual-environments/issues/4230 - - python-version: 3.7 - python-architecture: 'x64' - os: macos-10.15 # add 32-bit build on windows - python-version: 3.8 python-architecture: 'x86' @@ -54,7 +50,7 @@ jobs: run: git fetch --tags origin master - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v3 + uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} architecture: ${{ matrix.python-architecture }} diff --git a/.github/workflows/test-release.yml b/.github/workflows/test-release.yml index 32b6467d85e..0b7887ea5ee 100644 --- a/.github/workflows/test-release.yml +++ b/.github/workflows/test-release.yml @@ -21,7 +21,7 @@ jobs: uses: actions/checkout@v3 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v3 + uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} @@ -78,13 +78,13 @@ jobs: steps: - name: Download dist files - uses: actions/download-artifact@v2 + uses: actions/download-artifact@v3 with: name: Python-dist-files path: dist/ - name: Set up Python 
${{ matrix.python-version }} - uses: actions/setup-python@v3 + uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} @@ -141,7 +141,7 @@ jobs: steps: - name: Download dist files - uses: actions/download-artifact@v2 + uses: actions/download-artifact@v3 with: name: Python-dist-files path: dist/ @@ -175,13 +175,13 @@ jobs: steps: - name: Download dist files - uses: actions/download-artifact@v2 + uses: actions/download-artifact@v3 with: name: Python-dist-files path: dist/ - name: Upload dist files to release - uses: actions/github-script@v5 + uses: actions/github-script@v6 with: script: | const fs = require('fs') diff --git a/.gitignore b/.gitignore index 0b0f973f543..84bfbe22d2a 100644 --- a/.gitignore +++ b/.gitignore @@ -1,9 +1,9 @@ # Created by https://www.gitignore.io/api/osx,python,pycharm -# Ignore JSON files created in tests/json/ +# Ignore JSON files created in tests/mdf/ # Maybe these should be generated in tmpdir instead -tests/json/*.json +tests/mdf/*.json # Log files created by SLURM jobs in this directory Scripts/Debug/predator_prey_opt/logs/ diff --git a/conftest.py b/conftest.py index 8521ae6c014..94a4de81cc4 100644 --- a/conftest.py +++ b/conftest.py @@ -33,7 +33,22 @@ def pytest_addoption(parser): parser.addoption('--{0}'.format(mark_stress_tests), action='store_true', default=False, help='Run {0} tests (long)'.format(mark_stress_tests)) + parser.addoption('--fp-precision', action='store', default='fp64', choices=['fp32', 'fp64'], + help='Set default fp precision for the runtime compiler. Default: fp64') + +def pytest_sessionstart(session): + precision = session.config.getvalue("--fp-precision") + if precision == 'fp64': + pnlvm.LLVMBuilderContext.default_float_ty = pnlvm.ir.DoubleType() + elif precision == 'fp32': + pnlvm.LLVMBuilderContext.default_float_ty = pnlvm.ir.FloatType() + else: + assert False, "Unsupported precision parameter: {}".format(precision) + def pytest_runtest_setup(item): + # Check that all 'cuda' tests are also marked 'llvm' + assert 'llvm' in item.keywords or 'cuda' not in item.keywords + for m in marks_default_skip: if m in item.keywords and not item.config.getvalue(m): pytest.skip('{0} tests not requested'.format(m)) @@ -97,6 +112,16 @@ def comp_mode_no_llvm(): # dummy fixture to allow 'comp_mode' filtering pass +@pytest.helpers.register +def llvm_current_fp_precision(): + float_ty = pnlvm.LLVMBuilderContext.get_current().float_ty + if float_ty == pnlvm.ir.DoubleType(): + return 'fp64' + elif float_ty == pnlvm.ir.FloatType(): + return 'fp32' + else: + assert False, "Unknown floating point type: {}".format(float_ty) + @pytest.helpers.register def get_comp_execution_modes(): return [pytest.param(pnlvm.ExecutionMode.Python), diff --git a/cuda_requirements.txt b/cuda_requirements.txt index 9a6d83d22c4..63e22850e71 100644 --- a/cuda_requirements.txt +++ b/cuda_requirements.txt @@ -1 +1 @@ -pycuda >2018, <2022 +pycuda >2018, <2023 diff --git a/dev_requirements.txt b/dev_requirements.txt index 95b05810996..ad283dfc78d 100644 --- a/dev_requirements.txt +++ b/dev_requirements.txt @@ -1,5 +1,5 @@ jupyter<=1.0.0 -pytest<7.1.2 +pytest<7.1.3 pytest-benchmark<3.4.2 pytest-cov<3.0.1 pytest-helpers-namespace<2021.12.30 diff --git a/doc_requirements.txt b/doc_requirements.txt index 043ea79e043..f4c95bd01e8 100644 --- a/doc_requirements.txt +++ b/doc_requirements.txt @@ -1,3 +1,3 @@ -psyneulink-sphinx-theme<1.2.3.1 +psyneulink-sphinx-theme<1.2.4.1 sphinx<4.2.1 sphinx_autodoc_typehints<1.16.0 diff --git a/docs/source/_static/Pathways_fig.svg 
b/docs/source/_static/Pathways_fig.svg new file mode 100644 index 00000000000..a13eea7854f --- /dev/null +++ b/docs/source/_static/Pathways_fig.svg @@ -0,0 +1,2489 @@
[SVG figure content elided -- only the figure's text labels survived extraction. The new 2,489-line Pathways_fig.svg illustrates Pathway specification formats across panels labeled i-x, e.g. [{A,B},C], [[A],{B,C}], [A,{B,C,D},E], [{A,B,C},D,{E,F,G}], [{A,B},{C,D}], a nested composition icomp={B,C,D} used as [A,{icomp},{E}], and projection-based specs such as A_F = MappingProjection(A, F), C_D = MappingProjection(C, D), matrix = [3], [{A,B,C},{A_F,C_D},{D,E,F}] and [{A,B,C},[A_F,C_D,matrix],{D,E,F}]. The SVG markup itself is not recoverable and is summarized here rather than reproduced.]
diff --git a/psyneulink/core/components/component.py b/psyneulink/core/components/component.py index bbe4c760afe..e62540ece0f 100644 --- a/psyneulink/core/components/component.py +++ b/psyneulink/core/components/component.py @@ -513,7 +513,7 @@ from psyneulink.core import llvm as pnlvm from psyneulink.core.globals.context import \ Context, ContextError, ContextFlags, INITIALIZATION_STATUS_FLAGS, _get_time, handle_external_context -from psyneulink.core.globals.json import JSONDumpable +from psyneulink.core.globals.mdf import MDFSerializable from psyneulink.core.globals.keywords import \ CONTEXT, CONTROL_PROJECTION, DEFERRED_INITIALIZATION, EXECUTE_UNTIL_FINISHED, \ FUNCTION, FUNCTION_PARAMS, INIT_FULL_EXECUTE_METHOD, INPUT_PORTS, \ @@ -525,7 +525,7 @@ RESET_STATEFUL_FUNCTION_WHEN, VALUE, VARIABLE from psyneulink.core.globals.log import LogCondition from psyneulink.core.globals.parameters import \ - Defaults, SharedParameter, Parameter, ParameterAlias, ParameterError, ParametersBase, copy_parameter_value + Defaults, SharedParameter, Parameter, ParameterAlias, ParameterError, ParametersBase, check_user_specified, copy_parameter_value from psyneulink.core.globals.preferences.basepreferenceset import BasePreferenceSet, VERBOSE_PREF from psyneulink.core.globals.preferences.preferenceset import \ PreferenceLevel, PreferenceSet, _assign_prefs @@ -724,7 +724,7 @@ def class_defaults(self): return self.defaults -class Component(JSONDumpable, metaclass=ComponentsMeta): +class Component(MDFSerializable, metaclass=ComponentsMeta): """ Component( \ default_variable=None, \ @@ -909,7 +909,7 @@ class Component(JSONDumpable, metaclass=ComponentsMeta): standard_constructor_args = [RESET_STATEFUL_FUNCTION_WHEN,
EXECUTE_UNTIL_FINISHED, MAX_EXECUTIONS_BEFORE_FINISHED] - # helper attributes for JSON model spec + # helper attributes for MDF model spec _model_spec_id_parameters = 'parameters' _model_spec_id_stateful_parameters = 'stateful_parameters' @@ -1084,10 +1084,9 @@ def _parse_modulable(self, param_name, param_value): # insuring that assignment by one instance will not affect the value of others. name = None - _deepcopy_shared_keys = frozenset([ - '_init_args', - ]) + _deepcopy_shared_keys = frozenset([]) + @check_user_specified def __init__(self, default_variable, param_defaults, @@ -1303,6 +1302,9 @@ def __deepcopy__(self, memo): newone.parameters._owner = newone newone.defaults._owner = newone + for p in newone.parameters: + p._owner = newone.parameters + # by copying, this instance is no longer "inherent" to a single # 'import psyneulink' call newone._is_pnl_inherent = False @@ -1331,6 +1333,10 @@ def _get_compilation_state(self): if hasattr(self, 'nodes'): whitelist.add("num_executions") + # Drop combination function params from RTM if not needed + if getattr(self.parameters, 'has_recurrent_input_port', False): + blacklist.update(['combination_function']) + def _is_compilation_state(p): #FIXME: This should use defaults instead of 'p.get' return p.name not in blacklist and \ @@ -1362,7 +1368,7 @@ def _convert(p): state['buffer'], state['uinteger'], state['buffer_pos'], state['has_uint32'], x.used_seed[0])) elif isinstance(x, Time): - val = tuple(getattr(x, graph_scheduler.time._time_scale_to_attr_str(t)) for t in TimeScale) + val = tuple(x._get_by_time_scale(t) for t in TimeScale) elif isinstance(x, Component): return x._get_state_initializer(context) elif isinstance(x, ContentAddressableList): @@ -1423,6 +1429,10 @@ def _get_compilation_params(self): # "has_initializers" is only used by RTM blacklist.update(["has_initializers"]) + # Drop combination function params from RTM if not needed + if getattr(self.parameters, 'has_recurrent_input_port', False): + blacklist.update(['combination_function']) + def _is_compilation_param(p): if p.name not in blacklist and not isinstance(p, (ParameterAlias, SharedParameter)): #FIXME: this should use defaults @@ -2015,32 +2025,30 @@ def _initialize_parameters(self, context=None, **param_defaults): } if param_defaults is not None: - # Exclude any function_params from the items to set on this Component - # because these should just be pointers to the parameters of the same - # name on this Component's function - # Exclude any pass parameters whose value is None (assume this means "use the normal default") - d = { - k: v for (k, v) in param_defaults.items() - if ( - ( - k not in defaults - and k not in alias_names - ) - or v is not None - ) - } - for p in d: + for name, value in copy.copy(param_defaults).items(): try: - parameter_obj = getattr(self.parameters, p) + parameter_obj = getattr(self.parameters, name) except AttributeError: - # p in param_defaults does not correspond to a Parameter + # name in param_defaults does not correspond to a Parameter continue - if d[p] is not None: + if ( + name not in self._user_specified_args + and parameter_obj.constructor_argument not in self._user_specified_args + ): + continue + + if ( + ( + name in self._user_specified_args + or parameter_obj.constructor_argument in self._user_specified_args + ) + and (value is not None or parameter_obj.specify_none) + ): parameter_obj._user_specified = True if parameter_obj.structural: - parameter_obj.spec = d[p] + parameter_obj.spec = value if parameter_obj.modulable: # later, 
validate this @@ -2049,17 +2057,18 @@ def _initialize_parameters(self, context=None, **param_defaults): parse=True, modulable=True ) - parsed = modulable_param_parser(p, d[p]) + parsed = modulable_param_parser(name, value) - if parsed is not d[p]: + if parsed is not value: # we have a modulable param spec - parameter_obj.spec = d[p] - d[p] = parsed - param_defaults[p] = parsed + parameter_obj.spec = value + value = parsed + param_defaults[name] = parsed except AttributeError: pass - defaults.update(d) + if value is not None or parameter_obj.specify_none: + defaults[name] = value for k in defaults: defaults[k] = copy_parameter_value( @@ -3712,7 +3721,9 @@ def parse_parameter_value(value, no_expand_components=False, functions_as_dill=F else: try: value = value.as_mdf_model(simple_edge_format=False) - except TypeError: + except TypeError as e: + if "got an unexpected keyword argument 'simple_edge_format'" not in str(e): + raise value = value.as_mdf_model() elif isinstance(value, ComponentsMeta): value = value.__name__ diff --git a/psyneulink/core/components/functions/function.py b/psyneulink/core/components/functions/function.py index cda41d037bf..968cd52a77c 100644 --- a/psyneulink/core/components/functions/function.py +++ b/psyneulink/core/components/functions/function.py @@ -159,13 +159,13 @@ IDENTITY_MATRIX, INVERSE_HOLLOW_MATRIX, NAME, PREFERENCE_SET_NAME, RANDOM_CONNECTIVITY_MATRIX, VALUE, VARIABLE, MODEL_SPEC_ID_METADATA, MODEL_SPEC_ID_MDF_VARIABLE ) -from psyneulink.core.globals.parameters import Parameter +from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import REPORT_OUTPUT_PREF, is_pref_set from psyneulink.core.globals.preferences.preferenceset import PreferenceEntry, PreferenceLevel from psyneulink.core.globals.registry import register_category from psyneulink.core.globals.utilities import ( convert_to_np_array, get_global_seed, is_instance_or_subclass, object_has_single_value, parameter_spec, parse_valid_identifier, safe_len, - SeededRandomState, contains_type + SeededRandomState, contains_type, is_numeric ) __all__ = [ @@ -605,6 +605,7 @@ def _validate_changes_shape(self, param): # Note: the following enforce encoding as 1D np.ndarrays (one array per variable) variableEncodingDim = 1 + @check_user_specified @abc.abstractmethod def __init__( self, @@ -897,7 +898,7 @@ def as_mdf_model(self): if typ not in mdf_functions.mdf_functions: warnings.warn(f'{typ} is not an MDF standard function, this is likely to produce an incompatible model.') - model.function = {typ: parameters[self._model_spec_id_parameters]} + model.function = typ return model @@ -995,6 +996,7 @@ class Manner(Enum): # These are used both to type-cast the params, and as defaults if none are assigned # in the initialization call or later (using either _instantiate_defaults or during a function call) + @check_user_specified def __init__(self, default_variable=None, propensity=10.0, @@ -1145,6 +1147,7 @@ class Parameters(Function_Base.Parameters): REPORT_OUTPUT_PREF: PreferenceEntry(False, PreferenceLevel.INSTANCE), } + @check_user_specified @tc.typecheck def __init__(self, function, @@ -1185,7 +1188,14 @@ def get_matrix(specification, rows=1, cols=1, context=None): # Matrix provided (and validated in _validate_params); convert to array if isinstance(specification, (list, np.matrix)): - return convert_to_np_array(specification) + # # MODIFIED 4/9/22 OLD: + # return convert_to_np_array(specification) + # MODIFIED 4/9/22 NEW: + if 
is_numeric(specification): + return convert_to_np_array(specification) + else: + return + # MODIFIED 4/9/22 END if isinstance(specification, np.ndarray): if specification.ndim == 2: diff --git a/psyneulink/core/components/functions/nonstateful/combinationfunctions.py b/psyneulink/core/components/functions/nonstateful/combinationfunctions.py index e91fd02a118..be28b1d62eb 100644 --- a/psyneulink/core/components/functions/nonstateful/combinationfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/combinationfunctions.py @@ -45,7 +45,7 @@ PREFERENCE_SET_NAME, VARIABLE from psyneulink.core.globals.utilities import convert_to_np_array, is_numeric, np_array_less_than_2d, parameter_spec from psyneulink.core.globals.context import ContextFlags -from psyneulink.core.globals.parameters import Parameter +from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import \ REPORT_OUTPUT_PREF, is_pref_set, PreferenceEntry, PreferenceLevel @@ -201,6 +201,7 @@ class Parameters(CombinationFunction.Parameters): offset = Parameter(0.0, modulable=True, aliases=[ADDITIVE_PARAM]) changes_shape = Parameter(True, stateful=False, loggable=False, pnl_internal=True) + @check_user_specified @tc.typecheck def __init__(self, default_variable=None, @@ -420,6 +421,7 @@ class Parameters(CombinationFunction.Parameters): scale = Parameter(1.0, modulable=True, aliases=[MULTIPLICATIVE_PARAM]) offset = Parameter(0.0, modulable=True, aliases=[ADDITIVE_PARAM]) + @check_user_specified @tc.typecheck def __init__(self, default_variable=None, @@ -723,6 +725,7 @@ class Parameters(CombinationFunction.Parameters): offset = Parameter(0.0, modulable=True, aliases=[ADDITIVE_PARAM]) changes_shape = Parameter(True, stateful=False, loggable=False, pnl_internal=True) + @check_user_specified @tc.typecheck def __init__(self, # weights: tc.optional(parameter_spec)=None, @@ -1165,6 +1168,7 @@ class Parameters(CombinationFunction.Parameters): scale = Parameter(1.0, modulable=True, aliases=[MULTIPLICATIVE_PARAM]) offset = Parameter(0.0, modulable=True, aliases=[ADDITIVE_PARAM]) + @check_user_specified @tc.typecheck def __init__(self, default_variable=None, @@ -1689,6 +1693,7 @@ class Parameters(CombinationFunction.Parameters): scale = Parameter(1.0, modulable=True, aliases=[MULTIPLICATIVE_PARAM]) offset = Parameter(0.0, modulable=True, aliases=[ADDITIVE_PARAM]) + @check_user_specified @tc.typecheck def __init__(self, default_variable=None, @@ -1948,6 +1953,7 @@ class Parameters(CombinationFunction.Parameters): variable = Parameter(np.array([[1], [1]]), pnl_internal=True, constructor_argument='default_variable') gamma = Parameter(1.0, modulable=True) + @check_user_specified @tc.typecheck def __init__(self, default_variable=None, diff --git a/psyneulink/core/components/functions/nonstateful/distributionfunctions.py b/psyneulink/core/components/functions/nonstateful/distributionfunctions.py index 91b255b14d4..b8a64bc1510 100644 --- a/psyneulink/core/components/functions/nonstateful/distributionfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/distributionfunctions.py @@ -39,7 +39,7 @@ from psyneulink.core.globals.utilities import convert_to_np_array, parameter_spec from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set -from psyneulink.core.globals.parameters import Parameter +from psyneulink.core.globals.parameters import Parameter, check_user_specified __all__ = [ 'DistributionFunction', 'DRIFT_RATE', 
'DRIFT_RATE_VARIABILITY', 'DriftDiffusionAnalytical', 'ExponentialDist', @@ -159,6 +159,7 @@ class Parameters(DistributionFunction.Parameters): random_state = Parameter(None, loggable=False, getter=_random_state_getter, dependencies='seed') seed = Parameter(DEFAULT_SEED, modulable=True, fallback_default=True, setter=_seed_setter) + @check_user_specified @tc.typecheck def __init__(self, default_variable=None, @@ -341,6 +342,7 @@ class Parameters(DistributionFunction.Parameters): mean = Parameter(0.0, modulable=True, aliases=[ADDITIVE_PARAM]) standard_deviation = Parameter(1.0, modulable=True, aliases=[MULTIPLICATIVE_PARAM]) + @check_user_specified @tc.typecheck def __init__(self, default_variable=None, @@ -467,6 +469,7 @@ class Parameters(DistributionFunction.Parameters): random_state = Parameter(None, loggable=False, getter=_random_state_getter, dependencies='seed') seed = Parameter(DEFAULT_SEED, modulable=True, fallback_default=True, setter=_seed_setter) + @check_user_specified @tc.typecheck def __init__(self, default_variable=None, @@ -593,6 +596,7 @@ class Parameters(DistributionFunction.Parameters): random_state = Parameter(None, loggable=False, getter=_random_state_getter, dependencies='seed') seed = Parameter(DEFAULT_SEED, modulable=True, fallback_default=True, setter=_seed_setter) + @check_user_specified @tc.typecheck def __init__(self, default_variable=None, @@ -750,6 +754,7 @@ class Parameters(DistributionFunction.Parameters): scale = Parameter(1.0, modulable=True, aliases=[MULTIPLICATIVE_PARAM]) dist_shape = Parameter(1.0, modulable=True, aliases=[ADDITIVE_PARAM]) + @check_user_specified @tc.typecheck def __init__(self, default_variable=None, @@ -884,6 +889,7 @@ class Parameters(DistributionFunction.Parameters): scale = Parameter(1.0, modulable=True, aliases=[MULTIPLICATIVE_PARAM]) mean = Parameter(1.0, modulable=True, aliases=[ADDITIVE_PARAM]) + @check_user_specified @tc.typecheck def __init__(self, default_variable=None, @@ -1120,6 +1126,7 @@ class Parameters(DistributionFunction.Parameters): read_only=True ) + @check_user_specified @tc.typecheck def __init__(self, default_variable=None, diff --git a/psyneulink/core/components/functions/nonstateful/learningfunctions.py b/psyneulink/core/components/functions/nonstateful/learningfunctions.py index c00959d8f6b..c4727f52628 100644 --- a/psyneulink/core/components/functions/nonstateful/learningfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/learningfunctions.py @@ -39,7 +39,7 @@ CONTRASTIVE_HEBBIAN_FUNCTION, TDLEARNING_FUNCTION, LEARNING_FUNCTION_TYPE, LEARNING_RATE, \ KOHONEN_FUNCTION, GAUSSIAN, LINEAR, EXPONENTIAL, HEBBIAN_FUNCTION, RL_FUNCTION, BACKPROPAGATION_FUNCTION, MATRIX, \ MSE, SSE -from psyneulink.core.globals.parameters import Parameter +from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set from psyneulink.core.globals.utilities import is_numeric, scalar_distance, convert_to_np_array @@ -448,6 +448,7 @@ class Parameters(LearningFunction.Parameters): gamma_size_n = 1 gamma_size_prior = 1 + @check_user_specified def __init__(self, default_variable=None, mu_0=None, @@ -774,6 +775,7 @@ def _validate_distance_function(self, distance_function): default_learning_rate = 0.05 + @check_user_specified def __init__(self, default_variable=None, # learning_rate: tc.optional(tc.optional(parameter_spec)) = None, @@ -1045,6 +1047,7 @@ class Parameters(LearningFunction.Parameters): modulable=True) 
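# NOTE (annotation, not part of the patch): the `@check_user_specified` decorator that this
# diff adds to every Component/Function `__init__` is what populates the
# `self._user_specified_args` collection queried by the new `_initialize_parameters` logic in
# component.py above. A minimal sketch of the assumed mechanics -- illustrative only; the real
# decorator likely binds the call to the signature so explicitly passed positional arguments
# are captured as well:
#
#     import functools
#
#     def check_user_specified(func):
#         @functools.wraps(func)
#         def wrapper(self, *args, **kwargs):
#             # record which constructor arguments the caller passed explicitly, so that
#             # user-supplied values can later be told apart from class defaults
#             self._user_specified_args = set(kwargs)
#             return func(self, *args, **kwargs)
#         return wrapper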
default_learning_rate = 0.05 + @check_user_specified def __init__(self, default_variable=None, learning_rate=None, @@ -1278,6 +1281,7 @@ class Parameters(LearningFunction.Parameters): default_learning_rate = 0.05 + @check_user_specified def __init__(self, default_variable=None, # learning_rate: tc.optional(tc.optional(parameter_spec)) = None, @@ -1585,6 +1589,7 @@ class Parameters(LearningFunction.Parameters): read_only=True ) + @check_user_specified def __init__(self, default_variable=None, # learning_rate: tc.optional(tc.optional(parameter_spec)) = None, @@ -1934,6 +1939,7 @@ class Parameters(LearningFunction.Parameters): default_learning_rate = 1.0 + @check_user_specified @tc.typecheck def __init__(self, default_variable=None, @@ -2175,6 +2181,7 @@ class TDLearning(Reinforcement): """ componentName = TDLEARNING_FUNCTION + @check_user_specified def __init__(self, default_variable=None, learning_rate=None, diff --git a/psyneulink/core/components/functions/nonstateful/objectivefunctions.py b/psyneulink/core/components/functions/nonstateful/objectivefunctions.py index 286cf63a86e..1e8ac37f370 100644 --- a/psyneulink/core/components/functions/nonstateful/objectivefunctions.py +++ b/psyneulink/core/components/functions/nonstateful/objectivefunctions.py @@ -33,7 +33,7 @@ DEFAULT_VARIABLE, DIFFERENCE, DISTANCE_FUNCTION, DISTANCE_METRICS, DistanceMetrics, \ ENERGY, ENTROPY, EUCLIDEAN, HOLLOW_MATRIX, MATRIX, MAX_ABS_DIFF, \ NORMED_L0_SIMILARITY, OBJECTIVE_FUNCTION_TYPE, SIZE, STABILITY_FUNCTION -from psyneulink.core.globals.parameters import Parameter +from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set from psyneulink.core.globals.utilities import is_distance_metric, safe_len, convert_to_np_array from psyneulink.core.globals.utilities import is_iterable @@ -206,6 +206,7 @@ class Parameters(ObjectiveFunction.Parameters): transfer_fct = Parameter(None, stateful=False, loggable=False) normalize = Parameter(False, stateful=False) + @check_user_specified @tc.typecheck def __init__(self, default_variable=None, @@ -558,6 +559,7 @@ class Energy(Stability): specifies the `PreferenceSet` for the Function (see `prefs ` for details). """ + @check_user_specified def __init__(self, default_variable=None, size=None, @@ -667,6 +669,7 @@ class Entropy(Stability): specifies the `PreferenceSet` for the Function (see `prefs ` for details). 
""" + @check_user_specified def __init__(self, default_variable=None, normalize:bool=None, @@ -779,6 +782,7 @@ class Parameters(ObjectiveFunction.Parameters): variable = Parameter(np.array([[0], [0]]), read_only=True, pnl_internal=True, constructor_argument='default_variable') metric = Parameter(DIFFERENCE, stateful=False) + @check_user_specified @tc.typecheck def __init__(self, default_variable=None, diff --git a/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py b/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py index 1f70a337c64..df8d182577c 100644 --- a/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py @@ -48,7 +48,7 @@ from psyneulink.core.globals.keywords import \ BOUNDS, GRADIENT_OPTIMIZATION_FUNCTION, GRID_SEARCH_FUNCTION, GAUSSIAN_PROCESS_FUNCTION, \ OPTIMIZATION_FUNCTION_TYPE, OWNER, VALUE, VARIABLE -from psyneulink.core.globals.parameters import Parameter +from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.sampleiterator import SampleIterator from psyneulink.core.globals.utilities import call_with_pruned_args @@ -404,6 +404,7 @@ class Parameters(Function_Base.Parameters): saved_samples = Parameter([], read_only=True, pnl_internal=True) saved_values = Parameter([], read_only=True, pnl_internal=True) + @check_user_specified @tc.typecheck def __init__( self, @@ -623,26 +624,26 @@ def _evaluate(self, variable=None, context=None, params=None): assert all([not getattr(self.parameters, x)._user_specified for x in self._unspecified_args]) self._unspecified_args = [] - # Get initial sample in case it is needed by _search_space_evaluate (e.g., for gradient initialization) - initial_sample = self._check_args(variable=variable, context=context, params=params) - try: - initial_value = self.owner.objective_mechanism.parameters.value._get(context) - except AttributeError: - initial_value = 0 - # EVALUATE ALL SAMPLES IN SEARCH SPACE # Evaluate all estimates of all samples in search_space - # If execution mode is not Python and search_space is static, use parallelized evaluation: - if (self.owner and self.owner.parameters.comp_execution_mode._get(context) != 'Python' and - all(isinstance(sample_iterator.start, Number) and isinstance(sample_iterator.stop, Number) - for sample_iterator in self.search_space)): - # FIX: NEED TO FIX THIS ONCE _grid_evaluate RETURNS all_samples - all_samples = [] + # Run compiled mode if requested by parameter and everything is initialized + if self.owner and self.owner.parameters.comp_execution_mode._get(context) != 'Python' and \ + ContextFlags.PROCESSING in context.flags: + all_samples = [s for s in itertools.product(*self.search_space)] all_values, num_evals = self._grid_evaluate(self.owner, context) + assert len(all_values) == num_evals + assert len(all_samples) == num_evals last_sample = last_value = None # Otherwise, default sequential sampling else: + # Get initial sample in case it is needed by _search_space_evaluate (e.g., for gradient initialization) + initial_sample = self._check_args(variable=variable, context=context, params=params) + try: + initial_value = self.owner.objective_mechanism.parameters.value._get(context) + except AttributeError: + initial_value = 0 + last_sample, last_value, all_samples, all_values = self._sequential_evaluate(initial_sample, initial_value, context) @@ -654,6 +655,11 @@ def _evaluate(self, variable=None, context=None, 
params=None): self.parameters.randomization_dimension._get(context) and \ self.parameters.num_estimates._get(context) is not None: + # FIXME: This is easy to support in hybrid mode. We just need to convert ctype results + # returned from _grid_evaluate to numpy + assert not self.owner or self.owner.parameters.comp_execution_mode._get(context) == 'Python', \ + "Aggregation function not supported in compiled mode!" + # Reshape all the values we encountered to group those that correspond to the same parameter values # can be aggregated. all_values = np.reshape(all_values, (-1, self.parameters.num_estimates._get(context))) @@ -752,6 +758,17 @@ def _sequential_evaluate(self, initial_sample, initial_value, context): def _grid_evaluate(self, ocm, context): """Helper method for evaluation of a grid of samples from search space via LLVM backends.""" + # If execution mode is not Python, the search space has to be static + def _is_static(it:SampleIterator): + if isinstance(it.start, Number) and isinstance(it.stop, Number): + return True + + if isinstance(it.generator, list): + return True + + return False + + assert all(_is_static(sample_iterator) for sample_iterator in self.search_space) assert ocm is ocm.agent_rep.controller # Compiled evaluate expects the same variable as mech function variable = [input_port.parameters.value.get(context) for input_port in ocm.input_ports] @@ -767,7 +784,6 @@ def _grid_evaluate(self, ocm, context): else: assert False, f"Unknown execution mode for {ocm.name}: {execution_mode}." - # FIX: RETURN SHOULD BE: outcomes, all_samples (THEN FIX CALL IN _function) return outcomes, num_evals def _report_value(self, new_value): @@ -1084,6 +1100,7 @@ def _parse_direction(self, direction): else: return -1 + @check_user_specified @tc.typecheck def __init__(self, default_variable=None, @@ -1486,6 +1503,7 @@ class Parameters(OptimizationFunction.Parameters): # TODO: should save_values be in the constructor if it's ignored? # is False or True the correct value? + @check_user_specified @tc.typecheck def __init__(self, default_variable=None, @@ -1805,30 +1823,6 @@ def _gen_llvm_function_body(self, ctx, builder, params, state_features, arg_in, builder.store(builder.load(min_value_ptr), out_value_ptr) return builder - def _run_grid(self, ocm, variable, context): - - # "ct" => c-type variables - ct_values, num_evals = self._grid_evaluate(ocm, context) - - assert len(ct_values) == num_evals - # Reduce array of values to min/max - # select_min params are: - # params, state, min_sample_ptr, sample_ptr, min_value_ptr, value_ptr, opt_count_ptr, count - bin_func = pnlvm.LLVMBinaryFunction.from_obj(self, tags=frozenset({"select_min"})) - ct_param = bin_func.byref_arg_types[0](*self._get_param_initializer(context)) - ct_state = bin_func.byref_arg_types[1](*self._get_state_initializer(context)) - ct_opt_sample = bin_func.byref_arg_types[2](float("NaN")) - ct_alloc = None # NULL for samples - ct_opt_value = bin_func.byref_arg_types[4]() - ct_opt_count = bin_func.byref_arg_types[6](0) - ct_start = bin_func.c_func.argtypes[7](0) - ct_stop = bin_func.c_func.argtypes[8](len(ct_values)) - - bin_func(ct_param, ct_state, ct_opt_sample, ct_alloc, ct_opt_value, - ct_values, ct_opt_count, ct_start, ct_stop) - - return np.ctypeslib.as_array(ct_opt_sample), ct_opt_value.value, np.ctypeslib.as_array(ct_values) - def _function(self, variable=None, context=None, @@ -1953,15 +1947,37 @@ def _function(self, "PROGRAM ERROR: bad value for {} arg of {}: {}, {}". 
\ format(repr(DIRECTION), self.name, direction) - ocm = self._get_optimized_controller() + # Evaluate objective_function for each sample + last_sample, last_value, all_samples, all_values = self._evaluate( + variable=variable, + context=context, + params=params, + ) # Compiled version + ocm = self._get_optimized_controller() if ocm is not None and ocm.parameters.comp_execution_mode._get(context) in {"PTX", "LLVM"}: - opt_sample, opt_value, all_values = self._run_grid(ocm, variable, context) - # This should not be evaluated unless needed - all_samples = [s for s in itertools.product(*self.search_space)] - value_optimal = opt_value - sample_optimal = opt_sample + + # Reduce array of values to min/max + # select_min params are: + # params, state, min_sample_ptr, sample_ptr, min_value_ptr, value_ptr, opt_count_ptr, count + bin_func = pnlvm.LLVMBinaryFunction.from_obj(self, tags=frozenset({"select_min"})) + ct_param = bin_func.byref_arg_types[0](*self._get_param_initializer(context)) + ct_state = bin_func.byref_arg_types[1](*self._get_state_initializer(context)) + ct_opt_sample = bin_func.byref_arg_types[2](float("NaN")) + ct_alloc = None # NULL for samples + ct_values = all_values + ct_opt_value = bin_func.byref_arg_types[4]() + ct_opt_count = bin_func.byref_arg_types[6](0) + ct_start = bin_func.c_func.argtypes[7](0) + ct_stop = bin_func.c_func.argtypes[8](len(ct_values)) + + bin_func(ct_param, ct_state, ct_opt_sample, ct_alloc, ct_opt_value, + ct_values, ct_opt_count, ct_start, ct_stop) + + value_optimal = ct_opt_value.value + sample_optimal = np.ctypeslib.as_array(ct_opt_sample) + all_values = np.ctypeslib.as_array(ct_values) # These are normally stored in the parent function (OptimizationFunction). # Since we didn't call super()._function like the python path, @@ -1974,12 +1990,6 @@ def _function(self, # Python version else: - # Evaluate objective_function for each sample - last_sample, last_value, all_samples, all_values = self._evaluate( - variable=variable, - context=context, - params=params, - ) if all_values.size != all_samples.shape[-1]: raise ValueError(f"OptimizationFunction Error: {self}._evaluate returned mismatched sizes for " @@ -2198,6 +2208,7 @@ class Parameters(OptimizationFunction.Parameters): # TODO: should save_values be in the constructor if it's ignored? # is False or True the correct value? 
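# NOTE (annotation, not part of the patch): the GridSearch refactor above inverts the old
# control flow: `_evaluate` now runs unconditionally, and when `comp_execution_mode` is "PTX"
# or "LLVM" its ctype results are reduced in place with the compiled "select_min" kernel
# instead of routing through the deleted `_run_grid` helper. A hedged sketch of the resulting
# flow (`compiled_select_min` and `python_reduce` are hypothetical wrappers, not patch code):
#
#     _, _, all_samples, all_values = self._evaluate(variable=variable, context=context, params=params)
#     if ocm is not None and ocm.parameters.comp_execution_mode._get(context) in {"PTX", "LLVM"}:
#         # all_values is already a ctypes buffer here; select_min scans it for the optimum entry
#         sample_optimal, value_optimal = compiled_select_min(all_values)
#     else:
#         sample_optimal, value_optimal = python_reduce(all_samples, all_values)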
+ @check_user_specified @tc.typecheck def __init__(self, default_variable=None, @@ -2452,12 +2463,13 @@ class Parameters(OptimizationFunction.Parameters): :default value: True :type: ``bool`` """ - variable = Parameter([[0], [0]], read_only=True) + variable = Parameter([[0], [0]], read_only=True, constructor_argument='default_variable') random_state = Parameter(None, loggable=False, getter=_random_state_getter, dependencies='seed') seed = Parameter(DEFAULT_SEED, modulable=True, fallback_default=True, setter=_seed_setter) save_samples = True save_values = True + @check_user_specified @tc.typecheck def __init__(self, priors, diff --git a/psyneulink/core/components/functions/nonstateful/selectionfunctions.py b/psyneulink/core/components/functions/nonstateful/selectionfunctions.py index aff4dc5764f..626f1ade454 100644 --- a/psyneulink/core/components/functions/nonstateful/selectionfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/selectionfunctions.py @@ -36,7 +36,7 @@ from psyneulink.core.globals.keywords import \ MAX_VAL, MAX_ABS_VAL, MAX_INDICATOR, MAX_ABS_INDICATOR, MIN_VAL, MIN_ABS_VAL, MIN_INDICATOR, MIN_ABS_INDICATOR, \ MODE, ONE_HOT_FUNCTION, PROB, PROB_INDICATOR, SELECTION_FUNCTION_TYPE, PREFERENCE_SET_NAME -from psyneulink.core.globals.parameters import Parameter +from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import \ REPORT_OUTPUT_PREF, PreferenceEntry, PreferenceLevel, is_pref_set @@ -201,6 +201,7 @@ def _validate_mode(self, mode): # returns error message return 'not one of {0}'.format(options) + @check_user_specified @tc.typecheck def __init__(self, default_variable=None, diff --git a/psyneulink/core/components/functions/nonstateful/transferfunctions.py b/psyneulink/core/components/functions/nonstateful/transferfunctions.py index 5bce6445bba..eef16ca3f36 100644 --- a/psyneulink/core/components/functions/nonstateful/transferfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/transferfunctions.py @@ -70,7 +70,7 @@ RATE, RECEIVER, RELU_FUNCTION, SCALE, SLOPE, SOFTMAX_FUNCTION, STANDARD_DEVIATION, SUM, \ TRANSFER_FUNCTION_TYPE, TRANSFER_WITH_COSTS_FUNCTION, VARIANCE, VARIABLE, X_0, PREFERENCE_SET_NAME from psyneulink.core.globals.parameters import \ - FunctionParameter, Parameter, get_validator_by_function + FunctionParameter, Parameter, get_validator_by_function, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import \ REPORT_OUTPUT_PREF, PreferenceEntry, PreferenceLevel, is_pref_set from psyneulink.core.globals.utilities import parameter_spec, safe_len @@ -197,6 +197,7 @@ class Identity(TransferFunction): # ------------------------------------------- REPORT_OUTPUT_PREF: PreferenceEntry(False, PreferenceLevel.INSTANCE), } + @check_user_specified @tc.typecheck def __init__(self, default_variable=None, @@ -364,6 +365,7 @@ class Parameters(TransferFunction.Parameters): slope = Parameter(1.0, modulable=True, aliases=[MULTIPLICATIVE_PARAM]) intercept = Parameter(0.0, modulable=True, aliases=[ADDITIVE_PARAM]) + @check_user_specified @tc.typecheck def __init__(self, default_variable=None, @@ -625,6 +627,7 @@ class Parameters(TransferFunction.Parameters): offset = Parameter(0.0, modulable=True) bounds = (0, None) + @check_user_specified @tc.typecheck def __init__(self, default_variable=None, @@ -915,6 +918,7 @@ class Parameters(TransferFunction.Parameters): scale = Parameter(1.0, modulable=True) bounds = (0, 1) + @check_user_specified 
@tc.typecheck def __init__(self, default_variable=None, @@ -1233,6 +1237,7 @@ class Parameters(TransferFunction.Parameters): scale = Parameter(1.0, modulable=True) bounds = (0, 1) + @check_user_specified @tc.typecheck def __init__(self, default_variable=None, @@ -1497,6 +1502,7 @@ class Parameters(TransferFunction.Parameters): leak = Parameter(0.0, modulable=True) bounds = (None, None) + @check_user_specified @tc.typecheck def __init__(self, default_variable=None, @@ -1705,6 +1711,7 @@ def _validate_variable(self, variable): if variable.ndim != 1 or len(variable) < 2: return f"must be list or 1d array of length 2 or greater." + @check_user_specified @tc.typecheck def __init__(self, default_variable=None, @@ -1970,6 +1977,7 @@ class Parameters(TransferFunction.Parameters): offset = Parameter(0.0, modulable=True) bounds = (None, None) + @check_user_specified @tc.typecheck def __init__(self, default_variable=None, @@ -2243,6 +2251,7 @@ class Parameters(TransferFunction.Parameters): seed = Parameter(DEFAULT_SEED, modulable=True, fallback_default=True, setter=_seed_setter) bounds = (None, None) + @check_user_specified @tc.typecheck def __init__(self, default_variable=None, @@ -2523,6 +2532,7 @@ def _validate_output(self, output): else: return 'not one of {0}'.format(options) + @check_user_specified @tc.typecheck def __init__(self, default_variable=None, @@ -2578,7 +2588,7 @@ def _validate_variable(self, variable, context=None): return np.asarray(variable) - def __gen_llvm_exp_sum_max(self, builder, index, ctx, vi, gain, max_ptr, exp_sum_ptr, max_ind_ptr): + def __gen_llvm_exp_sum(self, builder, index, ctx, vi, gain, exp_sum_ptr): ptri = builder.gep(vi, [ctx.int32_ty(0), index]) exp_f = ctx.get_builtin("exp", [ctx.float_ty]) @@ -2590,17 +2600,7 @@ def __gen_llvm_exp_sum_max(self, builder, index, ctx, vi, gain, max_ptr, exp_sum new_exp_sum = builder.fadd(exp_sum, exp_val) builder.store(new_exp_sum, exp_sum_ptr) - old_max = builder.load(max_ptr) - gt = builder.fcmp_ordered(">", exp_val, old_max) - new_max = builder.select(gt, exp_val, old_max) - builder.store(new_max, max_ptr) - - old_index = builder.load(max_ind_ptr) - new_index = builder.select(gt, index, old_index) - builder.store(new_index, max_ind_ptr) - def __gen_llvm_exp_div(self, builder, index, ctx, vi, vo, gain, exp_sum): - assert self.output == ALL ptro = builder.gep(vo, [ctx.int32_ty(0), index]) ptri = builder.gep(vi, [ctx.int32_ty(0), index]) exp_f = ctx.get_builtin("exp", [ctx.float_ty]) @@ -2611,65 +2611,70 @@ def __gen_llvm_exp_div(self, builder, index, ctx, vi, vo, gain, exp_sum): builder.store(val, ptro) - def __gen_llvm_apply(self, ctx, builder, params, _, arg_in, arg_out): + def __gen_llvm_apply(self, ctx, builder, params, state, arg_in, arg_out, tags:frozenset): exp_sum_ptr = builder.alloca(ctx.float_ty) builder.store(exp_sum_ptr.type.pointee(0), exp_sum_ptr) - max_ptr = builder.alloca(ctx.float_ty) - builder.store(max_ptr.type.pointee(float('-inf')), max_ptr) - - max_ind_ptr = builder.alloca(ctx.int32_ty) - builder.store(max_ind_ptr.type.pointee(-1), max_ind_ptr) - gain_ptr = pnlvm.helpers.get_param_ptr(builder, self, params, GAIN) gain = pnlvm.helpers.load_extract_scalar_array_one(builder, gain_ptr) with pnlvm.helpers.array_ptr_loop(builder, arg_in, "exp_sum_max") as args: - self.__gen_llvm_exp_sum_max(*args, ctx=ctx, vi=arg_in, - max_ptr=max_ptr, gain=gain, - max_ind_ptr=max_ind_ptr, - exp_sum_ptr=exp_sum_ptr) + self.__gen_llvm_exp_sum(*args, ctx=ctx, vi=arg_in, gain=gain, + exp_sum_ptr=exp_sum_ptr) - output_type = 
self.output exp_sum = builder.load(exp_sum_ptr) - index = builder.load(max_ind_ptr) - ptro = builder.gep(arg_out, [ctx.int32_ty(0), index]) - if output_type == ALL: + if self.output == ALL: with pnlvm.helpers.array_ptr_loop(builder, arg_in, "exp_div") as args: self.__gen_llvm_exp_div(ctx=ctx, vi=arg_in, vo=arg_out, gain=gain, exp_sum=exp_sum, *args) - elif output_type == MAX_VAL: - # zero out the output array - with pnlvm.helpers.array_ptr_loop(builder, arg_in, "zero_output") as (b,i): - b.store(ctx.float_ty(0), b.gep(arg_out, [ctx.int32_ty(0), i])) - - ptri = builder.gep(arg_in, [ctx.int32_ty(0), index]) - exp_f = ctx.get_builtin("exp", [ctx.float_ty]) - orig_val = builder.load(ptri) - val = builder.fmul(orig_val, gain) - val = builder.call(exp_f, [val]) - val = builder.fdiv(val, exp_sum) - builder.store(val, ptro) - elif output_type == MAX_INDICATOR: - # zero out the output array - with pnlvm.helpers.array_ptr_loop(builder, arg_in, "zero_output") as (b,i): - b.store(ctx.float_ty(0), b.gep(arg_out, [ctx.int32_ty(0), i])) - builder.store(ctx.float_ty(1), ptro) + return builder + + one_hot_f = ctx.import_llvm_function(self.one_hot_function, tags=tags) + one_hot_p = pnlvm.helpers.get_param_ptr(builder, self, params, 'one_hot_function') + one_hot_s = pnlvm.helpers.get_state_ptr(builder, self, state, 'one_hot_function') + + assert one_hot_f.args[3].type == arg_out.type + one_hot_out = arg_out + one_hot_in = builder.alloca(one_hot_f.args[2].type.pointee) + + if self.output in {MAX_VAL, MAX_INDICATOR}: + with pnlvm.helpers.array_ptr_loop(builder, arg_in, "exp_div") as (b, i): + self.__gen_llvm_exp_div(ctx=ctx, vi=arg_in, vo=one_hot_in, + gain=gain, exp_sum=exp_sum, builder=b, index=i) + + builder.call(one_hot_f, [one_hot_p, one_hot_s, one_hot_in, one_hot_out]) + + elif self.output == PROB: + one_hot_in_data = builder.gep(one_hot_in, [ctx.int32_ty(0), ctx.int32_ty(0)]) + one_hot_in_dist = builder.gep(one_hot_in, [ctx.int32_ty(0), ctx.int32_ty(1)]) + + with pnlvm.helpers.array_ptr_loop(builder, arg_in, "exp_div") as (b, i): + self.__gen_llvm_exp_div(ctx=ctx, vi=arg_in, vo=one_hot_in_dist, + gain=gain, exp_sum=exp_sum, builder=b, index=i) + + dist_in = b.gep(arg_in, [ctx.int32_ty(0), i]) + dist_out = b.gep(one_hot_in_data, [ctx.int32_ty(0), i]) + b.store(b.load(dist_in), dist_out) + + + builder.call(one_hot_f, [one_hot_p, one_hot_s, one_hot_in, one_hot_out]) + else: + assert False, "Unsupported output in {}: {}".format(self, self.output) return builder - def _gen_llvm_function_body(self, ctx, builder, params, _, arg_in, arg_out, *, tags:frozenset): + def _gen_llvm_function_body(self, ctx, builder, params, state, arg_in, arg_out, *, tags:frozenset): if self.parameters.per_item.get(): assert isinstance(arg_in.type.pointee.element, pnlvm.ir.ArrayType) assert isinstance(arg_out.type.pointee.element, pnlvm.ir.ArrayType) for i in range(arg_in.type.pointee.count): inner_in = builder.gep(arg_in, [ctx.int32_ty(0), ctx.int32_ty(i)]) inner_out = builder.gep(arg_out, [ctx.int32_ty(0), ctx.int32_ty(i)]) - builder = self.__gen_llvm_apply(ctx, builder, params, _, inner_in, inner_out) + builder = self.__gen_llvm_apply(ctx, builder, params, state, inner_in, inner_out, tags=tags) return builder else: - return self.__gen_llvm_apply(ctx, builder, params, _, arg_in, arg_out) + return self.__gen_llvm_apply(ctx, builder, params, state, arg_in, arg_out, tags=tags) def apply_softmax(self, input_value, gain, output_type): # Modulate input_value by gain @@ -2925,6 +2930,7 @@ class Parameters(TransferFunction.Parameters): # 
return True # return False + @check_user_specified @tc.typecheck def __init__(self, default_variable=None, @@ -3842,8 +3848,7 @@ class Parameters(TransferFunction.Parameters): :default value: None :type: """ - variable = Parameter(np.array([0]), - history_min_length=1) + variable = Parameter(np.array([0]), history_min_length=1, constructor_argument='default_variable') intensity = Parameter(np.zeros_like(variable.default_value), history_min_length=1) @@ -3927,6 +3932,7 @@ class Parameters(TransferFunction.Parameters): function_parameter_name=ADDITIVE_PARAM, ) + @check_user_specified @tc.typecheck def __init__(self, default_variable=None, diff --git a/psyneulink/core/components/functions/stateful/integratorfunctions.py b/psyneulink/core/components/functions/stateful/integratorfunctions.py index dd344d3b9f0..afca06fdb6e 100644 --- a/psyneulink/core/components/functions/stateful/integratorfunctions.py +++ b/psyneulink/core/components/functions/stateful/integratorfunctions.py @@ -48,7 +48,7 @@ INTERACTIVE_ACTIVATION_INTEGRATOR_FUNCTION, LEAKY_COMPETING_INTEGRATOR_FUNCTION, \ MULTIPLICATIVE_PARAM, NOISE, OFFSET, OPERATION, ORNSTEIN_UHLENBECK_INTEGRATOR_FUNCTION, OUTPUT_PORTS, PRODUCT, \ RATE, REST, SIMPLE_INTEGRATOR_FUNCTION, SUM, TIME_STEP_SIZE, THRESHOLD, VARIABLE, MODEL_SPEC_ID_MDF_VARIABLE -from psyneulink.core.globals.parameters import Parameter +from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set from psyneulink.core.globals.utilities import parameter_spec, all_within_range, \ convert_all_elements_to_np_array @@ -220,6 +220,7 @@ class Parameters(StatefulFunction.Parameters): previous_value = Parameter(np.array([0]), initializer='initializer') initializer = Parameter(np.array([0]), pnl_internal=True) + @check_user_specified @tc.typecheck def __init__(self, default_variable=None, @@ -550,6 +551,7 @@ class Parameters(IntegratorFunction.Parameters): rate = Parameter(1.0, modulable=True, aliases=[MULTIPLICATIVE_PARAM], function_arg=True) increment = Parameter(0.0, modulable=True, aliases=[ADDITIVE_PARAM], function_arg=True) + @check_user_specified @tc.typecheck def __init__(self, default_variable=None, @@ -826,6 +828,7 @@ class Parameters(IntegratorFunction.Parameters): rate = Parameter(1.0, modulable=True, aliases=[MULTIPLICATIVE_PARAM], function_arg=True) offset = Parameter(0.0, modulable=True, aliases=[ADDITIVE_PARAM], function_arg=True) + @check_user_specified @tc.typecheck def __init__(self, default_variable=None, @@ -1061,6 +1064,7 @@ class Parameters(IntegratorFunction.Parameters): rate = Parameter(1.0, modulable=True, aliases=[MULTIPLICATIVE_PARAM], function_arg=True) offset = Parameter(0.0, modulable=True, aliases=[ADDITIVE_PARAM], function_arg=True) + @check_user_specified @tc.typecheck def __init__(self, default_variable=None, @@ -1573,6 +1577,7 @@ class Parameters(IntegratorFunction.Parameters): long_term_logistic = None + @check_user_specified @tc.typecheck def __init__(self, default_variable=None, @@ -2014,6 +2019,7 @@ class Parameters(IntegratorFunction.Parameters): max_val = Parameter(1.0, function_arg=True) min_val = Parameter(-1.0, function_arg=True) + @check_user_specified @tc.typecheck def __init__(self, default_variable=None, @@ -2418,6 +2424,7 @@ def _parse_initializer(self, initializer): else: return initializer + @check_user_specified @tc.typecheck def __init__( self, @@ -2531,10 +2538,6 @@ def _gen_llvm_integrate(self, builder, index, ctx, vi, vo, params, state): 
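# NOTE (annotation, not part of the patch): the two deleted blocks below each unwrapped a
# one-element LLVM array into a scalar (assert len(...) == 1, then extract_value(..., 0)).
# Their removal suggests `rate` and the per-index load of `vi` now reach this point already as
# scalars; that is an inference from the diff, not a statement by the patch author.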
builder.call(rand_f, [random_state, rand_val_ptr]) rand_val = builder.load(rand_val_ptr) - if isinstance(rate.type, pnlvm.ir.ArrayType): - assert len(rate.type) == 1 - rate = builder.extract_value(rate, 0) - # Get state pointers prev_ptr = pnlvm.helpers.get_state_ptr(builder, self, state, "previous_value") prev_time_ptr = pnlvm.helpers.get_state_ptr(builder, self, state, "previous_time") @@ -2543,10 +2546,8 @@ def _gen_llvm_integrate(self, builder, index, ctx, vi, vo, params, state): # + np.sqrt(time_step_size * noise) * random_state.normal() prev_val_ptr = builder.gep(prev_ptr, [ctx.int32_ty(0), index]) prev_val = builder.load(prev_val_ptr) + val = builder.load(builder.gep(vi, [ctx.int32_ty(0), index])) - if isinstance(val.type, pnlvm.ir.ArrayType): - assert len(val.type) == 1 - val = builder.extract_value(val, 0) val = builder.fmul(val, rate) val = builder.fmul(val, time_step_size) val = builder.fadd(val, prev_val) @@ -2894,7 +2895,7 @@ class Parameters(IntegratorFunction.Parameters): # threshold = Parameter(100.0, modulable=True) time_step_size = Parameter(1.0, modulable=True) previous_time = Parameter(None, initializer='starting_point', pnl_internal=True) - dimension = Parameter(2, stateful=False, read_only=True) + dimension = Parameter(3, stateful=False, read_only=True) initializer = Parameter([0], initalizer='variable', stateful=True) angle_function = Parameter(None, stateful=False, loggable=False) random_state = Parameter(None, loggable=False, getter=_random_state_getter, dependencies='seed') @@ -2933,6 +2934,7 @@ def _parse_noise(self, noise): noise = np.array(noise) return noise + @check_user_specified @tc.typecheck def __init__(self, default_variable=None, @@ -3439,6 +3441,7 @@ class Parameters(IntegratorFunction.Parameters): read_only=True ) + @check_user_specified @tc.typecheck def __init__( self, @@ -3733,6 +3736,7 @@ class Parameters(IntegratorFunction.Parameters): offset = Parameter(0.0, modulable=True, aliases=[ADDITIVE_PARAM], function_arg=True) time_step_size = Parameter(0.1, modulable=True, function_arg=True) + @check_user_specified @tc.typecheck def __init__(self, default_variable=None, @@ -4414,6 +4418,7 @@ class Parameters(IntegratorFunction.Parameters): read_only=True ) + @check_user_specified @tc.typecheck def __init__(self, default_variable=None, diff --git a/psyneulink/core/components/functions/stateful/memoryfunctions.py b/psyneulink/core/components/functions/stateful/memoryfunctions.py index abade02079c..c6fb7d67731 100644 --- a/psyneulink/core/components/functions/stateful/memoryfunctions.py +++ b/psyneulink/core/components/functions/stateful/memoryfunctions.py @@ -45,7 +45,7 @@ ADDITIVE_PARAM, BUFFER_FUNCTION, MEMORY_FUNCTION, COSINE, \ ContentAddressableMemory_FUNCTION, DictionaryMemory_FUNCTION, \ MIN_INDICATOR, MULTIPLICATIVE_PARAM, NEWEST, NOISE, OLDEST, OVERWRITE, RATE, RANDOM, VARIABLE -from psyneulink.core.globals.parameters import Parameter +from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set from psyneulink.core.globals.utilities import \ all_within_range, convert_to_np_array, convert_to_list, convert_all_elements_to_np_array @@ -56,6 +56,16 @@ class MemoryFunction(StatefulFunction): # ----------------------------------------------------------------------------- componentType = MEMORY_FUNCTION + # TODO: refactor to avoid skip of direct super + def _update_default_variable(self, new_default_variable, context=None): + if not 
self.parameters.initializer._user_specified: + # use * 0 instead of zeros_like to deal with ragged arrays + self._initialize_previous_value([new_default_variable * 0], context) + + # bypass the additional _initialize_previous_value call used by + # other stateful functions + super(StatefulFunction, self)._update_default_variable(new_default_variable, context=context) + class Buffer(MemoryFunction): # ------------------------------------------------------------------------------ """ @@ -215,6 +225,7 @@ class Parameters(StatefulFunction.Parameters): changes_shape = Parameter(True, stateful=False, loggable=False, pnl_internal=True) + @check_user_specified @tc.typecheck def __init__(self, # FIX: 12/11/18 JDC - NOT SAFE TO SPECIFY A MUTABLE TYPE AS DEFAULT @@ -259,16 +270,6 @@ def _initialize_previous_value(self, initializer, context=None): return previous_value - # TODO: Buffer variable fix: remove this or refactor to avoid skip - # of direct super - def _update_default_variable(self, new_default_variable, context=None): - if not self.parameters.initializer._user_specified: - self._initialize_previous_value([np.zeros_like(new_default_variable)], context) - - # bypass the additional _initialize_previous_value call used by - # other stateful functions - super(StatefulFunction, self)._update_default_variable(new_default_variable, context=context) - def _instantiate_attributes_before_function(self, function=None, context=None): self.parameters.previous_value._set( self._initialize_previous_value( @@ -1152,6 +1153,7 @@ def _parse_initializer(self, initializer): initializer = ContentAddressableMemory._enforce_memory_shape(initializer) return initializer + @check_user_specified @tc.typecheck def __init__(self, # FIX: REINSTATE WHEN 3.6 IS RETIRED: @@ -2173,6 +2175,7 @@ class Parameters(StatefulFunction.Parameters): selection_function = Parameter(OneHot(mode=MIN_INDICATOR), stateful=False, loggable=False) + @check_user_specified @tc.typecheck def __init__(self, default_variable=None, diff --git a/psyneulink/core/components/functions/stateful/statefulfunction.py b/psyneulink/core/components/functions/stateful/statefulfunction.py index 1a365aca476..5e22d460526 100644 --- a/psyneulink/core/components/functions/stateful/statefulfunction.py +++ b/psyneulink/core/components/functions/stateful/statefulfunction.py @@ -30,7 +30,7 @@ from psyneulink.core.components.functions.function import Function_Base, FunctionError, _noise_setter from psyneulink.core.globals.context import handle_external_context from psyneulink.core.globals.keywords import STATEFUL_FUNCTION_TYPE, STATEFUL_FUNCTION, NOISE, RATE -from psyneulink.core.globals.parameters import Parameter +from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set from psyneulink.core.globals.utilities import iscompatible, convert_to_np_array, contains_type @@ -213,6 +213,7 @@ def _validate_noise(self, noise): return 'functions in a list must be instantiated and have the desired noise variable shape' @handle_external_context() + @check_user_specified @tc.typecheck def __init__(self, default_variable=None, diff --git a/psyneulink/core/components/functions/userdefinedfunction.py b/psyneulink/core/components/functions/userdefinedfunction.py index 176eff725c7..0cb5db217f3 100644 --- a/psyneulink/core/components/functions/userdefinedfunction.py +++ b/psyneulink/core/components/functions/userdefinedfunction.py @@ -18,7 +18,7 @@ from psyneulink.core.globals.keywords 
import \ CONTEXT, CUSTOM_FUNCTION, OWNER, PARAMS, \ SELF, USER_DEFINED_FUNCTION, USER_DEFINED_FUNCTION_TYPE -from psyneulink.core.globals.parameters import Parameter +from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.preferences import is_pref_set from psyneulink.core.globals.utilities import _is_module_class, iscompatible @@ -450,6 +450,7 @@ class Parameters(Function_Base.Parameters): pnl_internal=True, ) + @check_user_specified @tc.typecheck def __init__(self, custom_function=None, diff --git a/psyneulink/core/components/mechanisms/mechanism.py b/psyneulink/core/components/mechanisms/mechanism.py index e174f9004e2..567c2f3eeca 100644 --- a/psyneulink/core/components/mechanisms/mechanism.py +++ b/psyneulink/core/components/mechanisms/mechanism.py @@ -1098,7 +1098,7 @@ REMOVE_PORTS, PORT_SPEC, _parse_port_spec, PORT_SPECIFIC_PARAMS, PROJECTION_SPECIFIC_PARAMS from psyneulink.core.components.shellclasses import Mechanism, Projection, Port from psyneulink.core.globals.context import Context, ContextFlags, handle_external_context -from psyneulink.core.globals.json import _get_variable_parameter_name, _substitute_expression_args +from psyneulink.core.globals.mdf import _get_variable_parameter_name # TODO: remove unused keywords from psyneulink.core.globals.keywords import \ ADDITIVE_PARAM, EXECUTION_PHASE, EXPONENT, FUNCTION_PARAMS, \ @@ -1109,7 +1109,7 @@ NAME, OUTPUT, OUTPUT_LABELS_DICT, OUTPUT_PORT, OUTPUT_PORT_PARAMS, OUTPUT_PORTS, OWNER_EXECUTION_COUNT, OWNER_VALUE, \ PARAMETER_PORT, PARAMETER_PORT_PARAMS, PARAMETER_PORTS, PROJECTIONS, REFERENCE_VALUE, RESULT, \ TARGET_LABELS_DICT, VALUE, VARIABLE, WEIGHT, MODEL_SPEC_ID_MDF_VARIABLE, MODEL_SPEC_ID_INPUT_PORT_COMBINATION_FUNCTION -from psyneulink.core.globals.parameters import Parameter +from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel from psyneulink.core.globals.registry import register_category, remove_instance_from_registry from psyneulink.core.globals.utilities import \ @@ -1680,6 +1680,7 @@ def _parse_output_ports(self, output_ports): @tc.typecheck @abc.abstractmethod + @check_user_specified def __init__(self, default_variable=None, size=None, @@ -2915,9 +2916,12 @@ def _gen_llvm_ports(self, ctx, builder, ports, group, # the function result can result in 1d structure or scalar # Casting the pointer is LLVM way of adding dimensions array_1d = pnlvm.ir.ArrayType(p_input_data.type.pointee, 1) - array_2d = pnlvm.ir.ArrayType(array_1d, 1) - assert array_1d == p_function.args[2].type.pointee or array_2d == p_function.args[2].type.pointee, \ + assert array_1d == p_function.args[2].type.pointee, \ "{} vs. {}".format(p_function.args[2].type.pointee, p_input_data.type.pointee) + # restrict shape matching to casting 1d values to 2d arrays + # for Control/Gating signals + assert len(p_function.args[2].type.pointee) == 1 + assert str(port).startswith("(ControlSignal") or str(port).startswith("(GatingSignal") p_input = builder.bitcast(p_input_data, p_function.args[2].type) else: @@ -3032,7 +3036,7 @@ def _gen_llvm_output_port_parse_variable(self, ctx, builder, except TypeError as e: # TypeError means we can't index. 
# Convert this to assertion failure below - pass + data = None else: #TODO: support more spec options if name == OWNER_VALUE: @@ -3042,18 +3046,19 @@ def _gen_llvm_output_port_parse_variable(self, ctx, builder, else: data = None - if data is not None: - parsed = builder.gep(data, [ctx.int32_ty(0), *(ctx.int32_ty(i) for i in ids)]) - # "num_executions" are kept as int64, we need to convert the value to float first - if name == "num_executions": - count = builder.load(parsed) - count_fp = builder.uitofp(count, ctx.float_ty) - parsed = builder.alloca(count_fp.type) - builder.store(count_fp, parsed) + assert data is not None, "Unsupported OutputPort spec: {} ({})".format(port_spec, value.type) - return parsed + parsed = builder.gep(data, [ctx.int32_ty(0), *(ctx.int32_ty(i) for i in ids)]) + # "num_executions" are kept as int64, we need to convert the value to float first + # port inputs are also expected to be 1d arrays + if name == "num_executions": + count = builder.load(parsed) + count_fp = builder.uitofp(count, ctx.float_ty) + parsed = builder.alloca(pnlvm.ir.ArrayType(count_fp.type, 1)) + ptr = builder.gep(parsed, [ctx.int32_ty(0), ctx.int32_ty(0)]) + builder.store(count_fp, ptr) - assert False, "Unsupported OutputPort spec: {} ({})".format(port_spec, value.type) + return parsed def _gen_llvm_output_ports(self, ctx, builder, value, mech_params, mech_state, mech_in, mech_out): @@ -3071,29 +3076,34 @@ def _get_input_data_ptr(b, i): mech_params, mech_state, mech_in) return builder - def _gen_llvm_invoke_function(self, ctx, builder, function, f_params, f_state, variable, *, tags:frozenset): + def _gen_llvm_invoke_function(self, ctx, builder, function, f_params, f_state, + variable, out, *, tags:frozenset): + fun = ctx.import_llvm_function(function, tags=tags) - fun_out = builder.alloca(fun.args[3].type.pointee, name=function.name + "_output") + if out is None: + f_out = builder.alloca(fun.args[3].type.pointee, name=function.name + "_output") + else: + f_out = out - builder.call(fun, [f_params, f_state, variable, fun_out]) + builder.call(fun, [f_params, f_state, variable, f_out]) - return fun_out, builder + return f_out, builder def _gen_llvm_is_finished_cond(self, ctx, builder, m_params, m_state): return ctx.bool_ty(1) - def _gen_llvm_mechanism_functions(self, ctx, builder, m_base_params, m_params, m_state, arg_in, - ip_output, *, tags:frozenset): + def _gen_llvm_mechanism_functions(self, ctx, builder, m_base_params, m_params, m_state, m_in, + m_val, ip_output, *, tags:frozenset): # Default mechanism runs only the main function f_base_params = pnlvm.helpers.get_param_ptr(builder, self, m_base_params, "function") f_params, builder = self._gen_llvm_param_ports_for_obj( - self.function, f_base_params, ctx, builder, m_base_params, m_state, arg_in) + self.function, f_base_params, ctx, builder, m_base_params, m_state, m_in) f_state = pnlvm.helpers.get_state_ptr(builder, self, m_state, "function") return self._gen_llvm_invoke_function(ctx, builder, self.function, f_params, f_state, ip_output, - tags=tags) + m_val, tags=tags) def _gen_llvm_function_internal(self, ctx, builder, m_params, m_state, arg_in, arg_out, m_base_params, *, tags:frozenset): @@ -3101,11 +3111,21 @@ def _gen_llvm_function_internal(self, ctx, builder, m_params, m_state, arg_in, ip_output, builder = self._gen_llvm_input_ports(ctx, builder, m_base_params, m_state, arg_in) + # This will move history items around to make space for a new entry + mech_val_ptr = pnlvm.helpers.get_state_space(builder, self, m_state, "value") + value, 
builder = self._gen_llvm_mechanism_functions(ctx, builder, m_base_params, m_params, m_state, arg_in, + mech_val_ptr, ip_output, tags=tags) + if mech_val_ptr.type.pointee == value.type.pointee: + assert value is mech_val_ptr + else: + # FIXME: Does this need some sort of parsing? + warnings.warn("Shape mismatch: function result does not match mechanism value param: {} vs. {}".format(value.type.pointee, mech_val_ptr.type.pointee)) + # Update num_executions parameter num_executions_ptr = pnlvm.helpers.get_state_ptr(builder, self, m_state, "num_executions") for scale in TimeScale: @@ -3117,13 +3137,6 @@ def _gen_llvm_function_internal(self, ctx, builder, m_params, m_state, arg_in, new_val = builder.add(new_val, new_val.type(1)) builder.store(new_val, num_exec_time_ptr) - val_ptr = pnlvm.helpers.get_state_ptr(builder, self, m_state, "value") - if val_ptr.type.pointee == value.type.pointee: - pnlvm.helpers.push_state_val(builder, self, m_state, "value", value) - else: - # FIXME: Does this need some sort of parsing? - warnings.warn("Shape mismatch: function result does not match mechanism value param: {} vs. {}".format(value.type.pointee, val_ptr.type.pointee)) - # Run output ports after updating the mech state (num_executions and value) builder = self._gen_llvm_output_ports(ctx, builder, value, m_base_params, m_state, arg_in, arg_out) @@ -4155,7 +4168,7 @@ def as_mdf_model(self): model.functions.append( mdf.Function( id=combination_function_id, - function={'onnx::ReduceSum': combination_function_args}, + function='onnx::ReduceSum', args=combination_function_args ) ) @@ -4196,9 +4209,6 @@ def as_mdf_model(self): ) model.functions.append(function_model) - for func_model in model.functions: - _substitute_expression_args(func_model) - return model diff --git a/psyneulink/core/components/mechanisms/modulatory/control/controlmechanism.py b/psyneulink/core/components/mechanisms/modulatory/control/controlmechanism.py index 3da410a7ae5..b4d82a6662e 100644 --- a/psyneulink/core/components/mechanisms/modulatory/control/controlmechanism.py +++ b/psyneulink/core/components/mechanisms/modulatory/control/controlmechanism.py @@ -378,11 +378,12 @@ A ControlMechanism's `function ` uses its `outcome ` attribute (the `value ` of its *OUTCOME* `InputPort`) to generate a `control_allocation `. By default, its `function ` is assigned -the `DefaultAllocationFunction`, which takes a single value as its input, and assigns that as the value of -each item of `control_allocation `. Each of these items is assigned as -the allocation for the corresponding `ControlSignal` in `control_signals `. This +the `Identity` function, which takes a single value as its input and copies it to its output; this is assigned as the value of +each item of `control_allocation `, and that item is used as +the allocation for every `ControlSignal` in `control_signals `. This distributes the ControlMechanism's input as the allocation to each of its `control_signals -`. This same behavior also applies to any custom function assigned to a +`. +This same behavior also applies to any custom function assigned to a ControlMechanism that returns a 2d array with a single item in its outer dimension (axis 0).
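A minimal sketch of the default behavior described in the revised paragraph above; the mechanism name and the tuple form of the ControlSignal specifications here are hypothetical and may differ from actual usage:

    import psyneulink as pnl

    task = pnl.ProcessingMechanism(name='task')
    ctl = pnl.ControlMechanism(
        name='ctl',
        control_signals=[('slope', task), ('intercept', task)],
    )
    # With the default Identity function, the single OUTCOME value is copied
    # through unchanged and distributed as the allocation to both
    # ControlSignals above.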
If a function is assigned that returns a 2d array with more than one item, and it has the same number of `control_signals `, then each ControlSignal is assigned to the corresponding item of the function's @@ -587,8 +588,8 @@ import numpy as np import typecheck as tc -from psyneulink.core import llvm as pnlvm from psyneulink.core.components.functions.function import Function_Base, is_function_type +from psyneulink.core.components.functions.nonstateful.transferfunctions import Identity from psyneulink.core.components.functions.nonstateful.combinationfunctions import Concatenate from psyneulink.core.components.functions.nonstateful.combinationfunctions import LinearCombination from psyneulink.core.components.mechanisms.mechanism import Mechanism, Mechanism_Base @@ -605,14 +606,13 @@ MECHANISM, MULTIPLICATIVE, MODULATORY_SIGNALS, MONITOR_FOR_CONTROL, MONITOR_FOR_MODULATION, \ OBJECTIVE_MECHANISM, OUTCOME, OWNER_VALUE, PARAMS, PORT_TYPE, PRODUCT, PROJECTION_TYPE, PROJECTIONS, \ SEPARATE, SIZE -from psyneulink.core.globals.parameters import Parameter +from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel from psyneulink.core.globals.utilities import ContentAddressableList, convert_to_list, convert_to_np_array, is_iterable __all__ = [ 'CONTROL_ALLOCATION', 'GATING_ALLOCATION', 'ControlMechanism', 'ControlMechanismError', 'ControlMechanismRegistry', - 'DefaultAllocationFunction' ] CONTROL_ALLOCATION = 'control_allocation' @@ -727,58 +727,6 @@ def _net_outcome_getter(owning_component=None, context=None): return [0] -class DefaultAllocationFunction(Function_Base): - """Take a single 1d item and return a 2d array with n identical items - Takes the default input (a single value in the *OUTCOME* InputPort of the ControlMechanism), - and returns the same allocation for each of its `control_signals `. 
- """ - componentName = 'Default Control Function' - class Parameters(Function_Base.Parameters): - """ - Attributes - ---------- - - num_control_signals - see `num_control_signals ` - - :default value: 1 - :type: ``int`` - """ - num_control_signals = Parameter(1, stateful=False) - - def __init__(self, - default_variable=None, - params=None, - owner=None - ): - - super().__init__(default_variable=default_variable, - params=params, - owner=owner, - ) - - def _function(self, - variable=None, - context=None, - params=None, - ): - num_ctl_sigs = self._get_current_parameter_value('num_control_signals') - result = np.array([variable[0]] * num_ctl_sigs) - return self.convert_output_type(result) - - def reset(self, *args, force=False, context=None, **kwargs): - # Override Component.reset which requires that the Component is stateful - pass - - def _gen_llvm_function_body(self, ctx, builder, _1, _2, arg_in, arg_out, *, tags:frozenset): - val_ptr = builder.gep(arg_in, [ctx.int32_ty(0), ctx.int32_ty(0)]) - val = builder.load(val_ptr) - with pnlvm.helpers.array_ptr_loop(builder, arg_out, "alloc_loop") as (b, idx): - out_ptr = builder.gep(arg_out, [ctx.int32_ty(0), idx]) - builder.store(val, out_ptr) - return builder - - class ControlMechanism(ModulatoryMechanism_Base): """ ControlMechanism( \ @@ -1201,7 +1149,7 @@ class Parameters(ModulatoryMechanism_Base.Parameters): ) monitor_for_control = Parameter( - [OUTCOME], + [], stateful=False, loggable=False, read_only=True, @@ -1218,6 +1166,7 @@ class Parameters(ModulatoryMechanism_Base.Parameters): aliases=[CONTROL, CONTROL_SIGNALS], constructor_argument=CONTROL ) + function = Parameter(Identity, stateful=False, loggable=False) def _parse_output_ports(self, output_ports): def is_2tuple(o): @@ -1263,6 +1212,7 @@ def _validate_input_ports(self, input_ports): # method? 
# validate_monitored_port_spec(self._owner, input_ports) + @check_user_specified @tc.typecheck def __init__(self, default_variable=None, @@ -1329,8 +1279,6 @@ def __init__(self, f"creating unnecessary and/or duplicated Components.") control = convert_to_list(args) - function = function or DefaultAllocationFunction - super(ControlMechanism, self).__init__( default_variable=default_variable, size=size, @@ -1727,42 +1675,33 @@ def _register_control_signal_type(self, context=None): def _instantiate_control_signals(self, context): """Subclasses can override for class-specific implementation (see OptimizationControlMechanism for example)""" - output_port_specs = list(enumerate(self.output_ports)) - for i, control_signal in output_port_specs: + for i, control_signal in enumerate(self.output_ports): self.control[i] = self._instantiate_control_signal(control_signal, context=context) - num_control_signals = i + 1 - - # For DefaultAllocationFunction, set defaults.value to have number of items equal to num control_signals - if isinstance(self.function, DefaultAllocationFunction): - self.defaults.value = np.tile(self.function.value, (num_control_signals, 1)) - self.parameters.control_allocation._set(copy.deepcopy(self.defaults.value), context) - self.function.num_control_signals = num_control_signals - # For other functions, assume that if its value has: + # For functions, assume that if its value has: # - one item, all control_signals should get it (i.e., the default: (OWNER_VALUE, 0)); # - same number of items as the number of control_signals; # assign each control_signal to the corresponding item of the function's value # - a different number of items than number of control_signals, # leave things alone, and allow any errant indices for control_signals to be caught later. - else: - self.defaults.value = np.array(self.function.value) - self.parameters.value._set(copy.deepcopy(self.defaults.value), context) + self.defaults.value = np.array(self.function.value) + self.parameters.value._set(copy.deepcopy(self.defaults.value), context) - len_fct_value = len(self.function.value) + len_fct_value = len(self.function.value) - # Assign each ControlSignal's variable_spec to index of ControlMechanism's value - for i, control_signal in enumerate(self.control): + # Assign each ControlSignal's variable_spec to index of ControlMechanism's value + for i, control_signal in enumerate(self.control): - # If number of control_signals is same as number of items in function's value, - # assign each ControlSignal to the corresponding item of the function's value - if len_fct_value == num_control_signals: - control_signal._variable_spec = [(OWNER_VALUE, i)] + # If number of control_signals is same as number of items in function's value, + # assign each ControlSignal to the corresponding item of the function's value + if len_fct_value == len(self.control): + control_signal._variable_spec = (OWNER_VALUE, i) - if not isinstance(control_signal.owner_value_index, int): - assert False, \ - f"PROGRAM ERROR: The \'owner_value_index\' attribute for {control_signal.name} " \ - f"of {self.name} ({control_signal.owner_value_index})is not an int." + if not isinstance(control_signal.owner_value_index, int): + assert False, \ + f"PROGRAM ERROR: The \'owner_value_index\' attribute for {control_signal.name} " \ + f"of {self.name} ({control_signal.owner_value_index}) is not an int."
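An illustrative aside (not part of the patch) on the `_variable_spec` assignment in the hunk above, with hypothetical values:

    # ControlSignal i now draws its variable from item i of the owner
    # ControlMechanism's value. For example, if the function's value is
    # [[0.5], [1.2]] and there are two ControlSignals:
    #   control_signals[0]._variable_spec == (OWNER_VALUE, 0)  # allocation [0.5]
    #   control_signals[1]._variable_spec == (OWNER_VALUE, 1)  # allocation [1.2]
    # With a single-item value, all signals keep the default (OWNER_VALUE, 0).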
def _instantiate_control_signal(self, control_signal, context=None): """Parse and instantiate ControlSignal (or subclass relevant to ControlMechanism subclass) diff --git a/psyneulink/core/components/mechanisms/modulatory/control/defaultcontrolmechanism.py b/psyneulink/core/components/mechanisms/modulatory/control/defaultcontrolmechanism.py index 0c92b09e3be..c82fff09f9c 100644 --- a/psyneulink/core/components/mechanisms/modulatory/control/defaultcontrolmechanism.py +++ b/psyneulink/core/components/mechanisms/modulatory/control/defaultcontrolmechanism.py @@ -40,6 +40,7 @@ from psyneulink.core.components.mechanisms.processing.objectivemechanism import ObjectiveMechanism from psyneulink.core.globals.defaults import defaultControlAllocation from psyneulink.core.globals.keywords import CONTROL, INPUT_PORTS, NAME +from psyneulink.core.globals.parameters import check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel from psyneulink.core.globals.utilities import ContentAddressableList @@ -87,6 +88,7 @@ class DefaultControlMechanism(ControlMechanism): # PREFERENCE_SET_NAME: 'DefaultControlMechanismCustomClassPreferences', # PREFERENCE_KEYWORD: ...} + @check_user_specified @tc.typecheck def __init__(self, objective_mechanism:tc.optional(tc.any(ObjectiveMechanism, list))=None, diff --git a/psyneulink/core/components/mechanisms/modulatory/control/gating/gatingmechanism.py b/psyneulink/core/components/mechanisms/modulatory/control/gating/gatingmechanism.py index 5338d545fc4..8aa950f2b4a 100644 --- a/psyneulink/core/components/mechanisms/modulatory/control/gating/gatingmechanism.py +++ b/psyneulink/core/components/mechanisms/modulatory/control/gating/gatingmechanism.py @@ -190,7 +190,7 @@ from psyneulink.core.globals.keywords import \ CONTROL, CONTROL_SIGNALS, GATE, GATING_PROJECTION, GATING_SIGNAL, GATING_SIGNALS, \ INIT_EXECUTE_METHOD_ONLY, MONITOR_FOR_CONTROL, PORT_TYPE, PROJECTIONS, PROJECTION_TYPE -from psyneulink.core.globals.parameters import Parameter +from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel from psyneulink.core.globals.utilities import ContentAddressableList, convert_to_list @@ -433,6 +433,7 @@ class Parameters(ControlMechanism.Parameters): constructor_argument='gate' ) + @check_user_specified @tc.typecheck def __init__(self, default_gating_allocation=None, diff --git a/psyneulink/core/components/mechanisms/modulatory/control/optimizationcontrolmechanism.py b/psyneulink/core/components/mechanisms/modulatory/control/optimizationcontrolmechanism.py index 9b7517f0f60..67b665ce8cf 100644 --- a/psyneulink/core/components/mechanisms/modulatory/control/optimizationcontrolmechanism.py +++ b/psyneulink/core/components/mechanisms/modulatory/control/optimizationcontrolmechanism.py @@ -605,7 +605,7 @@ ` made for each `control_allocation `). COMMENT - .. _OptimizationControlMechanism_State: +.. _OptimizationControlMechanism_State: *State* ~~~~~~~ @@ -748,23 +748,24 @@ If an OptimizationControlMechanism has an `objective_mechanism `, it is assigned a single outcome_input_port, named *OUTCOME*, that receives a Projection from the objective_mechanism's `OUTCOME OutputPort `. 
The OptimizationControlMechanism's `objective_mechanism -` is used to evaluate the outcome of executing its `agent_rep +` is used to evaluate the outcome of executing its `agent_rep ` for a given `state `. This passes the result to the OptimizationControlMechanism's *OUTCOME* InputPort, that is placed in its `outcome ` attribute. .. note:: - An OptimizationControlMechanism's `objective_mechanism ` and its `function - ` are distinct from, and should not be confused with the `objective_function - ` parameter of the OptimizationControlMechanism's `function - `. The `objective_mechanism `\\'s - `function ` evaluates the `outcome ` of processing - without taking into account the `costs ` of the OptimizationControlMechanism's - `control_signals `. In contrast, its `evaluate_agent_rep - ` method, which is assigned as the `objective_function` - parameter of its `function `, takes the `costs ` - of the OptimizationControlMechanism's `control_signals ` into - account when calculating the `net_outcome` that it returns as its result. + An OptimizationControlMechanism's `objective_mechanism ` and the `function + ` of that Mechanism are distinct from, and should not be confused with, the + `objective_function ` parameter of the OptimizationControlMechanism's + `function `. The `objective_mechanism + `\\'s `function ` evaluates the `outcome + ` of processing without taking into account the `costs ` of + the OptimizationControlMechanism's `control_signals `. In + contrast, its `evaluate_agent_rep ` method, which is assigned + as the `objective_function` parameter of its `function `, takes the + `costs ` of the OptimizationControlMechanism's `control_signals + ` into account when calculating the `net_outcome` that it + returns as its result. COMMENT: ADD HINT HERE RE: USE OF CONCATENATION @@ -1098,9 +1099,9 @@ ALL, COMPOSITION, COMPOSITION_FUNCTION_APPROXIMATOR, CONCATENATE, DEFAULT_INPUT, DEFAULT_VARIABLE, EID_FROZEN, \ FUNCTION, INPUT_PORT, INTERNAL_ONLY, NAME, OPTIMIZATION_CONTROL_MECHANISM, NODE, OWNER_VALUE, PARAMS, PORT, \ PROJECTIONS, SHADOW_INPUTS, VALUE -from psyneulink.core.globals.registry import rename_instance_in_registry -from psyneulink.core.globals.parameters import Parameter +from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel +from psyneulink.core.globals.registry import rename_instance_in_registry from psyneulink.core.globals.sampleiterator import SampleIterator, SampleSpec from psyneulink.core.globals.utilities import convert_to_list, ContentAddressableList, is_numeric from psyneulink.core.llvm.debug import debug_env @@ -1417,7 +1418,8 @@ class OptimizationControlMechanism(ControlMechanism): its `monitor_for_control ` attribute, the values of which are used to compute the `net_outcome ` of executing the `agent_rep ` in a given `OptimizationControlMechanism_State` - (see `Outcome ` for additional details). + (see `objective_mechanism ` and `outcome_input_ports + ` for additional details). state : ndarray lists the values of the current state -- a concatenation of the `state_feature_values @@ -1739,6 +1741,7 @@ def _validate_state_feature_default_spec(self, state_feature_default): f"with a shape appropriate for all of the INPUT Nodes or InputPorts to which it will be applied."
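The `@check_user_specified` decorator applied throughout this patch is imported from psyneulink/core/globals/parameters.py, whose definition is not shown in this diff. The following is only a guess at the general shape of such a decorator -- recording which constructor arguments the caller passed explicitly -- not the actual implementation:

    import functools
    import inspect

    def check_user_specified(func):
        """Hypothetical sketch: record the arguments a caller passed explicitly."""
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            # bind() only includes arguments actually supplied in the call,
            # so parameters left at their defaults are excluded
            bound = inspect.signature(func).bind(self, *args, **kwargs)
            self._user_specified_args = set(bound.arguments) - {'self'}  # hypothetical attribute
            return func(self, *args, **kwargs)
        return wrapper

Stacked above `@tc.typecheck` as in the hunks here, such a wrapper would run before type checking on each call to `__init__`.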
@handle_external_context() + @check_user_specified @tc.typecheck def __init__(self, agent_rep=None, @@ -3496,9 +3499,16 @@ def _gen_llvm_function(self, *, ctx:pnlvm.LLVMBuilderContext, tags:frozenset): return f - def _gen_llvm_invoke_function(self, ctx, builder, function, params, context, variable, *, tags:frozenset): + def _gen_llvm_invoke_function(self, ctx, builder, function, params, context, + variable, out, *, tags:frozenset): fun = ctx.import_llvm_function(function) + + # The function returns (sample_optimal, value_optimal), + # but the value of mechanism is only 'sample_optimal' + # so we cannot reuse the space provided and need to explicitly copy + # the results later. fun_out = builder.alloca(fun.args[3].type.pointee, name="func_out") + value = builder.gep(fun_out, [ctx.int32_ty(0), ctx.int32_ty(0)]) args = [params, context, variable, fun_out] # If we're calling compiled version of Composition.evaluate, @@ -3507,13 +3517,17 @@ def _gen_llvm_invoke_function(self, ctx, builder, function, params, context, var args += builder.function.args[-3:] builder.call(fun, args) - return fun_out, builder - def _gen_llvm_output_port_parse_variable(self, ctx, builder, params, state, value, port): - # The function returns (sample_optimal, value_optimal), - # but the value of mechanism is only 'sample_optimal' - value = builder.gep(value, [ctx.int32_ty(0), ctx.int32_ty(0)]) - return super()._gen_llvm_output_port_parse_variable(ctx, builder, params, state, value, port) + # The mechanism also converts the value to array of arrays + # e.g. [3 x double] -> [3 x [1 x double]] + assert len(value.type.pointee) == len(out.type.pointee) + assert value.type.pointee.element == out.type.pointee.element.element + with pnlvm.helpers.array_ptr_loop(builder, out, id='mech_value_copy') as (b, idx): + src = b.gep(value, [ctx.int32_ty(0), idx]) + dst = b.gep(out, [ctx.int32_ty(0), idx, ctx.int32_ty(0)]) + b.store(b.load(src), dst) + + return out, builder @property def agent_rep_type(self): diff --git a/psyneulink/core/components/mechanisms/modulatory/learning/learningmechanism.py b/psyneulink/core/components/mechanisms/modulatory/learning/learningmechanism.py index c61c02501f5..2ae1da4c11b 100644 --- a/psyneulink/core/components/mechanisms/modulatory/learning/learningmechanism.py +++ b/psyneulink/core/components/mechanisms/modulatory/learning/learningmechanism.py @@ -545,7 +545,7 @@ ADDITIVE, AFTER, ASSERT, ENABLED, INPUT_PORTS, \ LEARNED_PARAM, LEARNING, LEARNING_MECHANISM, LEARNING_PROJECTION, LEARNING_SIGNAL, LEARNING_SIGNALS, \ MATRIX, NAME, ONLINE, OUTPUT_PORT, OWNER_VALUE, PARAMS, PROJECTIONS, SAMPLE, PORT_TYPE, VARIABLE -from psyneulink.core.globals.parameters import FunctionParameter, Parameter +from psyneulink.core.globals.parameters import FunctionParameter, Parameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel from psyneulink.core.globals.utilities import ContentAddressableList, convert_to_np_array, is_numeric, parameter_spec, \ @@ -999,6 +999,7 @@ class Parameters(ModulatoryMechanism_Base.Parameters): structural=True, ) + @check_user_specified @tc.typecheck def __init__(self, # default_variable:tc.any(list, np.ndarray), diff --git a/psyneulink/core/components/mechanisms/modulatory/modulatorymechanism.py b/psyneulink/core/components/mechanisms/modulatory/modulatorymechanism.py index df26cf1ded9..ebc92c25a03 100644 --- 
a/psyneulink/core/components/mechanisms/modulatory/modulatorymechanism.py +++ b/psyneulink/core/components/mechanisms/modulatory/modulatorymechanism.py @@ -140,6 +140,7 @@ from psyneulink.core.components.mechanisms.mechanism import Mechanism_Base from psyneulink.core.globals.keywords import ADAPTIVE_MECHANISM +from psyneulink.core.globals.parameters import check_user_specified from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel __all__ = [ @@ -191,6 +192,7 @@ class Parameters(Mechanism_Base.Parameters): # PREFERENCE_SET_NAME: 'ModulatoryMechanismClassPreferences', # PREFERENCE_KEYWORD: ...} + @check_user_specified def __init__(self, default_variable, size, diff --git a/psyneulink/core/components/mechanisms/processing/compositioninterfacemechanism.py b/psyneulink/core/components/mechanisms/processing/compositioninterfacemechanism.py index 80869f28701..94ce762873c 100644 --- a/psyneulink/core/components/mechanisms/processing/compositioninterfacemechanism.py +++ b/psyneulink/core/components/mechanisms/processing/compositioninterfacemechanism.py @@ -122,7 +122,7 @@ from psyneulink.core.globals.context import ContextFlags, handle_external_context from psyneulink.core.globals.keywords import COMPOSITION_INTERFACE_MECHANISM, INPUT_PORTS, OUTPUT_PORTS, \ PREFERENCE_SET_NAME -from psyneulink.core.globals.parameters import Parameter +from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set, REPORT_OUTPUT_PREF from psyneulink.core.globals.preferences.preferenceset import PreferenceEntry, PreferenceLevel @@ -174,6 +174,7 @@ class Parameters(ProcessingMechanism_Base.Parameters): """ function = Parameter(Identity, stateful=False, loggable=False) + @check_user_specified @tc.typecheck def __init__(self, default_variable=None, diff --git a/psyneulink/core/components/mechanisms/processing/defaultprocessingmechanism.py b/psyneulink/core/components/mechanisms/processing/defaultprocessingmechanism.py index 8bb14d9bd03..bf3770582bd 100644 --- a/psyneulink/core/components/mechanisms/processing/defaultprocessingmechanism.py +++ b/psyneulink/core/components/mechanisms/processing/defaultprocessingmechanism.py @@ -18,6 +18,7 @@ from psyneulink.core.components.mechanisms.mechanism import Mechanism_Base from psyneulink.core.globals.defaults import SystemDefaultInputValue from psyneulink.core.globals.keywords import DEFAULT_PROCESSING_MECHANISM +from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel @@ -50,8 +51,9 @@ class DefaultProcessingMechanism_Base(Mechanism_Base): # PREFERENCE_KEYWORD: ...} class Parameters(Mechanism_Base.Parameters): - variable = np.array([SystemDefaultInputValue]) + variable = Parameter(np.array([SystemDefaultInputValue]), constructor_argument='default_variable') + @check_user_specified @tc.typecheck def __init__(self, default_variable=None, diff --git a/psyneulink/core/components/mechanisms/processing/integratormechanism.py b/psyneulink/core/components/mechanisms/processing/integratormechanism.py index 4da4319a3bc..e11dd8b47b4 100644 --- a/psyneulink/core/components/mechanisms/processing/integratormechanism.py +++ b/psyneulink/core/components/mechanisms/processing/integratormechanism.py @@ -89,10 +89,9 @@ from psyneulink.core.components.functions.stateful.integratorfunctions import 
AdaptiveIntegrator from psyneulink.core.components.mechanisms.processing.processingmechanism import ProcessingMechanism_Base from psyneulink.core.components.mechanisms.mechanism import Mechanism -from psyneulink.core.globals.json import _substitute_expression_args from psyneulink.core.globals.keywords import \ DEFAULT_VARIABLE, INTEGRATOR_MECHANISM, VARIABLE, PREFERENCE_SET_NAME -from psyneulink.core.globals.parameters import Parameter +from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set, REPORT_OUTPUT_PREF from psyneulink.core.globals.preferences.preferenceset import PreferenceEntry, PreferenceLevel from psyneulink.core.globals.utilities import parse_valid_identifier @@ -152,6 +151,7 @@ class Parameters(ProcessingMechanism_Base.Parameters): function = Parameter(AdaptiveIntegrator(rate=0.5), stateful=False, loggable=False) # + @check_user_specified @tc.typecheck def __init__(self, default_variable=None, @@ -255,7 +255,4 @@ def as_mdf_model(self): model.functions.extend(extra_noise_functions) function_model.args['noise'] = main_noise_function.id - for func_model in model.functions: - _substitute_expression_args(func_model) - return model diff --git a/psyneulink/core/components/mechanisms/processing/objectivemechanism.py b/psyneulink/core/components/mechanisms/processing/objectivemechanism.py index 84b69156e63..2aaffee2c36 100644 --- a/psyneulink/core/components/mechanisms/processing/objectivemechanism.py +++ b/psyneulink/core/components/mechanisms/processing/objectivemechanism.py @@ -378,7 +378,7 @@ from psyneulink.core.globals.keywords import \ CONTROL, EXPONENT, EXPONENTS, LEARNING, MATRIX, NAME, OBJECTIVE_MECHANISM, OUTCOME, OWNER_VALUE, \ PARAMS, PREFERENCE_SET_NAME, PROJECTION, PROJECTIONS, PORT_TYPE, VARIABLE, WEIGHT, WEIGHTS -from psyneulink.core.globals.parameters import Parameter +from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set, REPORT_OUTPUT_PREF from psyneulink.core.globals.preferences.preferenceset import PreferenceEntry, PreferenceLevel from psyneulink.core.globals.utilities import ContentAddressableList @@ -562,6 +562,7 @@ class Parameters(ProcessingMechanism_Base.Parameters): standard_output_port_names.extend([OUTCOME]) # FIX: TYPECHECK MONITOR TO LIST OR ZIP OBJECT + @check_user_specified @tc.typecheck def __init__(self, monitor=None, diff --git a/psyneulink/core/components/mechanisms/processing/processingmechanism.py b/psyneulink/core/components/mechanisms/processing/processingmechanism.py index 8da4cbdfbe5..d6cbc63488c 100644 --- a/psyneulink/core/components/mechanisms/processing/processingmechanism.py +++ b/psyneulink/core/components/mechanisms/processing/processingmechanism.py @@ -98,7 +98,8 @@ from psyneulink.core.components.ports.outputport import OutputPort from psyneulink.core.globals.keywords import \ FUNCTION, MAX_ABS_INDICATOR, MAX_ABS_ONE_HOT, MAX_ABS_VAL, MAX_INDICATOR, MAX_ONE_HOT, MAX_VAL, MEAN, MEDIAN, \ - NAME, PROB, PROCESSING_MECHANISM, PREFERENCE_SET_NAME, STANDARD_DEVIATION, VARIANCE + NAME, PROB, PROCESSING_MECHANISM, PREFERENCE_SET_NAME, STANDARD_DEVIATION, VARIANCE, VARIABLE, OWNER_VALUE +from psyneulink.core.globals.parameters import check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set, REPORT_OUTPUT_PREF from psyneulink.core.globals.preferences.preferenceset import PreferenceEntry, 
PreferenceLevel @@ -165,9 +166,11 @@ class ProcessingMechanism_Base(Mechanism_Base): {NAME: MAX_ABS_INDICATOR, FUNCTION: OneHot(mode=MAX_ABS_INDICATOR)}, {NAME: PROB, + VARIABLE: OWNER_VALUE, FUNCTION: SoftMax(output=PROB)}]) standard_output_port_names = [i['name'] for i in standard_output_ports] + @check_user_specified def __init__(self, default_variable=None, size=None, @@ -282,6 +285,7 @@ class ProcessingMechanism(ProcessingMechanism_Base): PREFERENCE_SET_NAME: 'ProcessingMechanismCustomClassPreferences', REPORT_OUTPUT_PREF: PreferenceEntry(False, PreferenceLevel.INSTANCE)} + @check_user_specified @tc.typecheck def __init__(self, default_variable=None, diff --git a/psyneulink/core/components/mechanisms/processing/transfermechanism.py b/psyneulink/core/components/mechanisms/processing/transfermechanism.py index 44adbd44596..ca81117a2c5 100644 --- a/psyneulink/core/components/mechanisms/processing/transfermechanism.py +++ b/psyneulink/core/components/mechanisms/processing/transfermechanism.py @@ -842,13 +842,13 @@ from psyneulink.core.components.ports.inputport import InputPort from psyneulink.core.components.ports.outputport import OutputPort from psyneulink.core.globals.context import ContextFlags, handle_external_context -from psyneulink.core.globals.json import _get_variable_parameter_name, _substitute_expression_args +from psyneulink.core.globals.mdf import _get_variable_parameter_name from psyneulink.core.globals.keywords import \ COMBINE, comparison_operators, EXECUTION_COUNT, FUNCTION, GREATER_THAN_OR_EQUAL, \ CURRENT_VALUE, LESS_THAN_OR_EQUAL, MAX_ABS_DIFF, \ NAME, NOISE, NUM_EXECUTIONS_BEFORE_FINISHED, OWNER_VALUE, RESET, RESULT, RESULTS, \ SELECTION_FUNCTION_TYPE, TRANSFER_FUNCTION_TYPE, TRANSFER_MECHANISM, VARIABLE -from psyneulink.core.globals.parameters import Parameter, FunctionParameter +from psyneulink.core.globals.parameters import Parameter, FunctionParameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel from psyneulink.core.globals.utilities import \ @@ -1283,6 +1283,7 @@ def _validate_termination_comparison_op(self, termination_comparison_op): return f"must be boolean comparison operator or one of the following strings:" \ f" {','.join(comparison_operators.keys())}." + @check_user_specified @tc.typecheck def __init__(self, default_variable=None, @@ -1543,13 +1544,11 @@ def _gen_llvm_is_finished_cond(self, ctx, builder, params, state): return builder.fcmp_ordered("!=", is_finished_flag, is_finished_flag.type(0)) - # If modulated, termination threshold is single element array - if isinstance(threshold_ptr.type.pointee, pnlvm.ir.ArrayType): - assert len(threshold_ptr.type.pointee) == 1 - threshold_ptr = builder.gep(threshold_ptr, [ctx.int32_ty(0), - ctx.int32_ty(0)]) + # If modulated, termination threshold is single element array. 
+ # Otherwise, it is scalar + threshold = pnlvm.helpers.load_extract_scalar_array_one(builder, + threshold_ptr) - threshold = builder.load(threshold_ptr) cmp_val_ptr = builder.alloca(threshold.type, name="is_finished_value") if self.termination_measure is max: assert self._termination_measure_num_items_expected == 1 @@ -1605,7 +1604,7 @@ def _gen_llvm_is_finished_cond(self, ctx, builder, params, state): return builder.fcmp_ordered(cmp_str, cmp_val, threshold) def _gen_llvm_mechanism_functions(self, ctx, builder, m_base_params, m_params, - m_state, arg_in, ip_out, *, tags:frozenset): + m_state, m_in, m_val, ip_out, *, tags:frozenset): if self.integrator_mode: if_state = pnlvm.helpers.get_state_ptr(builder, self, m_state, @@ -1614,20 +1613,23 @@ def _gen_llvm_mechanism_functions(self, ctx, builder, m_base_params, m_params, "integrator_function") if_params, builder = self._gen_llvm_param_ports_for_obj( self.integrator_function, if_base_params, ctx, builder, - m_base_params, m_state, arg_in) + m_base_params, m_state, m_in) mf_in, builder = self._gen_llvm_invoke_function( - ctx, builder, self.integrator_function, if_params, if_state, ip_out, tags=tags) + ctx, builder, self.integrator_function, if_params, + if_state, ip_out, None, tags=tags) else: mf_in = ip_out mf_state = pnlvm.helpers.get_state_ptr(builder, self, m_state, "function") mf_base_params = pnlvm.helpers.get_param_ptr(builder, self, m_base_params, "function") mf_params, builder = self._gen_llvm_param_ports_for_obj( - self.function, mf_base_params, ctx, builder, m_base_params, m_state, arg_in) + self.function, mf_base_params, ctx, builder, m_base_params, m_state, m_in) mf_out, builder = self._gen_llvm_invoke_function(ctx, builder, - self.function, mf_params, mf_state, mf_in, tags=tags) + self.function, mf_params, + mf_state, mf_in, m_val, + tags=tags) clip_ptr = pnlvm.helpers.get_param_ptr(builder, self, m_params, "clip") if len(clip_ptr.type.pointee) != 0: @@ -1852,7 +1854,4 @@ def as_mdf_model(self): integrator_function_model, 'noise', main_noise_function.id ) - for func_model in model.functions: - _substitute_expression_args(func_model) - return model diff --git a/psyneulink/core/components/ports/inputport.py b/psyneulink/core/components/ports/inputport.py index 2b1ee1b637a..84fb891f715 100644 --- a/psyneulink/core/components/ports/inputport.py +++ b/psyneulink/core/components/ports/inputport.py @@ -589,7 +589,7 @@ LEARNING_SIGNAL, MAPPING_PROJECTION, MATRIX, NAME, OPERATION, OUTPUT_PORT, OUTPUT_PORTS, OWNER, \ PARAMS, PRODUCT, PROJECTIONS, REFERENCE_VALUE, \ SENDER, SHADOW_INPUTS, SHADOW_INPUT_NAME, SIZE, PORT_TYPE, SUM, VALUE, VARIABLE, WEIGHT -from psyneulink.core.globals.parameters import Parameter +from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel from psyneulink.core.globals.utilities import \ @@ -874,6 +874,7 @@ def _validate_default_input(self, default_input): #endregion @handle_external_context() + @check_user_specified @tc.typecheck def __init__(self, owner=None, diff --git a/psyneulink/core/components/ports/modulatorysignals/controlsignal.py b/psyneulink/core/components/ports/modulatorysignals/controlsignal.py index a5e534579cc..5a9c22f9e6d 100644 --- a/psyneulink/core/components/ports/modulatorysignals/controlsignal.py +++ b/psyneulink/core/components/ports/modulatorysignals/controlsignal.py @@ -421,7 +421,8 @@ OUTPUT_PORT, OUTPUT_PORTS, 
OUTPUT_PORT_PARAMS, \ PARAMETER_PORT, PARAMETER_PORTS, PROJECTIONS, \ RECEIVER, FUNCTION -from psyneulink.core.globals.parameters import FunctionParameter, Parameter, get_validator_by_function +from psyneulink.core.globals.parameters import FunctionParameter, Parameter, get_validator_by_function, \ + check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel from psyneulink.core.globals.sampleiterator import SampleSpec, SampleIterator @@ -792,6 +793,7 @@ def _validate_allocation_samples(self, allocation_samples): #endregion + @check_user_specified @tc.typecheck def __init__(self, owner=None, diff --git a/psyneulink/core/components/ports/modulatorysignals/gatingsignal.py b/psyneulink/core/components/ports/modulatorysignals/gatingsignal.py index b24e5da5eb7..62d0476e1c3 100644 --- a/psyneulink/core/components/ports/modulatorysignals/gatingsignal.py +++ b/psyneulink/core/components/ports/modulatorysignals/gatingsignal.py @@ -252,7 +252,7 @@ from psyneulink.core.globals.keywords import \ GATE, GATING_PROJECTION, GATING_SIGNAL, INPUT_PORT, INPUT_PORTS, \ MODULATES, OUTPUT_PORT, OUTPUT_PORTS, OUTPUT_PORT_PARAMS, PROJECTIONS, RECEIVER -from psyneulink.core.globals.parameters import Parameter +from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel @@ -417,6 +417,7 @@ class Parameters(ControlSignal.Parameters): #endregion + @check_user_specified @tc.typecheck def __init__(self, owner=None, diff --git a/psyneulink/core/components/ports/modulatorysignals/learningsignal.py b/psyneulink/core/components/ports/modulatorysignals/learningsignal.py index 335896a8bc9..72500b84991 100644 --- a/psyneulink/core/components/ports/modulatorysignals/learningsignal.py +++ b/psyneulink/core/components/ports/modulatorysignals/learningsignal.py @@ -194,7 +194,7 @@ from psyneulink.core.components.ports.outputport import PRIMARY from psyneulink.core.globals.keywords import \ LEARNING_PROJECTION, LEARNING_SIGNAL, OUTPUT_PORT_PARAMS, PARAMETER_PORT, PARAMETER_PORTS, RECEIVER -from psyneulink.core.globals.parameters import Parameter +from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel from psyneulink.core.globals.utilities import parameter_spec @@ -333,6 +333,7 @@ class Parameters(ModulatorySignal.Parameters): value = Parameter(np.array([0]), read_only=True, aliases=['learning_signal'], pnl_internal=True) learning_rate = None + @check_user_specified @tc.typecheck def __init__(self, owner=None, diff --git a/psyneulink/core/components/ports/modulatorysignals/modulatorysignal.py b/psyneulink/core/components/ports/modulatorysignals/modulatorysignal.py index deb1e474258..3e295b414cb 100644 --- a/psyneulink/core/components/ports/modulatorysignals/modulatorysignal.py +++ b/psyneulink/core/components/ports/modulatorysignals/modulatorysignal.py @@ -412,6 +412,7 @@ from psyneulink.core.globals.keywords import \ ADDITIVE_PARAM, CONTROL, DISABLE, MAYBE, MECHANISM, MODULATION, MODULATORY_SIGNAL, MULTIPLICATIVE_PARAM, \ OVERRIDE, PROJECTIONS, VARIABLE +from psyneulink.core.globals.parameters import check_user_specified from 
psyneulink.core.globals.preferences.preferenceset import PreferenceLevel __all__ = [ @@ -562,6 +563,7 @@ class Parameters(OutputPort.Parameters): # PREFERENCE_SET_NAME: 'OutputPortCustomClassPreferences', # PREFERENCE_KEYWORD: ...} + @check_user_specified def __init__(self, owner=None, size=None, diff --git a/psyneulink/core/components/ports/outputport.py b/psyneulink/core/components/ports/outputport.py index 5c2be3a09bc..5e1c2bc1eba 100644 --- a/psyneulink/core/components/ports/outputport.py +++ b/psyneulink/core/components/ports/outputport.py @@ -631,7 +631,7 @@ OWNER_VALUE, PARAMS, PARAMS_DICT, PROJECTION, PROJECTIONS, RECEIVER, REFERENCE_VALUE, STANDARD_OUTPUT_PORTS, PORT, \ VALUE, VARIABLE, \ output_port_spec_to_parameter_name, INPUT_PORT_VARIABLES -from psyneulink.core.globals.parameters import Parameter +from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel from psyneulink.core.globals.utilities import \ @@ -905,6 +905,7 @@ class Parameters(Port_Base.Parameters): #endregion + @check_user_specified @tc.typecheck @handle_external_context() def __init__(self, diff --git a/psyneulink/core/components/ports/parameterport.py b/psyneulink/core/components/ports/parameterport.py index c37514c3f58..cd05d489203 100644 --- a/psyneulink/core/components/ports/parameterport.py +++ b/psyneulink/core/components/ports/parameterport.py @@ -382,7 +382,7 @@ CONTEXT, CONTROL_PROJECTION, CONTROL_SIGNAL, CONTROL_SIGNALS, FUNCTION, FUNCTION_PARAMS, \ LEARNING_SIGNAL, LEARNING_SIGNALS, MECHANISM, NAME, PARAMETER_PORT, PARAMETER_PORT_PARAMS, PATHWAY_PROJECTION, \ PROJECTION, PROJECTIONS, PROJECTION_TYPE, REFERENCE_VALUE, SENDER, VALUE -from psyneulink.core.globals.parameters import ParameterBase, ParameterAlias, SharedParameter +from psyneulink.core.globals.parameters import ParameterBase, ParameterAlias, SharedParameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel from psyneulink.core.globals.utilities \ @@ -701,6 +701,7 @@ class ParameterPort(Port_Base): #endregion tc.typecheck + @check_user_specified def __init__(self, owner, reference_value=None, diff --git a/psyneulink/core/components/ports/port.py b/psyneulink/core/components/ports/port.py index cdc89dc7b0b..bf17f401732 100644 --- a/psyneulink/core/components/ports/port.py +++ b/psyneulink/core/components/ports/port.py @@ -797,7 +797,7 @@ def test_multiple_modulatory_projections_with_mech_and_port_Name_specs(self): RECEIVER, REFERENCE_VALUE, REFERENCE_VALUE_NAME, SENDER, STANDARD_OUTPUT_PORTS, \ PORT, PORT_COMPONENT_CATEGORY, PORT_CONTEXT, Port_Name, port_params, PORT_PREFS, PORT_TYPE, port_value, \ VALUE, VARIABLE, WEIGHT -from psyneulink.core.globals.parameters import Parameter +from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import VERBOSE_PREF from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel from psyneulink.core.globals.registry import register_category @@ -1004,6 +1004,7 @@ class Parameters(Port.Parameters): classPreferenceLevel = PreferenceLevel.CATEGORY + @check_user_specified @tc.typecheck @abc.abstractmethod def __init__(self, diff --git a/psyneulink/core/components/projections/modulatory/controlprojection.py 
b/psyneulink/core/components/projections/modulatory/controlprojection.py index 624eb563a0d..72d17f635f6 100644 --- a/psyneulink/core/components/projections/modulatory/controlprojection.py +++ b/psyneulink/core/components/projections/modulatory/controlprojection.py @@ -120,7 +120,7 @@ from psyneulink.core.globals.context import ContextFlags from psyneulink.core.globals.keywords import \ CONTROL, CONTROL_PROJECTION, CONTROL_SIGNAL, INPUT_PORT, OUTPUT_PORT, PARAMETER_PORT -from psyneulink.core.globals.parameters import Parameter, SharedParameter +from psyneulink.core.globals.parameters import Parameter, SharedParameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel @@ -237,6 +237,7 @@ class Parameters(ModulatoryProjection_Base.Parameters): projection_sender = ControlMechanism + @check_user_specified @tc.typecheck def __init__(self, sender=None, diff --git a/psyneulink/core/components/projections/modulatory/gatingprojection.py b/psyneulink/core/components/projections/modulatory/gatingprojection.py index 1c852bbea2c..0bdcc4801e5 100644 --- a/psyneulink/core/components/projections/modulatory/gatingprojection.py +++ b/psyneulink/core/components/projections/modulatory/gatingprojection.py @@ -112,7 +112,7 @@ from psyneulink.core.globals.keywords import \ FUNCTION_OUTPUT_TYPE, GATE, GATING_MECHANISM, GATING_PROJECTION, GATING_SIGNAL, \ INPUT_PORT, OUTPUT_PORT -from psyneulink.core.globals.parameters import Parameter +from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel @@ -238,6 +238,7 @@ class Parameters(ModulatoryProjection_Base.Parameters): projection_sender = GatingMechanism + @check_user_specified @tc.typecheck def __init__(self, sender=None, diff --git a/psyneulink/core/components/projections/modulatory/learningprojection.py b/psyneulink/core/components/projections/modulatory/learningprojection.py index 4b1a4a8bb63..fe0d021db7a 100644 --- a/psyneulink/core/components/projections/modulatory/learningprojection.py +++ b/psyneulink/core/components/projections/modulatory/learningprojection.py @@ -202,7 +202,7 @@ from psyneulink.core.globals.keywords import \ LEARNING, LEARNING_PROJECTION, LEARNING_SIGNAL, \ MATRIX, PARAMETER_PORT, PROJECTION_SENDER, ONLINE, AFTER -from psyneulink.core.globals.parameters import Parameter +from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel from psyneulink.core.globals.utilities import iscompatible, parameter_spec @@ -440,6 +440,7 @@ class Parameters(ModulatoryProjection_Base.Parameters): projection_sender = LearningMechanism + @check_user_specified @tc.typecheck def __init__(self, sender:tc.optional(tc.any(LearningSignal, LearningMechanism))=None, diff --git a/psyneulink/core/components/projections/pathway/mappingprojection.py b/psyneulink/core/components/projections/pathway/mappingprojection.py index ba6f37c23a8..557c1b3dbd4 100644 --- a/psyneulink/core/components/projections/pathway/mappingprojection.py +++ b/psyneulink/core/components/projections/pathway/mappingprojection.py @@ -299,7 +299,7 @@ MAPPING_PROJECTION, MATRIX, \ OUTPUT_PORT, VALUE from psyneulink.core.globals.log 
import ContextFlags -from psyneulink.core.globals.parameters import FunctionParameter, Parameter +from psyneulink.core.globals.parameters import FunctionParameter, Parameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel @@ -442,6 +442,7 @@ class sockets: projection_sender = OutputPort + @check_user_specified def __init__(self, sender=None, receiver=None, diff --git a/psyneulink/core/components/projections/pathway/pathwayprojection.py b/psyneulink/core/components/projections/pathway/pathwayprojection.py index e777205f6c2..61952a9327b 100644 --- a/psyneulink/core/components/projections/pathway/pathwayprojection.py +++ b/psyneulink/core/components/projections/pathway/pathwayprojection.py @@ -16,8 +16,6 @@ * `PathwayProjection_Overview` * `PathwayProjection_Creation` * `PathwayProjection_Structure` - - `PathwayProjection_Sender` - - `PathwayProjection_Receiver` * `PathwayProjection_Execution` * `PathwayProjection_Class_Reference` @@ -46,7 +44,6 @@ A PathwayProjection has the same structure as a `Projection `. - .. _PathwayProjection_Execution: Execution @@ -63,10 +60,9 @@ """ -from psyneulink.core.components.projections.projection import Projection_Base, ProjectionRegistry +from psyneulink.core.components.projections.projection import Projection_Base from psyneulink.core.globals.context import ContextFlags from psyneulink.core.globals.keywords import NAME, PATHWAY_PROJECTION, RECEIVER, SENDER -from psyneulink.core.globals.registry import remove_instance_from_registry __all__ = [] diff --git a/psyneulink/core/components/projections/projection.py b/psyneulink/core/components/projections/projection.py index 6999bca6702..d0f8c4c39b2 100644 --- a/psyneulink/core/components/projections/projection.py +++ b/psyneulink/core/components/projections/projection.py @@ -409,7 +409,7 @@ from psyneulink.core.components.ports.port import PortError from psyneulink.core.components.shellclasses import Mechanism, Process_Base, Projection, Port from psyneulink.core.globals.context import ContextFlags -from psyneulink.core.globals.json import _get_variable_parameter_name +from psyneulink.core.globals.mdf import _get_variable_parameter_name from psyneulink.core.globals.keywords import \ CONTROL, CONTROL_PROJECTION, CONTROL_SIGNAL, EXPONENT, FUNCTION_PARAMS, GATE, GATING_PROJECTION, GATING_SIGNAL, \ INPUT_PORT, LEARNING, LEARNING_PROJECTION, LEARNING_SIGNAL, \ @@ -418,7 +418,7 @@ NAME, OUTPUT_PORT, OUTPUT_PORTS, PARAMS, PATHWAY, PROJECTION, PROJECTION_PARAMS, PROJECTION_SENDER, PROJECTION_TYPE, \ RECEIVER, SENDER, STANDARD_ARGS, PORT, PORTS, WEIGHT, ADD_INPUT_PORT, ADD_OUTPUT_PORT, \ PROJECTION_COMPONENT_CATEGORY -from psyneulink.core.globals.parameters import Parameter +from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel from psyneulink.core.globals.registry import register_category, remove_instance_from_registry from psyneulink.core.globals.socket import ConnectionInfo @@ -631,6 +631,7 @@ class Parameters(Projection.Parameters): classPreferenceLevel = PreferenceLevel.CATEGORY + @check_user_specified @abc.abstractmethod def __init__(self, receiver, @@ -1066,8 +1067,8 @@ def as_mdf_model(self, simple_edge_format=True): else: sender_mech = parse_valid_identifier(self.sender.owner.name) else: - sender_name = None - sender_mech = None + sender_name = '' + sender_mech = '' if not 
isinstance(self.receiver, type): try: @@ -1086,8 +1087,8 @@ def as_mdf_model(self, simple_edge_format=True): else: receiver_mech = parse_valid_identifier(self.receiver.owner.name) else: - receiver_name = None - receiver_mech = None + receiver_name = '' + receiver_mech = '' socket_dict = { MODEL_SPEC_ID_SENDER_PORT: f'{sender_mech}_{sender_name}', @@ -1147,10 +1148,7 @@ def as_mdf_model(self, simple_edge_format=True): else: metadata = self._mdf_metadata try: - metadata[MODEL_SPEC_ID_METADATA]['functions'] = mdf.Function.to_dict_format( - self.function.as_mdf_model(), - ordered=False - ) + metadata[MODEL_SPEC_ID_METADATA]['functions'] = mdf.Function.to_dict(self.function.as_mdf_model()) except AttributeError: # projection is in deferred init, special handling here? pass diff --git a/psyneulink/core/components/shellclasses.py b/psyneulink/core/components/shellclasses.py index 7820abc7328..d1d2dc94f84 100644 --- a/psyneulink/core/components/shellclasses.py +++ b/psyneulink/core/components/shellclasses.py @@ -28,6 +28,7 @@ """ from psyneulink.core.components.component import Component +from psyneulink.core.globals.parameters import check_user_specified __all__ = [ 'Function', 'Mechanism', 'Process_Base', 'Projection', 'ShellClass', 'ShellClassError', 'Port', 'System_Base', @@ -73,6 +74,7 @@ class Process_Base(ShellClass): class Mechanism(ShellClass): + @check_user_specified def __init__(self, default_variable=None, size=None, diff --git a/psyneulink/core/compositions/composition.py b/psyneulink/core/compositions/composition.py index 836454b9c0f..b720ef3921e 100644 --- a/psyneulink/core/compositions/composition.py +++ b/psyneulink/core/compositions/composition.py @@ -8,8 +8,8 @@ # ********************************************* Composition ************************************************************ -""" +""" Contents -------- @@ -111,24 +111,36 @@ The following arguments of the Composition's constructor can be used to add Components when it is constructed: + .. _Composition_Pathways_Arg: + + - **pathways** + adds one or more `Pathways ` to the Composition; this is equivalent to constructing + the Composition and then calling its `add_pathways ` method, and can use the + same forms of specification as the **pathways** argument of that method (see `Pathway_Specification` for + additional details). If any `learning Pathways ` are included, then the + constructor's **disable_learning** argument can be used to disable learning on those by default (though it + will still allow learning to occur on any other Compositions, either nested within the current one, + or within which the current one is nested; see `Composition_Learning` for a full description). + + .. _Composition_Nodes_Arg: + - **nodes** adds the specified `Nodes ` to the Composition; this is equivalent to constructing the Composition and then calling its `add_nodes ` method, and takes the same values as the - **nodes** argument of that method. + **nodes** argument of that method (note that this does *not* construct `Pathways ` for the specified + nodes; the **pathways** arg or `add_pathways ` method must be used to do so). + + .. _Composition_Projections_Arg: - **projections** adds the specified `Projections ` to the Composition; this is equivalent to constructing the Composition and then calling its `add_projections ` method, and takes the same - values as the **projections** argument of that method.
In general, this is not needed -- default Projections + are created for Pathways and/or Nodes added to the Composition using the methods described above; however + it can be useful for custom configurations, including the implementation of specific Projection `matrices + `. - - **pathways** - adds one or more `Pathways ` to the Composition; this is equivalent to constructing the - Composition and then calling its `add_pathways ` method, and can use the same forms - of specification as the **pathways** argument of that method. If any `learning Pathways - ` are included, then the constructor's **disable_learning** argument can be - used to disable learning on those by default (though it will still allow learning to occur on any other - Compositions, either nested within the current one, or within which the current one is nested (see - `Composition_Learning` for a full description). + .. _Composition_Controller_Arg: - **controller** adds the specified `ControlMechanism` (typically an `OptimizationControlMechanism`) as the `controller @@ -179,10 +191,10 @@ - `add_linear_processing_pathway ` - adds and a list of `Nodes ` and `Projections ` to the Composition, - inserting a default Projection between any adjacent pair of Nodes for which one is not otherwise specified - (or possibly a set of Projections if either Node is a Composition -- see method documentation for details); - returns the `Pathway` added to the Composition. + adds a list of `Nodes ` and `Projections ` to the Composition, inserting + a default Projection between any adjacent set(s) of Nodes for which a Projection is not otherwise specified + (see method documentation and `Pathway_Specification` for additional details); returns the `Pathway` added to + the Composition. COMMENT: The following set of `learning methods ` can be used to add `Pathways @@ -2730,7 +2742,8 @@ def input_function(env, result): from psyneulink.core.components.functions.nonstateful.transferfunctions import Identity from psyneulink.core.components.mechanisms.mechanism import Mechanism_Base, MechanismError, MechanismList from psyneulink.core.components.mechanisms.modulatory.control.controlmechanism import ControlMechanism -from psyneulink.core.components.mechanisms.modulatory.control.optimizationcontrolmechanism import AGENT_REP, OptimizationControlMechanism +from psyneulink.core.components.mechanisms.modulatory.control.optimizationcontrolmechanism import AGENT_REP, \ + OptimizationControlMechanism from psyneulink.core.components.mechanisms.modulatory.learning.learningmechanism import \ LearningMechanism, ACTIVATION_INPUT_INDEX, ACTIVATION_OUTPUT_INDEX, ERROR_SIGNAL, ERROR_SIGNAL_INDEX from psyneulink.core.components.mechanisms.modulatory.modulatorymechanism import ModulatoryMechanism_Base @@ -2747,7 +2760,8 @@ def input_function(env, result): from psyneulink.core.components.projections.modulatory.modulatoryprojection import ModulatoryProjection_Base from psyneulink.core.components.projections.pathway.mappingprojection import MappingProjection, MappingError from psyneulink.core.components.projections.pathway.pathwayprojection import PathwayProjection_Base -from psyneulink.core.components.projections.projection import Projection_Base, ProjectionError, DuplicateProjectionError +from psyneulink.core.components.projections.projection import \ + Projection_Base, ProjectionError, DuplicateProjectionError from psyneulink.core.components.shellclasses import Composition_Base from psyneulink.core.components.shellclasses import Mechanism, Projection from
psyneulink.core.compositions.report import Report, \ @@ -2764,16 +2778,16 @@ def input_function(env, result): MONITOR, MONITOR_FOR_CONTROL, NAME, NESTED, NO_CLAMP, NODE, OBJECTIVE_MECHANISM, ONLINE, OUTCOME, \ OUTPUT, OUTPUT_CIM_NAME, OUTPUT_MECHANISM, OUTPUT_PORTS, OWNER_VALUE, \ PARAMETER, PARAMETER_CIM_NAME, PORT, \ - PROCESSING_PATHWAY, PROJECTION, PROJECTION_TYPE, PROJECTION_PARAMS, PULSE_CLAMP, \ - SAMPLE, SHADOW_INPUTS, SOFT_CLAMP, SSE, \ + PROCESSING_PATHWAY, PROJECTION, PROJECTION_TYPE, PROJECTION_PARAMS, PULSE_CLAMP, RECEIVER, \ + SAMPLE, SENDER, SHADOW_INPUTS, SOFT_CLAMP, SSE, \ TARGET, TARGET_MECHANISM, TEXT, VARIABLE, WEIGHT, OWNER_MECH from psyneulink.core.globals.log import CompositionLog, LogCondition -from psyneulink.core.globals.parameters import Parameter, ParametersBase +from psyneulink.core.globals.parameters import Parameter, ParametersBase, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import BasePreferenceSet from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel, _assign_prefs from psyneulink.core.globals.registry import register_category -from psyneulink.core.globals.utilities import \ - ContentAddressableList, call_with_pruned_args, convert_to_list, nesting_depth, convert_to_np_array, is_numeric, parse_valid_identifier +from psyneulink.core.globals.utilities import ContentAddressableList, call_with_pruned_args, convert_to_list, \ + nesting_depth, convert_to_np_array, is_numeric, is_matrix, parse_valid_identifier from psyneulink.core.scheduling.condition import All, AllHaveRun, Always, Any, Condition, Never from psyneulink.core.scheduling.scheduler import Scheduler, SchedulingMode from psyneulink.core.scheduling.time import Time, TimeScale @@ -3316,12 +3330,30 @@ class Composition(Composition_Base, metaclass=ComponentsMeta): --------- pathways : Pathway specification or list[Pathway specification...] - specifies one or more Pathways to add to the Compositions (see **pathways** argument of `add_pathways - `Composition.add_pathways` for specification format). + specifies one or more Pathways to add to the Compositions. A list containing `Node ` + and possible `Projection` specifications at its top level is treated as a single `Pathway`; a list containing + any nested lists or other forms of `Pathway specification ` is treated as + `multiple pathways ` (see `pathways ` as + well as `Pathway specification ` for additional details). + + .. 
technical_note:: + + The design pattern for use of sets and lists in specifying the **pathways** argument is: + - sets comprise Nodes that all occupy the same (parallel) position within a processing Pathway; + - lists comprise *sequences* of Nodes; embedded lists are either ignored or generate an error (see below) + (this is because lists of Nodes are interpreted as Pathways and Pathways cannot be nested, which would be + redundant since the same can be accomplished by simply including the items "inline" within a single list) + - if the Pathway specification contains (in its outer list): + - only a single item or set of items, each is treated as a SINGLETON in a Pathway; + - one or more lists, the items in each list are treated as separate (parallel) pathways; + - singly-nested lists ([[[A,B]],[[C,D]]]), they are collapsed and treated as a Pathway; + - any list with more than one list nested within it ([[[A,B],[C,D]]]), an error is generated; + - a Pathway object is treated as a list (if its pathway attribute is a set, it is wrapped in a list) + (see `tests ` for examples) nodes : `Mechanism `, `Composition` or list[`Mechanism `, `Composition`] : default None specifies one or more `Nodes ` to add to the Composition; these are each treated as - `SINGLETONs ` unless they are explicitly assigned `Projections `. + `SINGLETON `\\s unless they are explicitly assigned `Projections `. projections : `Projection ` or list[`Projection `] : default None specifies one or more `Projections ` to add to the Composition; these are not functional @@ -3449,7 +3481,7 @@ class Composition(Composition_Base, metaclass=ComponentsMeta): argument of the Composition's constructor and/or one of its `Pathway addition methods `; each item is a list of `Nodes ` (`Mechanisms ` and/or Compositions) intercolated with the `Projection(s) ` between each - pair of Nodes; both Nodes are Mechanism, then only a single Projection can be specified; if either is a + pair of Nodes; if both Nodes are Mechanisms, then only a single Projection can be specified; if either is a Composition then, under some circumstances, there can be a set of Projections, specifying how the `INPUT ` Node(s) of the sender project to the `OUTPUT ` Node(s) of the receiver (see `add_linear_processing_pathway` for additional details). @@ -3711,6 +3743,7 @@ class Parameters(ParametersBase): class _CompilationData(ParametersBase): execution = None + @check_user_specified def __init__( self, pathways=None, @@ -3954,9 +3987,6 @@ def _analyze_graph(self, context=None): self._create_CIM_ports(context=context) # Call after above so shadow_projections have relevant organization self._update_shadow_projections(context=context) - # # FIX: 12/29/21 / 3/30/22: MOVE TO _update_shadow_projections - # # Call again to accommodate any changes from _update_shadow_projections - # self._determine_node_roles(context=context) self._check_for_projection_assignments(context=context) self.needs_update_graph = False @@ -4553,7 +4583,10 @@ def _determine_origin_and_terminal_nodes_from_consideration_queue(self): # consideration set. Identifying these assumes that graph_processing has been called/updated, # which identifies and "breaks" cycles, and assigns FEEDBACK_SENDER to the appropriate consideration set(s).
for node in self.nodes: - if not any([efferent for efferent in node.efferents if efferent.receiver.owner is not self.output_CIM]): + if not any([ + efferent.is_active_in_composition(self) for efferent in node.efferents + if efferent.receiver.owner is not self.output_CIM + ]): self._add_node_role(node, NodeRole.TERMINAL) def _add_node_aux_components(self, node, context=None): @@ -4815,14 +4848,14 @@ def _determine_node_roles(self, context=None): this is currently the case, but is inconsistent with the analog in Control, where monitored Mechanisms *are* allowed to be OUTPUT; therefore, might be worth allowing TARGET_MECHANISM to be assigned as OUTPUT - - all Nodes for which OUTPUT has been assigned as a required_node_role, inculding by user + - all Nodes for which OUTPUT has been assigned as a required_node_role, including by user (i.e., in self.required_node_roles[NodeRole.OUTPUT]) TERMINAL: - all Nodes that - are not an ObjectiveMechanism assigned the role CONTROLLER_OBJECTIVE - or have *no* efferent projections OR - - or for for which any efferent projections are either: + - or for which any efferent projections are either: - to output_CIM OR - assigned as feedback (i.e., self.graph.comp_to_vertex[efferent].feedback == EdgeType.FEEDBACK) .. _note:: @@ -4917,9 +4950,9 @@ def _determine_node_roles(self, context=None): # and doesn't project to any Nodes other than its `AutoassociativeLearningMechanism` # (this is not picked up as a `TERMINAL` since it projects to the `AutoassociativeLearningMechanism`) # but can (or already does) project to an output_CIM - if all((p.receiver.owner is node + if all((p.receiver.owner is node # <- recurrence or isinstance(p.receiver.owner, AutoAssociativeLearningMechanism) - or p.receiver.owner is self.output_CIM) + or p.receiver.owner is self.output_CIM) # <- already projects to an output_CIM for p in node.efferents): self._add_node_role(node, NodeRole.OUTPUT) continue @@ -5746,13 +5779,9 @@ def add_projection(self, return else: # Initialize Projection - projection._init_args['sender'] = sender - projection._init_args['receiver'] = receiver - try: - projection._deferred_init() - except DuplicateProjectionError: - # return projection - return + projection._init_args[SENDER] = sender + projection._init_args[RECEIVER] = receiver + projection._deferred_init() else: existing_projections = self._check_for_existing_projections(projection, sender=sender, receiver=receiver) @@ -5787,6 +5816,15 @@ def add_projection(self, projection.is_processing = False # KDM 5/24/19: removing below rename because it results in several existing_projections # projection.name = f'{sender} to {receiver}' + + # check for required role specification of feedback projections + for node, role in self.required_node_roles: + if ( + (node == projection.sender.owner and role == NodeRole.FEEDBACK_SENDER) + or (node == projection.receiver.owner and role == NodeRole.FEEDBACK_RECEIVER) + ): + feedback = True + self.graph.add_component(projection, feedback=feedback) try: @@ -6339,6 +6377,46 @@ def _get_destination(self, projection): # region ---------------------------------- PROCESSING ----------------------------------------------------------- + def _parse_pathway(self, pathway, name, pathway_arg_str): + from psyneulink.core.compositions.pathway import Pathway, _is_pathway_entry_spec + + # Deal with Pathway() or tuple specifications + if isinstance(pathway, Pathway): + # Give precedence to name specified in call to add_linear_processing_pathway + pathway_name = name or pathway.name + pathway =
pathway.pathway + else: + pathway_name = name + + if isinstance(pathway, tuple): + # If tuple is just a single Node specification for a pathway, return in list: + if _is_pathway_entry_spec(pathway, NODE): + pathway = [pathway] + # If tuple is used to specify a sequence of nodes, convert to list (even though not documented): + elif all(_is_pathway_entry_spec(n, ANY) for n in pathway): + pathway = list(pathway) + # If tuple is (pathway, LearningFunction), get pathway and ignore LearningFunction + elif isinstance(pathway[1],type) and issubclass(pathway[1], LearningFunction): + warnings.warn(f"{LearningFunction.__name__} found in specification of {pathway_arg_str}: {pathway[1]}; " + f"it will be ignored") + pathway = pathway[0] + else: + raise CompositionError(f"Unrecognized tuple specification in {pathway_arg_str}: {pathway}") + elif not isinstance(pathway, collections.abc.Iterable) or all(_is_pathway_entry_spec(n, ANY) for n in pathway): + pathway = convert_to_list(pathway) + else: + bad_entry_error_msg = f"The following entries in a pathway specified for '{self.name}' are not " \ + f"a Node (Mechanism or Composition) or a Projection nor a set of either: " + bad_entries = [repr(entry) for entry in pathway if not _is_pathway_entry_spec(entry, ANY)] + raise CompositionError(f"{bad_entry_error_msg}{','.join(bad_entries)}") + # raise CompositionError(f"Unrecognized specification in {pathway_arg_str}: {pathway}") + + lists = [entry for entry in pathway + if isinstance(entry, list) and all(_is_pathway_entry_spec(node, NODE) for node in entry)] + if lists: + raise CompositionError(f"Pathway specification for {pathway_arg_str} has embedded list(s): {lists}") + return pathway, pathway_name + # FIX: REFACTOR TO TAKE Pathway OBJECT AS ARGUMENT def add_pathway(self, pathway): """Add an existing `Pathway ` to the Composition @@ -6370,51 +6448,194 @@ def add_pathway(self, pathway): self._analyze_graph() + @handle_external_context() + def add_pathways(self, pathways, context=None): + """Add pathways to the Composition. + + Arguments + --------- + + pathways : Pathway or list[Pathway] + specifies one or more `Pathways ` to add to the Composition. Any valid form of `Pathway + specification ` can be used. A set can also be used, all elements of which are + `Nodes `, in which case a separate `Pathway` is constructed for each. + + Returns + ------- + + list[Pathway] : + List of `Pathways ` added to the Composition. + + """ + + # Possible specifications for **pathways** arg: + # Node specs (single or set): + # 0 Single node: NODE + # 1 Set: {NODE...} -> generate a Pathway for each NODE + # Single pathway spec (list, tuple or dict): + # 2 single list: PWAY = [NODE] or [NODE...] in which *all* are NODES with optional intercolated Projections + # 2.5 single with sets: PWAY = [NODE or {NODE...}] or [NODE or {NODE...}, NODE or {NODE...}...] + # 3 single tuple: (PWAY, LearningFunction) = (NODE, LearningFunction) or + # ([NODE...], LearningFunction) + # 4 single dict: {NAME: PWAY} = {NAME: NODE} or + # {NAME: [NODE...]} or + # {NAME: ([NODE...], LearningFunction)} + # Multiple pathway specs (in outer list): + # 5 list with list(s): [PWAY] = [NODE, [NODE]] or [[NODE...]...] + # 6 list with tuple(s): [(PWAY, LearningFunction)...] = [(NODE..., LearningFunction)...] or + # [([NODE...], LearningFunction)...] + # 7 list with dict: [{NAME: PWAY}...] = [{NAME: NODE...}...] or + # [{NAME: [NODE...]}...] or + # [{NAME: (NODE, LearningFunction)}...] or + # [{NAME: ([NODE...], LearningFunction)}...] 
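To make the enumeration above concrete, the following is a minimal, illustrative sketch of a few of these specification forms (the Mechanism names A-F are hypothetical, and it assumes the conventional top-level import of the package; this is an editorial example, not part of the diff):

import psyneulink as pnl

A, B, C, D, E, F = (pnl.ProcessingMechanism(name=n) for n in 'ABCDEF')

comp = pnl.Composition()
comp.add_pathways(A)                         # 0: single Node, added as a SINGLETON Pathway
comp.add_pathways([B, {C, D}])               # 2.5: list with a set; B projects to both C and D in parallel
comp.add_pathways([{'named_pway': [E, F]}])  # 7: list with a dict; the Pathway is named 'named_pway'

A learning form (possibility 3) would be specified analogously as a tuple, e.g. comp.add_pathways(([G, H], pnl.BackPropagation)) for two further Mechanisms G and H, with BackPropagation assumed here as the LearningFunction.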
+ + from psyneulink.core.compositions.pathway import Pathway, _is_node_spec, _is_pathway_entry_spec + + if context.source == ContextFlags.COMMAND_LINE: + pathways_arg_str = f"'pathways' arg for the add_pathways method of {self.name}" + elif context.source == ContextFlags.CONSTRUCTOR: + pathways_arg_str = f"'pathways' arg of the constructor for {self.name}" + else: + assert False, f"PROGRAM ERROR: unrecognized context passed to add_pathways of {self.name}." + context.string = pathways_arg_str + + if not pathways: + return + + # Possibilities 0, 3 or 4 (single NODE, set of NODEs, tuple, dict or Pathway specified), so convert to list + if _is_node_spec(pathways) or isinstance(pathways, (tuple, dict, Pathway)): + pathways = convert_to_list(pathways) + + # Possibility 1 (set of Nodes): create a Pathway for each Node (since set is in pathways arg) + elif isinstance(pathways, set): + pathways = [pathways] + + # Possibility 2 (list is a single pathway spec) or 2.5 (includes one or more sets): + if (isinstance(pathways, list) and + # First item must be a node_spec or set of them + ((_is_node_spec(pathways[0]) + or (isinstance(pathways[0], set) and all(_is_node_spec(item) for item in pathways[0]))) + # All other items must be either Nodes, Projections or sets + and all(_is_pathway_entry_spec(p, ANY) for p in pathways))): + # Place in outer list (to conform to processing of multiple pathways below) + pathways = [pathways] + # assert False, f"GOT TO POSSIBILITY 2" # SHOULD HAVE BEEN DONE ABOVE + + # If pathways is not now a list it must be illegitimate + if not isinstance(pathways, list): + raise CompositionError(f"The {pathways_arg_str} must be a " + f"Node, list, set, tuple, dict or Pathway object: {pathways}.") + + # pathways should now be a list in which each entry should be *some* form of pathway specification + # (including original spec as possibilities 5, 6, or 7) + + # If there are any lists of Nodes in pathway, or a Pathway or dict with such a list, + # then treat ALL entries as parallel pathways, and embed in lists + if (isinstance(pathways, collections.abc.Iterable) + and any(isinstance(pathway, (list, dict, Pathway)) for pathway in pathways)): + pathways = [pathway if isinstance(pathway, (list, dict, Pathway)) else [pathway] for pathway in pathways] + else: + # Put single pathway in outer list for consistency of handling below (with specified pathway as pathways[0]) + pathways = np.atleast_2d(np.array(pathways, dtype=object)).tolist() + + added_pathways = [] + + def identify_pway_type_and_parse_tuple_prn(pway, tuple_or_dict_str): + """ + Determine whether pway is PROCESSING_PATHWAY or LEARNING_PATHWAY and, if it is the latter, + parse tuple into pathway specification and LearningFunction.
+ Return pathway type, pathway, and learning_function or None + """ + learning_function = None + + if isinstance(pway, Pathway): + pway = pway.pathway + + if (_is_node_spec(pway) or isinstance(pway, (list, set)) or + # Forgive use of tuple to specify a pathway, and treat as if it was a list spec + (isinstance(pway, tuple) and all(_is_pathway_entry_spec(n, ANY) for n in pway))): + pway_type = PROCESSING_PATHWAY + if isinstance(pway, set): + pway = [pway] + return pway_type, pway, None + elif isinstance(pway, tuple): + pway_type = LEARNING_PATHWAY + if len(pway)!=2: + raise CompositionError(f"A tuple specified in the {pathways_arg_str}" + f" has more than two items: {pway}") + pway, learning_function = pway + if not (_is_node_spec(pway) or isinstance(pway, (list, Pathway))): + raise CompositionError(f"The 1st item in {tuple_or_dict_str} specified in the " + f" {pathways_arg_str} must be a node or a list: {pway}") + if not (isinstance(learning_function, type) and issubclass(learning_function, LearningFunction)): + raise CompositionError(f"The 2nd item in {tuple_or_dict_str} specified in the " + f"{pathways_arg_str} must be a LearningFunction: {learning_function}") + return pway_type, pway, learning_function + else: + assert False, f"PROGRAM ERROR: arg to identify_pway_type_and_parse_tuple_prn in {self.name}" \ + f" is not a Node, list or tuple: {pway}" + + # Validate items in pathways list and add to Composition using relevant add_linear_<> method. + bad_entry_error_msg = f"Every item in the {pathways_arg_str} must be a " \ + f"Node, list, set, tuple or dict; the following are not: " + for pathway in pathways: + pathway = pathway[0] if isinstance(pathway, list) and len(pathway) == 1 else pathway + pway_name = None + if isinstance(pathway, Pathway): + pway_name = pathway.name + pathway = pathway.pathway + if _is_node_spec(pathway) or isinstance(pathway, (list, set, tuple)): + if isinstance(pathway, set): + bad_entries = [repr(entry) for entry in pathway if not _is_node_spec(entry)] + if bad_entries: + raise CompositionError(f"{bad_entry_error_msg}{','.join(bad_entries)}") + pway_type, pway, pway_learning_fct = identify_pway_type_and_parse_tuple_prn(pathway, f"a tuple") + elif isinstance(pathway, dict): + if len(pathway)!=1: + raise CompositionError(f"A dict specified in the {pathways_arg_str} " + f"contains more than one entry: {pathway}.") + pway_name, pway = list(pathway.items())[0] + if not isinstance(pway_name, str): + raise CompositionError(f"The key in a dict specified in the {pathways_arg_str} must be a str " + f"(to be used as its name): {pway_name}.") + if _is_node_spec(pway) or isinstance(pway, (list, tuple, Pathway)): + pway_type, pway, pway_learning_fct = identify_pway_type_and_parse_tuple_prn(pway, + f"the value of a dict") + else: + raise CompositionError(f"The value in a dict specified in the {pathways_arg_str} must be " + f"a pathway specification (Node, list or tuple): {pway}.") + else: + raise CompositionError(f"{bad_entry_error_msg}{repr(pathway)}") + + context.source = ContextFlags.METHOD + if pway_type == PROCESSING_PATHWAY: + new_pathway = self.add_linear_processing_pathway(pathway=pway, + name=pway_name, + context=context) + elif pway_type == LEARNING_PATHWAY: + new_pathway = self.add_linear_learning_pathway(pathway=pway, + learning_function=pway_learning_fct, + name=pway_name, + context=context) + else: + assert False, f"PROGRAM ERROR: failure to determine pathway_type in add_pathways for {self.name}."
+ + added_pathways.append(new_pathway) + + return added_pathways + @handle_external_context() def add_linear_processing_pathway(self, pathway, name:str=None, context=None, *args): - """Add sequence of `Nodes ` with intercolated Projections. + """Add sequence of `Nodes ` with optionally intercolated `Projections `. .. _Composition_Add_Linear_Processing_Pathway: - Each `Node ` can be either a `Mechanism`, a `Composition`, or a tuple (Mechanism, `NodeRoles - `) that can be used to assign `required_roles` to Mechanisms (see `Composition_Nodes` for additional - details). - - `Projections ` can be intercolated between any pair of `Nodes `. If both Nodes - of a pair are Mechanisms, a single `MappingProjection` can be `specified `. The - same applies if the first Node is a `Composition` with a single `OUTPUT ` Node and/or the - second is a `Composition` with a single `INPUT ` Node. If either has more than one `INPUT - ` or `OUTPUT ` Node, respectively, then a list or set of Projections can be - specified for each pair of nested Nodes. If no `Projection` is specified between a pair of contiguous Nodes, - then default Projection(s) are constructed between them, as follows: - - * *One to one* - if both Nodes are Mechanisms or, if either is a Composition, the first (sender) has - only a single `OUTPUT ` Node and the second (receiver) has only a single `INPUT - ` Node, then a default `MappingProjection` is created from the `primary OutputPort - ` of the sender (or of its sole `OUTPUT ` Node if the sener is a - Composition) to the `primary InputPort ` of the receiver (or of its sole of `INPUT - ` Node if the receiver is a Composition). - - * *One to many* - if the first Node (sender) is either a Mechanism or a Composition with a single - `OUTPUT ` Node, but the second (receiver) is a Composition with more than one - `INPUT ` Node, then a `MappingProjection` is created from the `primary OutputPort - ` of the sender Mechanism (or of its sole `OUTPUT ` Node if the - sender is a Compostion) to each `INPUT ` Node of the receiver, and a *set* - containing the Projections is intercolated between the two Nodes in the `Pathway`. - - * *Many to one* - if the first Node (sender) is a Composition with more than one `OUTPUT ` - Node, and the second (receiver) is either a Mechanism or a Composition with a single `INPUT ` - Node, then a `MappingProjection` is created from each `OUPUT ` Node of the sender to the - `primary InputPort ` of the receiver Mechanism (or of its sole `INPUT ` - Node if the receiver is a Composition), and a *set* containing the Projections is intercolated - between the two Nodes in the `Pathway`. - - * *Many to many* - if both Nodes are Compositions in which the sender has more than one `INPUT ` - Node and the receiver has more than one `INPUT ` Node, it is not possible to determine - the correct configuration automatically, and an error is generated. In this case, a set of Projections - must be explicitly specified. - - .. _note:: + A Pathway is specified as a list, each element of which is either a `Node ` or + set of Nodes, possibly intercolated with specifications of `Projections ` between them. + The Node(s) specified in each entry of the list project to the Node(s) specified in the next entry + (see `Pathway_Specification` for details). + + .. 
note:: Any specifications of the **monitor_for_control** `argument ` of a constructor for a `ControlMechanism` or the **monitor** argument in the constructor for an `ObjectiveMechanism` in the **objective_mechanism** `argument ` of a @@ -6430,9 +6651,8 @@ def add_linear_processing_pathway(self, pathway, name:str=None, context=None, *a be used, however if a 2-item (Pathway, LearningFunction) tuple is used, the `LearningFunction` is ignored (this should be used with `add_linear_learning_pathway` if a `learning Pathway ` is desired). A `Pathway` object can also be used; again, however, any - learning-related specifications are ignored, as are its `name ` if the **name** - argument of add_linear_processing_pathway is specified. - See `above ` for additional details. + learning-related specifications are ignored, as are its `name ` if the **name** argument + of add_linear_processing_pathway is specified. name : str specifies the name used for `Pathway`; supersedes `name ` of `Pathway` object if it has one. @@ -6442,12 +6662,15 @@ def add_linear_processing_pathway(self, pathway, name:str=None, context=None, *a `Pathway` : `Pathway` added to Composition. - """ from psyneulink.core.compositions.pathway import Pathway, _is_node_spec, _is_pathway_entry_spec + def _get_spec_if_tuple(spec): + return spec[0] if isinstance(spec, tuple) else spec + nodes = [] + node_entries = [] # If called internally, use its pathway_arg_str in error messages (in context.string) if context.source is not ContextFlags.COMMAND_LINE: @@ -6459,48 +6682,37 @@ def add_linear_processing_pathway(self, pathway, name:str=None, context=None, *a context.source = ContextFlags.METHOD context.string = pathway_arg_str - # First, deal with Pathway() or tuple specifications - if isinstance(pathway, Pathway): - # Give precedence to name specified in call to add_linear_processing_pathway - pathway_name = name or pathway.name - pathway = pathway.pathway - else: - pathway_name = name - - if _is_pathway_entry_spec(pathway, ANY): - pathway = convert_to_list(pathway) - elif isinstance(pathway, tuple): - # If tuple is used to specify a sequence of nodes, convert to list (even though not documented): - if all(_is_pathway_entry_spec(n, ANY) for n in pathway): - pathway = list(pathway) - # If tuple is (pathway, LearningFunction), get pathway and ignore LearningFunction - elif isinstance(pathway[1],type) and issubclass(pathway[1], LearningFunction): - warnings.warn(f"{LearningFunction.__name__} found in specification of {pathway_arg_str}: {pathway[1]}; " - f"it will be ignored") - pathway = pathway[0] - else: - raise CompositionError(f"Unrecognized tuple specification in {pathway_arg_str}: {pathway}") - else: - raise CompositionError(f"Unrecognized specification in {pathway_arg_str}: {pathway}") + pathway, pathway_name = self._parse_pathway(pathway, name, pathway_arg_str) - # Then, verify that the pathway begins with a node + # Verify that the pathway begins with a Node or set of Nodes if _is_node_spec(pathway[0]): # Use add_nodes so that node spec can also be a tuple with required_roles - self.add_nodes(nodes=[pathway[0]], - context=context) + self.add_nodes(nodes=[pathway[0]], context=context) nodes.append(pathway[0]) + node_entries.append(pathway[0]) + # Or a set of Nodes + elif isinstance(pathway[0], set): + self.add_nodes(nodes=pathway[0], context=context) + nodes.extend(pathway[0]) + node_entries.append(pathway[0]) else: # 'MappingProjection has no attribute _name' error is thrown when pathway[0] is passed to the error msg raise
CompositionError(f"First item in {pathway_arg_str} must be " f"a Node (Mechanism or Composition): {pathway}.") - # Next, add all of the remaining nodes in the pathway + # Add all of the remaining nodes in the pathway for c in range(1, len(pathway)): - # if the current item is a Mechanism, Composition or (Mechanism, NodeRole(s)) tuple, add it + # if the entry is for a Node (Mechanism, Composition or (Mechanism, NodeRole(s)) tuple), add it if _is_node_spec(pathway[c]): self.add_nodes(nodes=[pathway[c]], context=context) nodes.append(pathway[c]) + node_entries.append(pathway[c]) + # If the entry is for a set of Nodes, add them + elif isinstance(pathway[c], set) and all(_is_node_spec(entry) for entry in pathway[c]): + self.add_nodes(nodes=pathway[c], context=context) + nodes.extend(pathway[c]) + node_entries.append(pathway[c]) # Then, delete any ControlMechanism that has its monitor_for_control attribute assigned # and any ObjectiveMechanism that projects to a ControlMechanism, @@ -6532,146 +6744,271 @@ def add_linear_processing_pathway(self, pathway, name:str=None, context=None, *a projections = [] for c in range(1, len(pathway)): - # if the current item is a Node - if _is_node_spec(pathway[c]): - if _is_node_spec(pathway[c - 1]): - # if the previous item was also a node, add a MappingProjection between them - if isinstance(pathway[c - 1], tuple): - sender = pathway[c - 1][0] - else: - sender = pathway[c - 1] - if isinstance(pathway[c], tuple): - receiver = pathway[c][0] - else: - receiver = pathway[c] - - # If sender and/or receiver is a Composition with INPUT or OUTPUT Nodes, - # replace it with those Nodes - senders = self._get_nested_nodes_with_same_roles_at_all_levels(sender, NodeRole.OUTPUT) - receivers = self._get_nested_nodes_with_same_roles_at_all_levels(receiver, - NodeRole.INPUT, NodeRole.TARGET) - if senders or receivers: - senders = senders or convert_to_list(sender) - receivers = receivers or convert_to_list(receiver) - if len(senders) > 1 and len(receivers) > 1: - raise CompositionError(f"Pathway specified with two contiguous Compositions, the first of " - f"which ({sender.name}) has more than one OUTPUT Node, and second " - f"of which ({receiver.name}) has more than one INPUT Node, making " - f"the configuration of Projections between them ambiguous; please " - f"specify those Projections explicitly.") - proj = {self.add_projection(sender=s, receiver=r, allow_duplicates=False) - for r in receivers for s in senders} - else: - proj = self.add_projection(sender=sender, receiver=receiver) - if proj: - projections.append(proj) - - # if the current item is a Projection specification + # NODE ENTRY ---------------------------------------------------------------------------------------- + def _get_node_specs_for_entry(entry, include_roles=None, exclude_roles=None): + """Extract Nodes from any tuple specs and replace Compositions with their INPUT Nodes + """ + nodes = [] + for node in entry: + # Extract Nodes from any tuple specs + node = _get_spec_if_tuple(node) + # Replace any nested Compositions with their INPUT Nodes + node = (self._get_nested_nodes_with_same_roles_at_all_levels(node, include_roles, exclude_roles) + if isinstance(node, Composition) else [node]) + nodes.extend(node) + return nodes + + # The current entry is a Node or a set of them: + # - if it is a set, list or array, leave as is, else place in set for consistency of processing below + current_entry = pathway[c] if isinstance(pathway[c], (set, list, np.ndarray)) else {pathway[c]} + if all(_is_node_spec(entry) for 
entry in current_entry): + receivers = _get_node_specs_for_entry(current_entry, NodeRole.INPUT, NodeRole.TARGET) + # The preceding entry is a Node or set of them: + # - if it is a set, list or array, leave as is, else place in set for consistency of processing below + preceding_entry = (pathway[c - 1] if isinstance(pathway[c - 1], (set, list, np.ndarray)) + else {pathway[c - 1]}) + if all(_is_node_spec(sender) for sender in preceding_entry): + senders = _get_node_specs_for_entry(preceding_entry, NodeRole.OUTPUT) + projs = {self.add_projection(sender=s, receiver=r, allow_duplicates=False) + for r in receivers for s in senders} + if all(projs): + projs = projs.pop() if len(projs) == 1 else projs + projections.append(projs) + + # PROJECTION ENTRY -------------------------------------------------------------------------- + # Validate that it is between two nodes, then add the Projection; + # note: if Projection is already instantiated and valid, it is used as is; if it is a set or list: + # - those are implemented between the corresponding pairs of sender and receiver Nodes + # - the list or set has a default Projection or matrix specification, + # that is used between all pairs of Nodes for which a Projection has not been specified + + # The current entry is a Projection specification or a list or set of them elif _is_pathway_entry_spec(pathway[c], PROJECTION): + + # Validate that Projection specification is not last entry + if c == len(pathway) - 1: + raise CompositionError(f"The last item in the {pathway_arg_str} cannot be a Projection: " + f"{pathway[c]}.") + + # Validate that entry is between two Nodes (or sets of Nodes) + # and get all pairings of sender and receiver nodes + prev_entry = pathway[c - 1] + next_entry = pathway[c + 1] + if ((_is_node_spec(prev_entry) or isinstance(prev_entry, set)) + and (_is_node_spec(next_entry) or isinstance(next_entry, set))): + senders = [_get_spec_if_tuple(sender) for sender in convert_to_list(prev_entry)] + receivers = [_get_spec_if_tuple(receiver) for receiver in convert_to_list(next_entry)] + node_pairs = list(itertools.product(senders,receivers)) + else: + raise CompositionError(f"A Projection specified in {pathway_arg_str} " + f"is not between two Nodes: {pathway[c]}") + + # Convert specs in entry to list (embedding in one if matrix) for consistency of handling below + all_proj_specs = [pathway[c]] if is_numeric(pathway[c]) else convert_to_list(pathway[c]) + + # Get default Projection specification + # Must be a matrix spec, or a Projection with no sender or receiver specified + # If it is: + # - a single Projection, not in a set or list + # - appears only once in the pathways arg + # - it is preceded by only one sender Node and followed by only one receiver Node + # then treat as an individual Projection specification and not a default projection specification + possible_default_proj_spec = [proj_spec for proj_spec in all_proj_specs + if (is_matrix(proj_spec) + or (isinstance(proj_spec, Projection) + and proj_spec._initialization_status & ContextFlags.DEFERRED_INIT + and proj_spec._init_args[SENDER] is None + and proj_spec._init_args[RECEIVER] is None))] + # Validate that there is no more than one default Projection specification + if len(possible_default_proj_spec) > 1: + raise
CompositionError(f"There is more than one matrix specification in the set of Projection " + f"specifications for entry {c} of the {pathway_arg_str}: " + f"{possible_default_proj_spec}.") + # Get spec from list: + spec = possible_default_proj_spec[0] if possible_default_proj_spec else None + # If it appears only once on its own in the pathways arg and there is only one sender and one receiver + # consider it an individual Projection specification rather than a specification of the default + if sum(isinstance(s, Projection) and s is spec for s in pathway) == len(senders) == len(receivers) == 1: + default_proj_spec = None + proj_specs = all_proj_specs else: - proj_specs = convert_to_list(pathway[c]) + # Unpack if tuple spec, and assign feedback (with False as default) + default_proj_spec, feedback = (spec if isinstance(spec, tuple) else (spec, False)) + # Get all specs other than default_proj_spec + # proj_specs = [proj_spec for proj_spec in all_proj_specs if proj_spec not in possible_default_proj_spec] + proj_specs = [proj_spec for proj_spec in all_proj_specs if proj_spec is not spec] + + # Collect all Projection specifications (to add to Composition at end) proj_set = [] - for proj_spec in proj_specs: - if c == len(pathway) - 1: - raise CompositionError(f"The last item in the {pathway_arg_str} cannot be a Projection: " - f"{proj_spec}.") - # confirm that it is between two nodes, then add the projection - if isinstance(proj_spec, tuple): - proj = proj_spec[0] - feedback = proj_spec[1] - else: - proj = proj_spec - feedback = False - sender = pathway[c - 1] - receiver = pathway[c + 1] - if _is_node_spec(sender) and _is_node_spec(receiver): - if isinstance(sender, tuple): - sender = sender[0] - if isinstance(receiver, tuple): - receiver = receiver[0] + + def handle_misc_errors(proj, error): + raise CompositionError(f"Bad Projection specification in {pathway_arg_str} ({proj}): " + f"{str(error.error_value)}") + + def handle_duplicates(sender, receiver): + duplicate = [p for p in receiver.afferents if p in sender.efferents] + assert len(duplicate)==1, \ + f"PROGRAM ERROR: Could not identify duplicate on DuplicateProjectionError " \ + f"for {Projection.__name__} between {sender.name} and {receiver.name} " \ + f"in call to {repr('add_linear_processing_pathway')} for {self.name}." + duplicate = duplicate[0] + warning_msg = f"Projection specified between {sender.name} and {receiver.name} " \ + f"in {pathway_arg_str} is a duplicate of one" + # IMPLEMENTATION NOTE: Version that allows different Projections between same + # sender and receiver in different Compositions + # if duplicate in self.projections: + # warnings.warn(f"{warning_msg} already in the Composition ({duplicate.name}) " + # f"and so will be ignored.") + # proj=duplicate + # else: + # if self.prefs.verbosePref: + # warnings.warn(f" that already exists between those nodes ({duplicate.name}). 
The " + # f"new one will be used; delete it if you want to use the existing one") + # Version that forbids *any* duplicate Projections between same sender and receiver + warnings.warn(f"{warning_msg} that already exists between those nodes ({duplicate.name}) " + f"and so will be ignored.") + proj_set.append(self.add_projection(duplicate)) + + # PARSE PROJECTION SPECIFICATIONS AND INSTANTIATE PROJECTIONS + # IMPLEMENTATION NOTE: + # self.add_projection is called for each Projection + # to catch any duplicates with exceptions below + + # FIX: 4/9/22 - REFACTOR TO DO ANY SPECIFIED ASSIGNMENTS FIRST, AND THEN DEFAULT ASSIGNMENTS (IF ANY) + if default_proj_spec is not None and not proj_specs: + # If there is a default specification and no other Projection specs, + # use default to construct Projections for all node_pairs + for sender, receiver in node_pairs: try: + # Default is a Projection + if isinstance(default_proj_spec, Projection): + # Copy so that assignments made to instantiated Projection don't affect default + projection = self.add_projection(projection=deepcopy(default_proj_spec), + sender=sender, + receiver=receiver, + allow_duplicates=False, + feedback=feedback) else: + # Default is a matrix_spec + assert is_matrix(default_proj_spec), \ + f"PROGRAM ERROR: Expected {default_proj_spec} to be " \ + f"a matrix specification in {pathway_arg_str}." + projection = self.add_projection(projection=MappingProjection(sender=sender, + matrix=default_proj_spec, + receiver=receiver), + allow_duplicates=False, + feedback=feedback) + proj_set.append(projection) + + except (InputPortError, ProjectionError, MappingError) as error: + handle_misc_errors(default_proj_spec, error) + except DuplicateProjectionError: + handle_duplicates(sender, receiver) + + else: + # FIX: 4/9/22 - PUT THIS FIRST (BEFORE BLOCK JUST ABOVE) AND THEN ASSIGN TO ANY LEFT IN node_pairs + # Projections have been specified + for proj_spec in proj_specs: + try: + proj = _get_spec_if_tuple(proj_spec) + feedback = proj_spec[1] if isinstance(proj_spec, tuple) else False + + if isinstance(proj, Projection): + # FIX 4/9/22 - TEST FOR DEFERRED INIT HERE (THAT IS NOT A default_proj_spec) + # IF JUST SENDER OR RECEIVER, TREAT AS PER PORTS BELOW + # Validate that Projection is between a Node in senders and one in receivers + if proj._initialization_status & ContextFlags.DEFERRED_INIT: + sender_node = senders[0] + receiver_node = receivers[0] + else: + sender_node = proj.sender.owner + receiver_node = proj.receiver.owner + proj_set.append(self.add_projection(proj, + sender = sender_node, + receiver = receiver_node, + allow_duplicates=False, feedback=feedback)) + if default_proj_spec: + # If there IS a default Projection specification, remove from node_pairs + # only the entry for the sender-receiver pair, so that the sender is assigned + # a default Projection to all other receivers (to which a Projection is not + # explicitly specified) and the receiver is assigned a default Projection from + # all other senders (from which a Projection is not explicitly specified).
+ node_pairs = [pair for pair in node_pairs + if not all(node in pair for node in {sender_node, receiver_node})] + else: + # If there is NOT a default Projection specification, remove from node_pairs + # all other entries with either the same sender OR receiver, so that neither + # the sender nor receiver are assigned any other default Projections. + node_pairs = [pair for pair in node_pairs + if not any(node in pair for node in {sender_node, receiver_node})] + + # FIX: 4/9/22 - SHOULD INCLUDE MECH SPEC (AND USE PRIMARY PORT) HERE: + elif isinstance(proj, Port): + # Implement default Projection (using matrix if specified) for all remaining specs try: + # FIX: 4/9/22 - INCLUDE TEST FOR DEFERRED_INIT WITH ONLY RECEIVER SPECIFIED if isinstance(proj, InputPort): - proj = MappingProjection(sender=sender, - receiver=proj) + for sender in senders: + proj_set.append(self.add_projection( + projection=MappingProjection(sender=sender, receiver=proj), + allow_duplicates=False, feedback=feedback)) + # FIX: 4/9/22 - INCLUDE TEST FOR DEFERRED_INIT WITH ONLY SENDER SPECIFIED elif isinstance(proj, OutputPort): - proj = MappingProjection(sender=proj, - receiver=receiver) + for receiver in receivers: + proj_set.append(self.add_projection( + projection=MappingProjection(sender=proj, receiver=receiver), + allow_duplicates=False, feedback=feedback)) + # Remove from node_pairs all pairs involving the owner of the Port + # (since all Projections to or from it have been implemented) + node_pairs = [pair for pair in node_pairs if (proj.owner not in pair)] except (InputPortError, ProjectionError) as error: raise ProjectionError(str(error.error_value)) except (InputPortError, ProjectionError, MappingError) as error: - raise CompositionError(f"Bad Projection specification in {pathway_arg_str} ({proj}): " - f"{str(error.error_value)}") - + handle_misc_errors(proj, error) except DuplicateProjectionError: - # FIX: 7/22/19 ADD WARNING HERE?? - # FIX: 7/22/19 MAKE THIS A METHOD ON Projection?? - duplicate = [p for p in receiver.afferents if p in sender.efferents] - assert len(duplicate)==1, \ - f"PROGRAM ERROR: Could not identify duplicate on DuplicateProjectionError " \ - f"for {Projection.__name__} between {sender.name} and {receiver.name} " \ - f"in call to {repr('add_linear_processing_pathway')} for {self.name}." - duplicate = duplicate[0] - warning_msg = f"Projection specified between {sender.name} and {receiver.name} " \ - f"in {pathway_arg_str} is a duplicate of one" - # IMPLEMENTATION NOTE: Version that allows different Projections between same - # sender and receiver in different Compositions - # if duplicate in self.projections: - # warnings.warn(f"{warning_msg} already in the Composition ({duplicate.name}) " - # f"and so will be ignored.") - # proj=duplicate - # else: - # if self.prefs.verbosePref: - # warnings.warn(f" that already exists between those nodes ({duplicate.name}). 
The " - # f"new one will be used; delete it if you want to use the existing one") - # Version that forbids *any* duplicate Projections between same sender and receiver - warnings.warn(f"{warning_msg} that already exists between those nodes ({duplicate.name}) " - f"and so will be ignored.") - proj=duplicate - - proj = self.add_projection(projection=proj, - sender=sender, - receiver=receiver, - feedback=feedback, - allow_duplicates=False) - if proj: - proj_set.append(proj) - else: - raise CompositionError(f"A Projection specified in {pathway_arg_str} " - f"is not between two Nodes: {pathway[c]}") + handle_duplicates(sender, receiver) + + # FIX: 4/9/22 - REPLACE BELOW WITH CALL TO _assign_default_proj_spec(sender, receiver) + # If a default Projection is specified and any sender-receiver pairs remain, assign default + if default_proj_spec and node_pairs: + for sender, receiver in node_pairs: + try: + p = self.add_projection(projection=deepcopy(default_proj_spec), + sender=sender, + receiver=receiver, + allow_duplicates=False, + feedback=feedback) + proj_set.append(p) + except (InputPortError, ProjectionError, MappingError) as error: + handle_misc_errors(proj, error) + except DuplicateProjectionError: + handle_duplicates(sender, receiver) + + # If there is a single Projection, extract it from list and append as Projection + # IMPLEMENTATION NOTE: + # this is to support calls to add_learing_processing_pathway by add_learning_<> methods + # that do not yet support a list or set of Projection specifications if len(proj_set) == 1: projections.append(proj_set[0]) else: projections.append(proj_set) + # BAD PATHWAY ENTRY: contains neither Node nor Projection specification(s) else: - raise CompositionError(f"An entry in {pathway_arg_str} is not a Node (Mechanism or Composition) " - f"or a Projection: {repr(pathway[c])}.") + assert False, f"PROGRAM ERROR : An entry in {pathway_arg_str} is not a Node (Mechanism " \ + f"or Composition) or a Projection nor a set of either: {repr(pathway[c])}." # Finally, clean up any tuple specs - for i, n in enumerate(nodes): - if isinstance(n, tuple): - nodes[i] = nodes[i][0] - # interleave nodes and projections - explicit_pathway = [nodes[0]] + for i, n_e in enumerate(node_entries): + for n in convert_to_list(n_e): + if isinstance(n, tuple): + nodes[i] = nodes[i][0] + # interleave (sets of) Nodes and (sets or lists of) Projections + explicit_pathway = [node_entries[0]] for i in range(len(projections)): explicit_pathway.append(projections[i]) - explicit_pathway.append(nodes[i + 1]) + explicit_pathway.append(node_entries[i + 1]) # If pathway is an existing one, return that existing_pathway = next((p for p in self.pathways if explicit_pathway==p.pathway), None) @@ -6698,7 +7035,8 @@ def add_linear_processing_pathway(self, pathway, name:str=None, context=None, *a pass else: # Otherwise, something has gone wrong - assert False, f"PROGRAM ERROR: Bad pathway specification for {self.name} in {pathway_arg_str}: {pathway}." + assert False, \ + f"PROGRAM ERROR: Bad pathway specification for {self.name} in {pathway_arg_str}: {pathway}." pathway = Pathway(pathway=explicit_pathway, composition=self, @@ -6710,150 +7048,6 @@ def add_linear_processing_pathway(self, pathway, name:str=None, context=None, *a return pathway - @handle_external_context() - def add_pathways(self, pathways, context=None): - """Add pathways to the Composition. 
- - Arguments - --------- - - pathways : Pathway or list[Pathway] - specifies one or more `Pathways ` to add to the Composition (see `Pathway_Specification`). - - Returns - ------- - - list[Pathway] : - List of `Pathways ` added to the Composition. - - """ - - # Possible specifications for **pathways** arg: - # 1 Single node: NODE - # Single pathway spec (list, tuple or dict): - # 2 single list: PWAY = [NODE] or [NODE...] in which *all* are NODES with optional intercolated Projections - # 3 single tuple: (PWAY, LearningFunction) = (NODE, LearningFunction) or - # ([NODE...], LearningFunction) - # 4 single dict: {NAME: PWAY} = {NAME: NODE} or - # {NAME: [NODE...]} or - # {NAME: ([NODE...], LearningFunction)} - # Multiple pathway specs (outer list): - # 5 list with list: [PWAY] = [NODE, [NODE]] or [[NODE...]...] - # 6 list with tuple: [(PWAY, LearningFunction)...] = [(NODE..., LearningFunction)...] or - # [([NODE...], LearningFunction)...] - # 7 list with dict: [{NAME: PWAY}...] = [{NAME: NODE...}...] or - # [{NAME: [NODE...]}...] or - # [{NAME: (NODE, LearningFunction)}...] or - # [{NAME: ([NODE...], LearningFunction)}...] - - from psyneulink.core.compositions.pathway import Pathway, _is_node_spec, _is_pathway_entry_spec - - if context.source == ContextFlags.COMMAND_LINE: - pathways_arg_str = f"'pathways' arg for the add_pathways method of {self.name}" - elif context.source == ContextFlags.CONSTRUCTOR: - pathways_arg_str = f"'pathways' arg of the constructor for {self.name}" - else: - assert False, f"PROGRAM ERROR: unrecognized context pass to add_pathways of {self.name}." - context.string = pathways_arg_str - - if not pathways: - return - - # Possibilities 1, 3 or 4 (single NODE, tuple or dict specified, so convert to list - elif _is_node_spec(pathways) or isinstance(pathways, (tuple, dict, Pathway)): - pathways = convert_to_list(pathways) - - # Possibility 2 (list is a single pathway spec): - if (isinstance(pathways, list) - and _is_node_spec(pathways[0]) and all(_is_pathway_entry_spec(p, ANY) for p in pathways)): - # Place in outter list (to conform to processing of multiple pathways below) - pathways = [pathways] - # If pathways is not now a list it must be illegitimate - if not isinstance(pathways, list): - raise CompositionError(f"The {pathways_arg_str} must be a " - f"Node, list, tuple, dict or Pathway object: {pathways}.") - - # pathways should now be a list in which each entry should be *some* form of pathway specification - # (including original spec as possibilities 5, 6, or 7) - - added_pathways = [] - - def identify_pway_type_and_parse_tuple_prn(pway, tuple_or_dict_str): - """ - Determine whether pway is PROCESSING_PATHWAY or LEARNING_PATHWAY and, if it is the latter, - parse tuple into pathway specification and LearningFunction. 
- Return pathway type, pathway, and learning_function or None - """ - learning_function = None - - if isinstance(pway, Pathway): - pway = pway.pathway - - if (_is_node_spec(pway) or isinstance(pway, list) or - # Forgive use of tuple to specify a pathway, and treat as if it was a list spec - (isinstance(pway, tuple) and all(_is_pathway_entry_spec(n, ANY) for n in pathway))): - pway_type = PROCESSING_PATHWAY - return pway_type, pway, None - elif isinstance(pway, tuple): - pway_type = LEARNING_PATHWAY - if len(pway)!=2: - raise CompositionError(f"A tuple specified in the {pathways_arg_str}" - f" has more than two items: {pway}") - pway, learning_function = pway - if not (_is_node_spec(pway) or isinstance(pway, (list, Pathway))): - raise CompositionError(f"The 1st item in {tuple_or_dict_str} specified in the " - f" {pathways_arg_str} must be a node or a list: {pway}") - if not (isinstance(learning_function, type) and issubclass(learning_function, LearningFunction)): - raise CompositionError(f"The 2nd item in {tuple_or_dict_str} specified in the " - f"{pathways_arg_str} must be a LearningFunction: {learning_function}") - return pway_type, pway, learning_function - else: - assert False, f"PROGRAM ERROR: arg to identify_pway_type_and_parse_tuple_prn in {self.name}" \ - f"is not a Node, list or tuple: {pway}" - - # Validate items in pathways list and add to Composition using relevant add_linear_XXX method. - for pathway in pathways: - pway_name = None - if isinstance(pathway, Pathway): - pway_name = pathway.name - pathway = pathway.pathway - if _is_node_spec(pathway) or isinstance(pathway, (list, tuple)): - pway_type, pway, pway_learning_fct = identify_pway_type_and_parse_tuple_prn(pathway, f"a tuple") - elif isinstance(pathway, dict): - if len(pathway)!=1: - raise CompositionError(f"A dict specified in the {pathways_arg_str} " - f"contains more than one entry: {pathway}.") - pway_name, pway = list(pathway.items())[0] - if not isinstance(pway_name, str): - raise CompositionError(f"The key in a dict specified in the {pathways_arg_str} must be a str " - f"(to be used as its name): {pway_name}.") - if _is_node_spec(pway) or isinstance(pway, (list, tuple, Pathway)): - pway_type, pway, pway_learning_fct = identify_pway_type_and_parse_tuple_prn(pway, - f"the value of a dict") - else: - raise CompositionError(f"The value in a dict specified in the {pathways_arg_str} must be " - f"a pathway specification (Node, list or tuple): {pway}.") - else: - raise CompositionError(f"Every item in the {pathways_arg_str} must be " - f"a Node, list, tuple or dict: {repr(pathway)} is not.") - - context.source = ContextFlags.METHOD - if pway_type == PROCESSING_PATHWAY: - new_pathway = self.add_linear_processing_pathway(pathway=pway, - name=pway_name, - context=context) - elif pway_type == LEARNING_PATHWAY: - new_pathway = self.add_linear_learning_pathway(pathway=pway, - learning_function=pway_learning_fct, - name=pway_name, - context=context) - else: - assert False, f"PROGRAM ERROR: failure to determine pathway_type in add_pathways for {self.name}." 
- - added_pathways.append(new_pathway) - - return added_pathways - # endregion PROCESSING PATHWAYS # region ------------------------------------ LEARNING ------------------------------------------------------------- @@ -7446,7 +7640,7 @@ def _create_backpropagation_learning_pathway(self, if path_length >= 3: # get the "terminal_sequence" -- # the last 2 nodes in the back prop pathway and the projection between them - # these components are are processed separately because + # these components are processed separately because # they inform the construction of the Target and Comparator mechs terminal_sequence = processing_pathway[path_length - 3: path_length] else: diff --git a/psyneulink/core/compositions/compositionfunctionapproximator.py b/psyneulink/core/compositions/compositionfunctionapproximator.py index 0623bb72ddb..1b657ae102a 100644 --- a/psyneulink/core/compositions/compositionfunctionapproximator.py +++ b/psyneulink/core/compositions/compositionfunctionapproximator.py @@ -59,6 +59,8 @@ __all__ = ['CompositionFunctionApproximator'] +from psyneulink.core.globals.parameters import check_user_specified + class CompositionFunctionApproximatorError(Exception): def __init__(self, error_value): @@ -105,6 +107,7 @@ class CompositionFunctionApproximator(Composition): componentCategory = COMPOSITION_FUNCTION_APPROXIMATOR + @check_user_specified def __init__(self, name=None, **param_defaults): # self.function = function super().__init__(name=name, **param_defaults) diff --git a/psyneulink/core/compositions/parameterestimationcomposition.py b/psyneulink/core/compositions/parameterestimationcomposition.py index 0ab934d0fc2..3162eae360a 100644 --- a/psyneulink/core/compositions/parameterestimationcomposition.py +++ b/psyneulink/core/compositions/parameterestimationcomposition.py @@ -150,7 +150,7 @@ from psyneulink.core.compositions.composition import Composition from psyneulink.core.globals.context import Context, ContextFlags, handle_external_context from psyneulink.core.globals.keywords import BEFORE -from psyneulink.core.globals.parameters import Parameter +from psyneulink.core.globals.parameters import Parameter, check_user_specified __all__ = ['ParameterEstimationComposition'] @@ -431,6 +431,7 @@ class Parameters(Composition.Parameters): setter=_same_seed_for_all_parameter_combinations_setter) @handle_external_context() + @check_user_specified def __init__(self, parameters, # OCM control_signals outcome_variables, # OCM monitor_for_control diff --git a/psyneulink/core/compositions/pathway.py b/psyneulink/core/compositions/pathway.py index 951385a36bc..da18203bc84 100644 --- a/psyneulink/core/compositions/pathway.py +++ b/psyneulink/core/compositions/pathway.py @@ -26,6 +26,9 @@ - `Pathway_Assignment_to_Composition` - `Pathway_Name` - `Pathway_Specification` + - `Pathway_Specification_Formats` + - `Pathway_Specification_Projections` + - `Pathway_Specification_Multiple` - `Composition_Add_Nested` * `Pathway_Structure` * `Pathway_Execution` @@ -37,9 +40,9 @@ -------- A Pathway is a sequence of `Nodes ` and `Projections `. Generally, Pathways are assigned -to `Compositions `, but a Pathway object can be created on its and used as a template for specifying a -Pathway for a Composition, as described below. See `Pathways ` for additional information about -Pathways in Compositions. 
+to a `Composition`, but a Pathway object can also be created on its own and used as a template for specifying a Pathway
+for a Composition, as described below (see `Pathways ` for additional information about Pathways in
+Compositions).
 
 .. _Pathway_Creation:
 
@@ -54,7 +57,7 @@
 *Pathway as a Template*
 ~~~~~~~~~~~~~~~~~~~~~~~
 
-A Pathway created on its own, using its constructor, is a **template**, that can be used to `specifiy a Pathway
+A Pathway created on its own, using its constructor, is a **template**, that can be used to `specify a Pathway
 ` for one or more Compositions, as described `below `; however, it cannot be executed on its
 own. When a Pathway object is used to assign a Pathway to a Composition, its `pathway ` attribute, and its
 `name ` if that is not otherwise specified (see
@@ -82,7 +85,7 @@
 If the **name** argument of the Pathway's constructor is used to assign it a name, this is used as the name of the
 Pathway created when it is assigned to a Composition in its constructor, or using its `add_pathways
 ` method. This is also the case if one of the Composition's other `Pathway addition methods
-` is used, as long as the **name** argument of those methods is not specified.
+` is used, as long as the **name** argument of those methods is not specified.
 However, if the **name** argument is specified in those methods, or `Pathway specification dictionary
 ` is used to specify the Pathway's name, that takes precedence over, and replaces one specified in
 the Pathway `template's ` `name ` attribute.
 
@@ -93,27 +96,149 @@
 *Pathway Specification*
 ~~~~~~~~~~~~~~~~~~~~~~~
 
-The following formats can be used to specify a Pathway in the **pathway** argument of the constructor for the
-Pathway, the **pathways** argument of a the constructor for a `Composition`, or the corresponding argument
+Pathways are specified as a list, each element of which is either a `Node ` or set of Nodes,
+possibly intercalated with specifications of `Projections ` between them. `Nodes `
+can be a `Mechanism`, a `Composition`, or a tuple (Mechanism or Composition, `NodeRoles `) that can
+be used to assign `required_roles` to the Nodes in the Composition (see `Composition_Nodes` for additional details).
+The Node(s) specified in each entry of the list project to the Node(s) specified in the next entry.
+
+    .. _Pathway_Projection_List_Note:
+
+    .. note::
+       Only a *set* can be used to specify multiple Nodes for a given entry in a Pathway; a *list* can *not* be used
+       for this purpose, as a list containing Nodes is always interpreted as a Pathway. If a list *is* included in a
+       Pathway specification, then it and all other entries are considered as separate, parallel Pathways (see
+       example *vi* in the `figure ` below).
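+For example, the following minimal sketch (the Mechanism names are illustrative) shows how a set and a
+list in the same position are interpreted differently::
+
+    import psyneulink as pnl
+
+    A, B, C, D = [pnl.ProcessingMechanism(name=n) for n in 'ABCD']
+
+    # one Pathway, A -> {B, C} -> D:  default Projections are created from A
+    # to each of B and C, and from each of B and C to D
+    comp = pnl.Composition(pathways=[A, {B, C}, D])
+
+    # if [B, C] (a list) were used instead of {B, C}, then A, [B, C] and D
+    # would each be treated as a separate, parallel Pathway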
+
+.. _Pathway_Specification_Projections:
+
+*Pathway Projection Specifications*
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Where no Projections are specified between entries in the list, default Projections (using a `FULL_CONNECTIVITY_MATRIX`;
+see `MappingProjection_Matrix_Specification`) are created from each Node in the first entry, as the sender(s),
+to each Node in the second, as receiver(s) (described further `below `). Projections between
+Nodes in the two entries can also be specified explicitly, by intercalating a Projection or set of Projections between
+the two entries in the list. If the sender and receiver are both a single Mechanism, then a single `MappingProjection`
+can be `specified` between them. The same applies if the sender is a `Composition` with
+a single `OUTPUT ` Node and/or the receiver is a `Composition` with a single `INPUT `
+Node. If either is a set of Nodes, or is a `nested Composition ` with more than one `INPUT
+` or `OUTPUT ` Node, respectively, then a collection of Projections can be specified
+between any or all pairs of the Nodes in the set(s) and/or nested Composition(s), using either a set or list of
+Projections (order of specification does not matter, whether a set or a list is used). The collection can contain
+`MappingProjections ` between specific pairs of Nodes and/or a single default specification
+(either a `matrix ` specification or a MappingProjection without any `sender
+` or `receiver ` specified).
+
+    .. _Pathway_Projection_Matrix_Note:
+
+    .. note::
+       If a collection of Projection specifications includes a default matrix specification, then a list must be used
+       to specify the collection and *not* a set (since a matrix is unhashable and thus cannot be included in a set).
+
+If a default Projection specification is included in the collection, it is used to implement a Projection between any
+pair of Nodes for which no MappingProjection is otherwise specified, whether within the collection or on its own; if no
+Projections are specified for any individual pairs, a default Projection is created for every pairing of senders and
+receivers. If a collection contains Projections for one or more pairs of Nodes, but does not include a default
+Projection specification, then no Projection is created between any of the other pairings.
+
+If a pair of entries in a pathway has multiple sender and/or receiver Nodes specified (either in a set and/or belonging
+to a `nested Composition `), and either no Projection(s) or only a default Projection is intercalated
+between them, then a default set of Projections is constructed (using the default Projection specification, if provided)
+between each pair of sender and receiver Nodes in the set(s) or nested Composition(s), as follows:
+
+.. _Pathway_Projections:
+
+* *One to one* - if both the sender and receiver entries are Mechanisms, or if either is a Composition and the
+  sender has a single `OUTPUT ` Node and the receiver has a single `INPUT `
+  Node, then a default `MappingProjection` is created from the `primary OutputPort ` of the
+  sender (or of its sole `OUTPUT ` Node, if the sender is a Composition) to the `primary InputPort
+  ` of the receiver (or of its sole `INPUT ` Node, if the receiver is
+  a Composition), and the Projection specification is intercalated between the two entries in the `Pathway`.
+
+* *One to many* - if the sender is either a Mechanism or a Composition with a single `OUTPUT ` Node,
+  but the receiver is either a Composition with more than one `INPUT ` Node or a set of Nodes, then
+  a `MappingProjection` is created from the `primary OutputPort ` of the sender Mechanism (or of
+  its sole `OUTPUT ` Node if the sender is a Composition) to the `primary InputPort
+  ` of each `INPUT ` Node of the receiver Composition and/or of each Mechanism in the
+  receiver set, and a set containing the Projections is intercalated between the two entries in the `Pathway`.
+
+* *Many to one* - if the sender is a Composition with more than one `OUTPUT ` Node or a set of
+  Nodes, and the receiver is either a Mechanism or a Composition with a single `INPUT ` Node, then
+  a `MappingProjection` is created from the `primary OutputPort ` of each `OUTPUT
+  ` Node of the Composition or Mechanism in the set of sender(s), to the `primary InputPort
+  ` of the receiver Mechanism (or of its sole `INPUT ` Node if the receiver is
+  a Composition), and a set containing the Projections is intercalated between the two entries in the `Pathway`.
+
+* *Many to many* - if both the sender and receiver entries contain multiple Nodes (i.e., are sets, and/or
+  the sender is a Composition that has more than one `OUTPUT ` Node and/or the receiver has more
+  than one `INPUT ` Node), then a Projection is constructed for every pairing of Nodes in the
+  sender and receiver entries, using the `primary OutputPort ` of each sender Node and the
+  `primary InputPort ` of each receiver Node.
+
+|
+
+    .. _Pathway_Figure:
+
+    .. figure:: _static/Pathways_fig.svg
+       :scale: 50%
+
+       **Examples of Pathway specifications** (including in the **pathways** argument of a `Composition`). *i)* Set
+       of `Nodes `: each is treated as a `SINGLETON ` within a single Pathway.
+       *ii)* List of Nodes: forms a sequential Pathway. *iii)* Single Node followed by a set: one to many mapping.
+       *iv)* Set followed by a single Node: many to one mapping. *v)* Set followed by a set: many to many mapping.
+       *vi)* Set followed by a list: because there is a list in the specification (``[C,D]``) all other entries are
+       also treated as parallel Pathways (see `note ` above), so ``A`` and ``B`` in the
+       set are `SINGLETON `\\s. *vii)* Set of Projections intercalated between two sets of Nodes:
+       since the set of Projections does not include any involving ``B`` or ``E`` nor a default Projection specification,
+       they are treated as `SINGLETON `\\s (compare with *x*). *viii)* Set followed by a Node and
+       then a set: many to one to many mapping. *ix)* Node followed by one that is a `nested Composition
+       ` then another Node: one to many to one mapping. *x)* Set followed by a list of Projections
+       then another set: since the list of Projections contains a default Projection specification (``matrix``),
+       Projections are created between all pairings of nodes in the sets that precede and follow the list (compare with
+       *vii*); note that the Projections must be specified in a list because the matrix is a list (or array), which
+       cannot be included in a set (see `note ` above).
+
+    .. technical_note::
+       The full code for the examples above can be found in `test_pathways_examples`,
+       although some have been graphically rearranged for illustrative purposes.
+
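+For example, the following is a minimal sketch of a specification like example *x* above (the Mechanism
+names and the matrix value are illustrative), in which a list of Projections that includes a default
+matrix specification is intercalated between two sets of Nodes::
+
+    import psyneulink as pnl
+
+    A, B, C, D = [pnl.ProcessingMechanism(name=n) for n in 'ABCD']
+
+    # explicit Projection for A -> C, plus a default matrix specification used
+    # for all other pairings (A -> D, B -> C and B -> D);  a list (rather than
+    # a set) must be used here, because the matrix is unhashable
+    comp = pnl.Composition(
+        pathways=[{A, B},
+                  [pnl.MappingProjection(sender=A, receiver=C), [[1.0]]],
+                  {C, D}]
+    )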
+
+.. _Pathway_Specification_Formats:
+
+*Pathway Specification Formats*
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The following formats can be used to specify a Pathway in the **pathway** argument of the constructor for
+the Pathway, the **pathways** argument of the constructor for a `Composition`, or the corresponding argument
 of any of a Composition's `Pathway addition methods `:
 
-    * `Node `: -- assigns the Node to a `SINGLETON` Pathway.
+    * `Node `: -- assigns the Node as a `SINGLETON ` in a Pathway.
     ..
     .. _Pathway_Specification_List:
 
     * **list**: [`Node `, <`Projection(s) `,> `Node `...] --
-      each item of the list must be a `Node ` -- i.e., Mechanism or Composition, or a
-      (`Mechanism `, `NodeRoles `) tuple -- or, optionally, a `Projection specification
-      `, a (`Projection specification `, `feedback specification
-      `) tuple, or a set of either interposed between a pair of nodes (see
+      each item of the list must be a `Node ` (i.e., Mechanism or Composition, or a
+      (`Mechanism `, `NodeRoles `) tuple) or set of Nodes, optionally with a `Projection
+      specification `, a (`Projection specification `,
+      `feedback specification `) tuple, or a set of either interposed between
+      a pair of (sets of) Nodes (see `add_linear_processing_pathway `
+      for additional details). The list must begin and end with a (set of) Node(s).
+    ..
+    * **set**: {`Node `, `Node `...} --
+      each item of the set must be a `Node ` (i.e., Mechanism or Composition, or a
+      (`Mechanism `, `NodeRoles `) tuple); each Node is treated as a `SINGLETON
+      `. Sets can also be used in a list specification (see above; and see
       `add_linear_processing_pathway ` for additional details).
-      The list must begin and end with a node.
     ..
     * **2-item tuple**: (Pathway, `LearningFunction`) -- used to specify a `learning Pathway
-      `; the 1st item must be a `Node ` or list, as
-      described above, and the 2nd item be a subclass of `LearningFunction`.
+      `; the 1st item must be one of the forms of Pathway specification
+      described above, and the 2nd item must be a subclass of `LearningFunction`.
 
-.. _Multiple_Pathway_Specification:
+.. _Pathway_Specification_Multiple:
+
+*Multiple Pathway Specifications*
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 In addition to the forms of single Pathway specification `above `, where multiple Pathways
 can be specified (e.g., the **pathways** argument of the constructor for a `Composition` or its `add_pathways
@@ -130,15 +255,15 @@
 If any of the following is used to specify the **pathways** argument:
   * a **standalone** `Node ` (i.e., not in a list), \n
   * a **single Node** alone in a list, \n
+  * a **set** of Nodes, \n
  * one or more Nodes with any other form of `Pathway specification ` in the list \n
-  then each such Node in the list is treated as its own `SINGLETON` pathway (i.e., one containing a single
-  Node that is both the `ORIGIN` and the`TERMINAL` of the Pathway). However, if the list contains only
-  Nodes, then it is treated as a single Pathway (i.e., the list form of `Pathway specification
-  `. Thus:
+  then each such Node in the list is assigned as a `SINGLETON ` Node in its own Pathway.
+  However, if the list contains only Nodes, then it is treated as a single Pathway (i.e., the list form of
+  `Pathway specification ` described above). Thus:
    **pathway**: NODE -> single pathway \n
    **pathway**: [NODE] -> single pathway \n
    **pathway**: [NODE, NODE...] -> single pathway \n
-    **pathway**: [NODE, NODE, () or {} or `Pathway`...] -> three or more pathways
+    **pathway**: [NODE, () or {} or `Pathway`...] -> individual Pathways for each specification.
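+For example (a minimal sketch; the Mechanism names are illustrative)::
+
+    import psyneulink as pnl
+
+    A, B, C = [pnl.ProcessingMechanism(name=n) for n in 'ABC']
+
+    # because the outer list contains another form of Pathway specification
+    # (a dict), A is assigned as a SINGLETON in its own Pathway;  B -> C
+    # forms a second Pathway, named 'B_to_C'
+    comp = pnl.Composition(pathways=[A, {'B_to_C': [B, C]}])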
 
 .. _Pathway_Structure:
 
@@ -210,13 +335,13 @@ def _is_pathway_entry_spec(entry, desired_type:tc.enum(NODE, PROJECTION, ANY)):
     """Test whether pathway entry is specified type (NODE or PROJECTION)"""
     from psyneulink.core.components.projections.projection import _is_projection_spec
-    node_specs = (Mechanism, Composition)
-    is_node = is_proj = False
+    node_types = (Mechanism, Composition)
+    is_node = is_proj = is_set = False
 
     if desired_type in {NODE, ANY}:
-        is_node = (isinstance(entry, node_specs)
+        is_node = (isinstance(entry, node_types)
                    or (isinstance(entry, tuple)
-                       and isinstance(entry[0], node_specs)
+                       and isinstance(entry[0], node_types)
                        and (isinstance(entry[1], NodeRole)
                             or (isinstance(entry[1], list)
                                 and all(isinstance(nr, NodeRole) for nr in entry[1])))))
@@ -226,9 +351,13 @@ def _is_pathway_entry_spec(entry, desired_type:tc.enum(NODE, PROJECTION, ANY)):
                     and _is_projection_spec(entry[0])
                     and entry[1] in {True, FEEDBACK, False, MAYBE})
                or (isinstance(entry, (set,list))
                    and all(_is_projection_spec(item) for item in entry)))
 
-    if is_node or is_proj:
+    if desired_type in {ANY}:
+        is_set = (isinstance(entry, set) and all(_is_node_spec(item) for item in entry))
+
+    if is_node or is_proj or is_set:
         return True
     else:
         return False
diff --git a/psyneulink/core/globals/__init__.py b/psyneulink/core/globals/__init__.py
index 2119b48aeb9..45cdf94c8fa 100644
--- a/psyneulink/core/globals/__init__.py
+++ b/psyneulink/core/globals/__init__.py
@@ -1,6 +1,6 @@
 from . import context
 from . import defaults
-from . import json
+from . import mdf
 from . import keywords
 from . import kvo
 from . import log
@@ -12,10 +12,10 @@
 from .context import *
 from .defaults import *
-from .json import *
 from .keywords import *
 from .kvo import *
 from .log import *
+from .mdf import *
 from .parameters import *
 from .preferences import *
 from .registry import *
@@ -24,10 +24,10 @@
 __all__ = list(context.__all__)
 __all__.extend(defaults.__all__)
-__all__.extend(json.__all__)
 __all__.extend(keywords.__all__)
 __all__.extend(kvo.__all__)
 __all__.extend(log.__all__)
+__all__.extend(mdf.__all__)
 __all__.extend(parameters.__all__)
 __all__.extend(preferences.__all__)
 __all__.extend(registry.__all__)
diff --git a/psyneulink/core/globals/keywords.py b/psyneulink/core/globals/keywords.py
index c65e6a5c965..713e9e16dfb 100644
--- a/psyneulink/core/globals/keywords.py
+++ b/psyneulink/core/globals/keywords.py
@@ -997,10 +997,6 @@ def _is_metric(metric):
 MODEL_SPEC_ID_PARAMETER_VALUE = 'value'
 MODEL_SPEC_ID_PARAMETER_INITIAL_VALUE = 'default_initial_value'
 
-MODEL_SPEC_ID_NODES = 'nodes'
-MODEL_SPEC_ID_PROJECTIONS = 'edges'
-MODEL_SPEC_ID_COMPOSITION = 'graphs'
-
 MODEL_SPEC_ID_MDF_VARIABLE = 'variable0'
 MODEL_SPEC_ID_SHAPE = 'shape'
 
diff --git a/psyneulink/core/globals/json.py b/psyneulink/core/globals/mdf.py
similarity index 54%
rename from psyneulink/core/globals/json.py
rename to psyneulink/core/globals/mdf.py
index 4c598cc8c4d..d898bb8394a 100644
--- a/psyneulink/core/globals/json.py
+++ b/psyneulink/core/globals/mdf.py
@@ -3,36 +3,36 @@
 Contents
 --------
 
-    * `JSON_Overview`
-    * `JSON_Examples`
-    * `JSON_Model_Specification`
+    * `MDF_Overview`
+    * `MDF_Examples`
+    * `MDF_Model_Specification`
 
-.. _JSON_Overview:
+.. _MDF_Overview:
 
 Overview
 --------
 
 The developers of PsyNeuLink are collaborating with the scientific community, as part of the `OpenNeuro effort
-`_, to create a standard, JSON-based format for the description and exchange of computational
+`_, to create a standard, serialized format for the description and exchange of computational
 models of brain and psychological function across different simulation environments. As part of this effort,
 PsyNeuLink supports the `ModECI Model Description Format `_ (MDF) by
 including the ability to produce an MDF-compatible model from a PsyNeuLink model and to construct valid Python
 scripts that express a PsyNeuLink model from an MDF model.
 
 Any PsyNeuLink `Composition` or `Component` can be exported to MDF format using its `as_mdf_model` method or
-to JSON format using its `json_summary` method. `json_summary` generates a string that, passed into the
-`generate_script_from_json` function, produces a valid Python script replicating the original PsyNeuLink model.
-`write_json_file` can be used to write the json_summary for one or more Compositions into a specified file (though
-see `note `). `generate_script_from_json` can accept either the string returned
-by `generate_script_from_json` or the name of a file containing one.
-Calling ``exec(generate_script_from_json())`` will load into the current namespace all of the PsyNeuLink
+to serialized format using its `json_summary` or `yaml_summary` methods. These methods generate strings that, passed into the
+`generate_script_from_mdf` function, produce a valid Python script replicating the original PsyNeuLink model.
+`write_mdf_file` can be used to write the serialization for one or more Compositions into a specified file (though
+see `note `). `generate_script_from_mdf` can accept either the string returned
+by `get_mdf_serialized` or the name of a file containing one.
+Calling ``exec(generate_script_from_mdf())`` will load into the current namespace all of the PsyNeuLink
 objects specified in the ``input``; and `get_compositions` can be used to retrieve a list of all of the Compositions
-in that namespace, including any generated by execution of `generate_script_from_json`. `generate_script_from_mdf`
+in that namespace, including any generated by execution of `generate_script_from_mdf`. `generate_script_from_mdf`
 may similarly be used to create a PsyNeuLink Python script from a ModECI MDF Model object, such as that created by
 `as_mdf_model `.
 
-.. _JSON_Security_Warning:
+.. _MDF_Security_Warning:
 .. warning::
    Use of `generate_script_from_json` or `generate_script_from_mdf` to generate a Python script from a file without taking proper precautions can
@@ -40,7 +40,7 @@
    exec, which has the potential to execute non-PsyNeuLink-related code embedded in the file. Therefore,
    `generate_script_from_json` or `generate_script_from_mdf` should be used to read only files of known and secure origin.
 
-.. _JSON_Examples:
+.. _MDF_Examples:
 
 Model Examples
 --------------
 
 that will give the same results when run on the same input as the original.
 
 :download:`Download stroop_conflict_monitoring.py
-<../../tests/json/stroop_conflict_monitoring.py>`
+<../../tests/mdf/stroop_conflict_monitoring.py>`
 
 :download:`Download stroop_conflict_monitoring.json
 <../../docs/source/_static/stroop_conflict_monitoring.json>`
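+For example, the following minimal sketch (the Composition and its contents are illustrative) serializes a
+Composition and regenerates an equivalent Python script from the serialization::
+
+    import psyneulink as pnl
+
+    comp = pnl.Composition(name='comp', pathways=[pnl.ProcessingMechanism(name='A')])
+
+    model_str = comp.json_summary                     # MDF-compatible JSON serialization
+    script = pnl.generate_script_from_mdf(model_str)  # Python script replicating comp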
-.. _JSON_Model_Specification:
+.. _MDF_Model_Specification:
 
-JSON/MDF Model Specification
+MDF Model Specification
 ------------------------
 
 .. note::
@@ -66,12 +66,22 @@
 See https://github.com/ModECI/MDF/blob/main/docs/README.md#model
 
+.. _MDF_Simple_Edge_Format:
+
+MDF Simple Edge Format
+----------------------
+
+Models may be output as they are in PsyNeuLink or in "simple edge"
+format. In simple edge format, PsyNeuLink Projections are written as a
+combination of two Edges and an intermediate Node, because the generic
+MDF execution engine does not support using Functions on Edges.
+PsyNeuLink is capable of re-importing models exported by PsyNeuLink in
+either form.
 """
 import ast
 import base64
 import binascii
-import copy
 import dill
 import enum
 import graph_scheduler
@@ -80,38 +90,53 @@
 import math
 import numbers
 import numpy
+import os
 import pickle
 import pint
 import psyneulink
 import re
+import tempfile
 import types
+import time
 import warnings
 
 from psyneulink.core.globals.keywords import \
-    MODEL_SPEC_ID_COMPOSITION, MODEL_SPEC_ID_GENERIC, MODEL_SPEC_ID_NODES, MODEL_SPEC_ID_PARAMETER_SOURCE, \
-    MODEL_SPEC_ID_PARAMETER_INITIAL_VALUE, MODEL_SPEC_ID_PARAMETER_VALUE, MODEL_SPEC_ID_PROJECTIONS, MODEL_SPEC_ID_PSYNEULINK, MODEL_SPEC_ID_RECEIVER_MECH, MODEL_SPEC_ID_RECEIVER_PORT, \
-    MODEL_SPEC_ID_SENDER_MECH, MODEL_SPEC_ID_SENDER_PORT, MODEL_SPEC_ID_TYPE, MODEL_SPEC_ID_OUTPUT_PORTS, MODEL_SPEC_ID_MDF_VARIABLE, MODEL_SPEC_ID_INPUT_PORTS, MODEL_SPEC_ID_SHAPE, MODEL_SPEC_ID_METADATA, MODEL_SPEC_ID_INPUT_PORT_COMBINATION_FUNCTION
+    MODEL_SPEC_ID_GENERIC, MODEL_SPEC_ID_PARAMETER_SOURCE, \
+    MODEL_SPEC_ID_PARAMETER_INITIAL_VALUE, MODEL_SPEC_ID_PARAMETER_VALUE, MODEL_SPEC_ID_PSYNEULINK, \
+    MODEL_SPEC_ID_TYPE, MODEL_SPEC_ID_MDF_VARIABLE, MODEL_SPEC_ID_SHAPE, MODEL_SPEC_ID_METADATA, MODEL_SPEC_ID_INPUT_PORT_COMBINATION_FUNCTION
 from psyneulink.core.globals.parameters import ParameterAlias
 from psyneulink.core.globals.sampleiterator import SampleIterator
 from psyneulink.core.globals.utilities import convert_to_list, gen_friendly_comma_str, get_all_explicit_arguments, \
     parse_string_to_psyneulink_object_string, parse_valid_identifier, safe_equals, convert_to_np_array
 
 __all__ = [
-    'PNLJSONError', 'JSONDumpable', 'PNLJSONEncoder',
+    'MDFError', 'MDFSerializable', 'PNLJSONEncoder',
     'generate_json', 'generate_script_from_json', 'generate_script_from_mdf',
-    'write_json_file'
+    'write_json_file', 'get_mdf_model', 'get_mdf_serialized', 'write_mdf_file'
 ]
 
-class PNLJSONError(Exception):
+# file extension to mdf common name
+supported_formats = {
+    'json': 'json',
+    'yml': 'yaml',
+    'yaml': 'yaml',
+}
+
+
+class MDFError(Exception):
     pass
 
-class JSONDumpable:
+class MDFSerializable:
     @property
     def json_summary(self):
         return self.as_mdf_model().to_json()
 
+    @property
+    def yaml_summary(self):
+        return self.as_mdf_model().to_yaml()
+
 # leaving this due to instructions in test_documentation_models
 # (useful for exporting Composition results to JSON)
 
@@ -173,14 +198,36 @@ def _get_variable_parameter_name(obj):
     return MODEL_SPEC_ID_MDF_VARIABLE
 
-def _substitute_expression_args(model):
-    # currently cannot use args with value expressions
-    if model.value is not None:
-        for arg, val in model.args.items():
-            model.value = model.value.replace(arg, str(val))
+def _mdf_obj_from_dict(d):
+    import modeci_mdf.mdf as mdf
+
+    def _get_mdf_object(obj, cls_):
+        try:
+            model_id = obj['id']
+        except KeyError:
+            try:
+                model_id = obj['metadata']['name']
+            except KeyError:
+                model_id = f'{cls_.__name__}_{time.perf_counter_ns()}'
+
+        return cls_.from_dict({model_id: obj})
+
+    for cls_name in mdf.__all__:
+        cls_ = getattr(mdf, cls_name)
+        if all([attr.name in d or
attr.name in {'id', 'parameters'} for attr in cls_.__attrs_attrs__]): + return _get_mdf_object(d, cls_) + + if 'function' in d and 'args' in d: + return _get_mdf_object(d, mdf.Function) + # nothing else seems to fit, try Function (unreliable) + if 'value' in d: + return _get_mdf_object(d, mdf.Function) -def _parse_component_type(component_dict): + return None + + +def _parse_component_type(model_obj): def get_pnl_component_type(s): from psyneulink.core.components.component import ComponentsMeta @@ -196,14 +243,15 @@ def get_pnl_component_type(s): raise type_str = None - if MODEL_SPEC_ID_TYPE in component_dict: - type_dict = component_dict[MODEL_SPEC_ID_TYPE] - else: + try: try: - type_dict = component_dict[MODEL_SPEC_ID_METADATA][MODEL_SPEC_ID_TYPE] - except KeyError: - # specifically for functions the keyword is not 'type' - type_str = component_dict['function'] + type_dict = model_obj.metadata[MODEL_SPEC_ID_TYPE] + except AttributeError: + # could be a dict specification + type_str = model_obj[MODEL_SPEC_ID_METADATA][MODEL_SPEC_ID_TYPE] + except (KeyError, TypeError): + # specifically for functions the keyword is not 'type' + type_str = model_obj.function if type_str is None: try: @@ -216,12 +264,12 @@ def get_pnl_component_type(s): type_str = type_dict elif isinstance(type_str, dict): if len(type_str) != 1: - raise PNLJSONError + raise MDFError else: elem = list(type_str.keys())[0] # not a function_type: args dict if MODEL_SPEC_ID_METADATA in type_str[elem]: - raise PNLJSONError + raise MDFError else: type_str = elem @@ -256,21 +304,19 @@ def get_pnl_component_type(s): else: return type_str - raise PNLJSONError( - 'Invalid type specified for JSON object: {0}'.format( - component_dict - ) - ) + raise MDFError(f'Invalid type specified for MDF object: {model_obj}') def _parse_parameter_value(value, component_identifiers=None, name=None, parent_parameters=None): + import modeci_mdf.mdf as mdf + if component_identifiers is None: component_identifiers = {} exec('import numpy') try: pnl_type = _parse_component_type(value) - except (KeyError, TypeError, PNLJSONError): + except (AttributeError, TypeError, MDFError): # ignore parameters that aren't components pnl_type = None @@ -296,8 +342,8 @@ def _parse_parameter_value(value, component_identifiers=None, name=None, parent_ try: value_type = eval(value[MODEL_SPEC_ID_TYPE]) except Exception as e: - raise PNLJSONError( - 'Invalid python type specified in JSON object: {0}'.format( + raise MDFError( + 'Invalid python type specified in MDF object: {0}'.format( value[MODEL_SPEC_ID_TYPE] ) ) from e @@ -334,57 +380,51 @@ def _parse_parameter_value(value, component_identifiers=None, name=None, parent_ parent_parameters ) else: - # it is either a Component spec or just a plain dict - try: - # try handling as a Component spec + if len(value) == 1: try: - comp_name = value['name'] + identifier = list(value.keys())[0] except KeyError: - comp_name = name - - if comp_name is not None: - identifier = parse_valid_identifier(comp_name) - if len(value) == 1: - try: - value = value[comp_name] - except KeyError: - pass - else: - if len(value) == 1: - comp_name = list(value.keys())[0] - identifier = parse_valid_identifier(comp_name) - if isinstance(value[comp_name], dict): - value = value[comp_name] - else: - raise PNLJSONError( - f'Component without name could reference multiple objects: {value}', - ) + identifier = name - if ( - identifier in component_identifiers - and component_identifiers[identifier] - ): - # if this spec is already created as a node elsewhere, - 
# then just use a reference - value = identifier - else: + mdf_object = value[identifier] + else: + try: + identifier = value['id'] + except KeyError: + identifier = name + + mdf_object = value + + # it is either a Component spec or just a plain dict + if ( + identifier in component_identifiers + and component_identifiers[identifier] + ): + # if this spec is already created as a node elsewhere, + # then just use a reference + value = identifier + else: + if not isinstance(mdf_object, mdf.Base): + mdf_object = _mdf_obj_from_dict(mdf_object) + + try: value = _generate_component_string( - value, + mdf_object, component_identifiers, - component_name=comp_name, + component_name=identifier, parent_parameters=parent_parameters ) - except (PNLJSONError, KeyError, TypeError): - # standard dict handling - value = '{{{0}}}'.format( - ', '.join([ - '{0}: {1}'.format( - str(_parse_parameter_value(k, component_identifiers, name)), - str(_parse_parameter_value(v, component_identifiers, name)) - ) - for k, v in value.items() - ]) - ) + except (AttributeError, MDFError, KeyError, TypeError): + # standard dict handling + value = '{{{0}}}'.format( + ', '.join([ + '{0}: {1}'.format( + str(_parse_parameter_value(k, component_identifiers, name)), + str(_parse_parameter_value(v, component_identifiers, name)) + ) + for k, v in value.items() + ]) + ) elif isinstance(value, str): # handle pointer to parent's parameter value @@ -458,11 +498,19 @@ def _parse_parameter_value(value, component_identifiers=None, name=None, parent_ ): value = f"'{value}'" + elif isinstance(value, mdf.Base): + value = _generate_component_string( + value, + component_identifiers, + component_name=value.id, + parent_parameters=parent_parameters + ) + return value def _generate_component_string( - component_dict, + component_model, component_identifiers, component_name=None, parent_parameters=None, @@ -471,63 +519,63 @@ def _generate_component_string( ): from psyneulink.core.components.functions.function import Function_Base from psyneulink.core.components.functions.userdefinedfunction import UserDefinedFunction - from psyneulink.core.components.projections.projection import Projection_Base try: - component_type = _parse_component_type(component_dict) - except KeyError as e: + component_type = _parse_component_type(component_model) + except AttributeError as e: # acceptable to exclude type currently if default_type is not None: component_type = default_type else: raise type(e)( - f'{component_dict} has no PNL or generic type and no ' + f'{component_model} has no PNL or generic type and no ' 'default_type is specified' ) from e if component_name is None: - name = component_dict['name'] + name = component_model.id else: name = component_name try: - assert component_name == component_dict['name'] + assert component_name == component_model.id except KeyError: pass is_user_defined_function = False try: - parameters = dict(component_dict[component_type._model_spec_id_parameters]) + parameters = dict(getattr(component_model, component_type._model_spec_id_parameters)) except AttributeError: is_user_defined_function = True - except KeyError: + except TypeError: parameters = {} if is_user_defined_function or component_type is UserDefinedFunction: custom_func = component_type component_type = UserDefinedFunction try: - parameters = dict(component_dict[component_type._model_spec_id_parameters]) - except KeyError: + parameters = dict(getattr(component_model, component_type._model_spec_id_parameters)) + except TypeError: parameters = {} 
        parameters['custom_function'] = f'{custom_func}'
 
        try:
-            del component_dict[MODEL_SPEC_ID_METADATA]['custom_function']
+            del component_model.metadata['custom_function']
        except KeyError:
            pass
 
    try:
-        parameters.update(component_dict[component_type._model_spec_id_stateful_parameters])
-    except KeyError:
+        parameters.update(getattr(component_model, component_type._model_spec_id_stateful_parameters))
+    except (AttributeError, TypeError):
        pass
 
    try:
        # args in function dict
-        parameters.update(component_dict['function'][list(component_dict['function'].keys())[0]])
+        parameters.update(component_model.function[list(component_model.function.keys())[0]])
    except (AttributeError, KeyError):
        pass
 
    parameter_names = {}
 
+    # TODO: remove this?
    # If there is a parameter that is the psyneulink identifier string
    # (as of this comment, 'pnl'), then expand these parameters as
    # normal ones. We don't check and expand for other
@@ -540,20 +588,20 @@
        pass
 
    try:
-        metadata = component_dict[MODEL_SPEC_ID_METADATA]
-    except KeyError:
-        metadata = {}
-
-    if issubclass(component_type, Projection_Base):
+        functions = component_model.functions
+    except AttributeError:
        try:
-            component_dict['functions'] = metadata['functions']
+            functions = [_mdf_obj_from_dict(v) for k, v in component_model.metadata['functions'].items()]
        except KeyError:
-            pass
+            functions = None
+        except AttributeError:
+            functions = component_model.metadata['functions']
 
    # pnl objects only have one function unless specified in another way
    # than just "function"
-    if 'functions' in component_dict:
-        dup_function_names = set([name for name in component_dict['functions'] if name in component_identifiers])
+
+    if functions is not None:
+        dup_function_names = set([f.id for f in functions if f.id in component_identifiers])
        if len(dup_function_names) > 0:
            warnings.warn(
                f'Functions ({gen_friendly_comma_str(dup_function_names)}) of'
@@ -564,8 +612,8 @@
    function_determined_by_output_port = False
 
    try:
-        output_ports = component_dict[MODEL_SPEC_ID_OUTPUT_PORTS]
-    except KeyError:
+        output_ports = component_model.output_ports
+    except AttributeError:
        pass
    else:
        if len(output_ports) == 1 or isinstance(output_ports, list):
@@ -577,7 +625,7 @@
        else:
            try:
                # 'out_port' appears to be the general primary output_port term
-                # should ideally have a marker in json to define it as primary
+                # should ideally have a marker in mdf to define it as primary
                primary_output_port = output_ports['out_port']
            except KeyError:
                pass
@@ -585,17 +633,15 @@
            function_determined_by_output_port = True
 
    # neuroml-style mdf has MODEL_SPEC_ID_PARAMETER_VALUE in output port definitions
-    if function_determined_by_output_port and MODEL_SPEC_ID_PARAMETER_VALUE in primary_output_port:
-        parameter_names['function'] = re.sub(r'(.*)\[\d+\]', '\\1', primary_output_port[MODEL_SPEC_ID_PARAMETER_VALUE])
+    if function_determined_by_output_port and hasattr(primary_output_port, MODEL_SPEC_ID_PARAMETER_VALUE):
+        parameter_names['function'] = re.sub(r'(.*)\[\d+\]', '\\1', getattr(primary_output_port, MODEL_SPEC_ID_PARAMETER_VALUE))
    else:
        parameter_names['function'] = [
-            f for f in component_dict['functions']
-            if not f.endswith(MODEL_SPEC_ID_INPUT_PORT_COMBINATION_FUNCTION)
+            f.id for f in functions
+            if not f.id.endswith(MODEL_SPEC_ID_INPUT_PORT_COMBINATION_FUNCTION)
        ][0]
 
-        parameters['function'] = {
-            parameter_names['function']: component_dict['functions'][parameter_names['function']]
-        }
+        parameters['function'] = 
[f for f in functions if f.id == parameter_names['function']][0] assignment_str = f'{parse_valid_identifier(name)} = ' if assignment else '' @@ -617,7 +663,7 @@ def _generate_component_string( parameters = { **{k: v for k, v in parent_parameters.items() if isinstance(v, dict) and MODEL_SPEC_ID_PARAMETER_INITIAL_VALUE in v}, **parameters, - **metadata + **(component_model.metadata if component_model.metadata is not None else {}) } # MDF input ports do not have functions, so their shape is @@ -626,13 +672,9 @@ def _generate_component_string( # the input port shape if input_ports parameter is specified if 'variable' not in parameters and 'input_ports' not in parameters: try: - ip = parameters['function'][Function_Base._model_spec_id_parameters][MODEL_SPEC_ID_MDF_VARIABLE] + ip = getattr(parameters['function'], Function_Base._model_spec_id_parameters)[MODEL_SPEC_ID_MDF_VARIABLE] var = convert_to_np_array( - numpy.zeros( - ast.literal_eval( - component_dict[MODEL_SPEC_ID_INPUT_PORTS][ip][MODEL_SPEC_ID_SHAPE] - ) - ), + numpy.zeros(ast.literal_eval(component_model.input_ports[ip][MODEL_SPEC_ID_SHAPE])), dimension=2 ).tolist() parameters['variable'] = var @@ -766,57 +808,46 @@ def parameter_value_matches_default(component_type, param, value): def _generate_scheduler_string( scheduler_id, - scheduler_dict, + scheduler_model, component_identifiers, blacklist=[] ): output = [] - try: - node_specific_conds = scheduler_dict['node_specific'] - except KeyError: - pass - else: - for node, condition in node_specific_conds.items(): - if node not in blacklist: - output.append( - '{0}.add_condition({1}, {2})'.format( - scheduler_id, - parse_valid_identifier(node), - _generate_condition_string( - condition, - component_identifiers - ) + + for node, condition in scheduler_model.node_specific.items(): + if node not in blacklist: + output.append( + '{0}.add_condition({1}, {2})'.format( + scheduler_id, + parse_valid_identifier(node), + _generate_condition_string( + condition, + component_identifiers ) ) - - output.append('') + ) termination_str = [] - try: - termination_conds = scheduler_dict['termination'] - except KeyError: - pass - else: - for scale, cond in termination_conds.items(): - termination_str.insert( - 1, - 'psyneulink.{0}: {1}'.format( - f'TimeScale.{str.upper(scale)}', - _generate_condition_string(cond, component_identifiers) - ) + for scale, cond in scheduler_model.termination.items(): + termination_str.insert( + 1, + 'psyneulink.{0}: {1}'.format( + f'TimeScale.{str.upper(scale)}', + _generate_condition_string(cond, component_identifiers) ) + ) - output.append( - '{0}.termination_conds = {{{1}}}'.format( - scheduler_id, - ', '.join(termination_str) - ) + output.append( + '{0}.termination_conds = {{{1}}}'.format( + scheduler_id, + ', '.join(termination_str) ) + ) return '\n'.join(output) -def _generate_condition_string(condition_dict, component_identifiers): +def _generate_condition_string(condition_model, component_identifiers): def _parse_condition_arg_value(value): try: identifier = parse_valid_identifier(value) @@ -827,7 +858,7 @@ def _parse_condition_arg_value(value): return str(identifier) try: - getattr(psyneulink.core.scheduling.condition, value['type']) + getattr(psyneulink.core.scheduling.condition, value.type) except (AttributeError, KeyError, TypeError): pass else: @@ -853,7 +884,7 @@ def _parse_graph_scheduler_type(typ): return typ args_str = '' - cond_type = _parse_graph_scheduler_type(condition_dict[MODEL_SPEC_ID_TYPE]) + cond_type = _parse_graph_scheduler_type(condition_model.type) 
sig = inspect.signature(getattr(psyneulink, cond_type).__init__) var_positional_arg_name = None @@ -863,7 +894,7 @@ def _parse_graph_scheduler_type(typ): var_positional_arg_name = name break - args_dict = condition_dict['args'] + args_dict = condition_model.kwargs try: pos_args = args_dict[var_positional_arg_name] @@ -906,30 +937,8 @@ def _parse_graph_scheduler_type(typ): return f'psyneulink.{cond_type}({arguments_str})' -def _generate_composition_string(graphs_dict, component_identifiers): - def _replace_function_node_with_mech_node(function_dict, name, typ=None): - if typ is None: - typ = _parse_component_type(function_dict) - else: - typ = typ.__name__ - - mech_func_dict = { - 'functions': { - name: { - MODEL_SPEC_ID_TYPE: {MODEL_SPEC_ID_PSYNEULINK: typ}, - psyneulink.Function_Base._model_spec_id_parameters: function_dict[psyneulink.Component._model_spec_id_parameters] - }, - } - } - - try: - del function_dict[MODEL_SPEC_ID_TYPE] - except KeyError: - pass - - function_dict['name'] = f"{name}_wrapped_mech" - - return {**function_dict, **mech_func_dict} +def _generate_composition_string(graph, component_identifiers): + import modeci_mdf.mdf as mdf # used if no generic types are specified default_composition_type = psyneulink.Composition @@ -947,410 +956,321 @@ def _replace_function_node_with_mech_node(function_dict, name, typ=None): ) output = [] - # may be given multiple compositions - for comp_name, composition_dict in graphs_dict.items(): - try: - assert comp_name == composition_dict['name'] - except KeyError: - pass - - comp_identifer = parse_valid_identifier(comp_name) + comp_identifer = parse_valid_identifier(graph.id) - def alphabetical_order(items): - alphabetical = enumerate( - sorted(items) - ) - return { - parse_valid_identifier(item[1]): item[0] - for item in alphabetical - } - - # get order in which nodes were added - # may be node names or dictionaries - try: - node_order = composition_dict[MODEL_SPEC_ID_METADATA]['node_ordering'] - node_order = { - parse_valid_identifier(list(node.keys())[0]) if isinstance(node, dict) - else parse_valid_identifier(node): node_order.index(node) - for node in node_order - } - - unspecified_node_order = { - node: position + len(node_order) - for node, position in alphabetical_order([ - n for n in composition_dict[MODEL_SPEC_ID_NODES] if n not in node_order - ]).items() - } - - node_order.update(unspecified_node_order) - - assert all([ - (parse_valid_identifier(node) in node_order) - for node in composition_dict[MODEL_SPEC_ID_NODES] - ]) - except (KeyError, TypeError, AssertionError): - # if no node_ordering attribute exists, fall back to - # alphabetical order - node_order = alphabetical_order(composition_dict[MODEL_SPEC_ID_NODES]) - - # clean up pnl-specific and other software-specific items - pnl_specific_items = {} - keys_to_delete = [] - - for name, node in composition_dict[MODEL_SPEC_ID_NODES].items(): - try: - component_type = _parse_component_type(node) - except KeyError: - # will use a default type - pass - except PNLJSONError: - # node isn't a node dictionary, but a dict of dicts, - # indicating a software-specific set of nodes or - # a composition - if name == MODEL_SPEC_ID_PSYNEULINK: - pnl_specific_items = node - - if MODEL_SPEC_ID_COMPOSITION not in node: - keys_to_delete.append(name) - else: - # projection was written out as a node for simple_edge_format - if issubclass(component_type, psyneulink.Projection_Base): - assert len(node[MODEL_SPEC_ID_INPUT_PORTS]) == 1 - assert len(node[MODEL_SPEC_ID_OUTPUT_PORTS]) == 1 - - 
extra_projs_to_delete = set() - - sender = None - sender_port = None - receiver = None - receiver_port = None - - for proj_name, proj in composition_dict[MODEL_SPEC_ID_PROJECTIONS].items(): - if proj[MODEL_SPEC_ID_RECEIVER_MECH] == name: - assert 'dummy' in proj_name - sender = proj[MODEL_SPEC_ID_SENDER_MECH] - sender_port = proj[MODEL_SPEC_ID_SENDER_PORT] - extra_projs_to_delete.add(proj_name) - - if proj[MODEL_SPEC_ID_SENDER_MECH] == name: - assert 'dummy' in proj_name - receiver = proj[MODEL_SPEC_ID_RECEIVER_MECH] - receiver_port = proj[MODEL_SPEC_ID_RECEIVER_PORT] - # if for some reason the projection has node as both sender and receiver - # this is a bug, let the deletion fail - extra_projs_to_delete.add(proj_name) - - if sender is None: - raise PNLJSONError(f'Dummy node {name} for projection has no sender in projections list') - - if receiver is None: - raise PNLJSONError(f'Dummy node {name} for projection has no receiver in projections list') - - proj_dict = { - **{ - MODEL_SPEC_ID_SENDER_PORT: sender_port, - MODEL_SPEC_ID_RECEIVER_PORT: receiver_port, - MODEL_SPEC_ID_SENDER_MECH: sender, - MODEL_SPEC_ID_RECEIVER_MECH: receiver - }, - **{ - MODEL_SPEC_ID_METADATA: { - # variable isn't specified for projections - **{k: v for k, v in node[MODEL_SPEC_ID_METADATA].items() if k != 'variable'}, - 'functions': node['functions'] - } - }, - } - try: - proj_dict[component_type._model_spec_id_parameters] = node[psyneulink.Component._model_spec_id_parameters] - except KeyError: - pass - - composition_dict[MODEL_SPEC_ID_PROJECTIONS][name.rstrip('_dummy_node')] = proj_dict - - keys_to_delete.append(name) - for p in extra_projs_to_delete: - del composition_dict[MODEL_SPEC_ID_PROJECTIONS][p] + def alphabetical_order(items): + alphabetical = enumerate( + sorted(items) + ) + return { + parse_valid_identifier(item[1]): item[0] + for item in alphabetical + } - for nr_item in ['required_node_roles', 'excluded_node_roles']: - nr_removal_indices = [] + # get order in which nodes were added + # may be node names or dictionaries + try: + node_order = graph.metadata['node_ordering'] + node_order = { + parse_valid_identifier(list(node.keys())[0]) if isinstance(node, dict) + else parse_valid_identifier(node): node_order.index(node) + for node in node_order + } - for i, (nr_name, nr_role) in enumerate( - composition_dict[MODEL_SPEC_ID_METADATA][nr_item] - ): - if nr_name == name: - nr_removal_indices.append(i) + unspecified_node_order = { + node: position + len(node_order) + for node, position in alphabetical_order([ + parse_valid_identifier(n.id) for n in graph.nodes if n.id not in node_order + ]).items() + } - for i in nr_removal_indices: - del composition_dict[MODEL_SPEC_ID_METADATA][nr_item][i] + node_order.update(unspecified_node_order) - for nodes_dict in pnl_specific_items: - for name, node in nodes_dict.items(): - composition_dict[MODEL_SPEC_ID_NODES][name] = node + assert all([ + (parse_valid_identifier(node.id) in node_order) + for node in graph.nodes + ]) + except (KeyError, TypeError, AssertionError): + # if no node_ordering attribute exists, fall back to + # alphabetical order + node_order = alphabetical_order([parse_valid_identifier(n.id) for n in graph.nodes]) - for name_to_delete in keys_to_delete: - del composition_dict[MODEL_SPEC_ID_NODES][name_to_delete] + keys_to_delete = [] + for node in graph.nodes: try: - edges_dict = composition_dict[MODEL_SPEC_ID_PROJECTIONS] - pnl_specific_items = {} - keys_to_delete = [] - except KeyError: + component_type = _parse_component_type(node) + except 
(AttributeError, KeyError):
+            # will use a default type
            pass
        else:
            # projection was written out as a node for simple_edge_format
            if issubclass(component_type, psyneulink.Projection_Base):
+                assert len(node.input_ports) == 1
+                assert len(node.output_ports) == 1
+
+                extra_projs_to_delete = set()
+
+                sender = None
+                sender_port = None
+                receiver = None
+                receiver_port = None
+
+                for proj in graph.edges:
+                    if proj.receiver == node.id:
+                        assert 'dummy' in proj.id
+                        sender = proj.sender
+                        sender_port = proj.sender_port
+                        extra_projs_to_delete.add(proj.id)
+
+                    if proj.sender == node.id:
+                        assert 'dummy' in proj.id
+                        receiver = proj.receiver
+                        receiver_port = proj.receiver_port
+                        # if for some reason the projection has node as both sender and receiver
+                        # this is a bug, let the deletion fail
+                        extra_projs_to_delete.add(proj.id)
+
+                if sender is None:
+                    raise MDFError(f'Dummy node {node.id} for projection has no sender in projections list')
+
+                if receiver is None:
+                    raise MDFError(f'Dummy node {node.id} for projection has no receiver in projections list')
+
+                main_proj = mdf.Edge(
+                    id=node.id.rstrip('_dummy_node'),
+                    sender=sender,
+                    receiver=receiver,
+                    sender_port=sender_port,
+                    receiver_port=receiver_port,
+                    metadata={
+                        # variable isn't specified for projections
+                        **{k: v for k, v in node.metadata.items() if k != 'variable'},
+                        'functions': node.functions
+                    }
+                )
+                main_proj.parameters = {p.id: p for p in node.parameters}
+                graph.edges.append(main_proj)
+
+                keys_to_delete.append(node.id)
+                for p in extra_projs_to_delete:
+                    del graph.edges[graph.edges.index([e for e in graph.edges if e.id == p][0])]
+
+        for nr_item in ['required_node_roles', 'excluded_node_roles']:
+            nr_removal_indices = []
+
+            for i, (nr_name, nr_role) in enumerate(
+                graph.metadata[nr_item]
+            ):
+                if nr_name == node.id:
+                    nr_removal_indices.append(i)
+
+            for i in nr_removal_indices:
+                del graph.metadata[nr_item][i]
+
+    for name_to_delete in keys_to_delete:
+        del graph.nodes[graph.nodes.index([n for n in graph.nodes if n.id == name_to_delete][0])]
+
+    # generate string for Composition itself
+    output.append(
+        "{0} = {1}\n".format(
+            comp_identifer,
+            _generate_component_string(
+                graph,
+                component_identifiers,
+                component_name=graph.id,
+                default_type=default_composition_type
            )
        )
-        component_identifiers[comp_identifer] = True
-
-        mechanisms = {}
-        compositions = {}
-        control_mechanisms = {}
-        implicit_mechanisms = {}
-
-        # add nested compositions and mechanisms in order they were added
-        # to this composition
-        for name, node in sorted(
-            composition_dict[MODEL_SPEC_ID_NODES].items(),
-            key=lambda item: 
node_order[parse_valid_identifier(item[0])] - ): - if MODEL_SPEC_ID_COMPOSITION in node: - compositions[name] = node[MODEL_SPEC_ID_COMPOSITION] - else: - try: - component_type = _parse_component_type(node) - except KeyError: - component_type = default_node_type - identifier = parse_valid_identifier(name) - if issubclass(component_type, control_mechanism_types): - control_mechanisms[name] = node - component_identifiers[identifier] = True - elif issubclass(component_type, implicit_types): - implicit_mechanisms[name] = node - else: - mechanisms[name] = node - component_identifiers[identifier] = True - - implicit_names = [ - x - for x in [*implicit_mechanisms.keys(), *control_mechanisms.keys()] - ] - - for name, mech in copy.copy(mechanisms).items(): + ) + component_identifiers[comp_identifer] = True + + mechanisms = [] + compositions = [] + control_mechanisms = [] + implicit_mechanisms = [] + + # add nested compositions and mechanisms in order they were added + # to this composition + for node in sorted( + graph.nodes, + key=lambda item: node_order[parse_valid_identifier(item.id)] + ): + if isinstance(node, mdf.Graph): + compositions.append(node) + else: try: - mech_type = _parse_component_type(mech) - except KeyError: - mech_type = None - - if ( - isinstance(mech_type, type) - and issubclass(mech_type, psyneulink.Function) - ): - mech = _replace_function_node_with_mech_node(mech, name, mech_type) - - component_identifiers[mech['name']] = component_identifiers[name] - del component_identifiers[name] - - node_order[mech['name']] = node_order[name] - del node_order[name] + component_type = _parse_component_type(node) + except (AttributeError, KeyError): + component_type = default_node_type + identifier = parse_valid_identifier(node.id) + if issubclass(component_type, control_mechanism_types): + control_mechanisms.append(node) + component_identifiers[identifier] = True + elif issubclass(component_type, implicit_types): + implicit_mechanisms.append(node) + else: + mechanisms.append(node) + component_identifiers[identifier] = True - mechanisms[mech['name']] = mechanisms[name] - del mechanisms[name] + implicit_names = [node.id for node in implicit_mechanisms + control_mechanisms] - composition_dict['nodes'][mech['name']] = composition_dict['nodes'][name] - del composition_dict['nodes'][name] + for mech in mechanisms: + try: + mech_type = _parse_component_type(mech) + except (AttributeError, KeyError): + mech_type = None - name = mech['name'] + if ( + isinstance(mech_type, type) + and issubclass(mech_type, psyneulink.Function) + ): + # removed branch converting functions defined as nodes + # should no longer happen with recent MDF versions + assert False - output.append( - _generate_component_string( - mech, - component_identifiers, - component_name=name, - assignment=True, - default_type=default_node_type - ) + output.append( + _generate_component_string( + mech, + component_identifiers, + component_name=parse_valid_identifier(mech.id), + assignment=True, + default_type=default_node_type ) - if len(mechanisms) > 0: - output.append('') + ) + if len(mechanisms) > 0: + output.append('') - for name, mech in control_mechanisms.items(): - output.append( - _generate_component_string( - mech, - component_identifiers, - component_name=name, - assignment=True, - default_type=default_node_type - ) + for mech in control_mechanisms: + output.append( + _generate_component_string( + mech, + component_identifiers, + component_name=parse_valid_identifier(mech.id), + assignment=True, + default_type=default_node_type 
            )
+        )
 
-        if len(control_mechanisms) > 0:
-            output.append('')
+    if len(control_mechanisms) > 0:
+        output.append('')
 
-        # recursively generate string for inner Compositions
-        for name, comp in compositions.items():
-            output.append(
-                _generate_composition_string(
-                    comp,
-                    component_identifiers
+    # recursively generate string for inner Compositions
+    for comp in compositions:
+        output.append(
+            _generate_composition_string(
+                comp,
+                component_identifiers
            )
-        if len(compositions) > 0:
-            output.append('')
-
-        # generate string to add the nodes to this Composition
-        try:
-            node_roles = {
-                parse_valid_identifier(node): role for (node, role) in
-                composition_dict[MODEL_SPEC_ID_METADATA]['required_node_roles']
-            }
-        except KeyError:
-            node_roles = []
+        )
+    if len(compositions) > 0:
+        output.append('')
 
+    # generate string to add the nodes to this Composition
+    try:
+        node_roles = {
+            parse_valid_identifier(node): role for (node, role) in
+            graph.metadata['required_node_roles']
+        }
+    except KeyError:
+        node_roles = []
 
-        try:
-            excluded_node_roles = {
-                parse_valid_identifier(node): role for (node, role) in
-                composition_dict[MODEL_SPEC_ID_METADATA]['excluded_node_roles']
-            }
-        except KeyError:
-            excluded_node_roles = []
+    try:
+        excluded_node_roles = {
+            parse_valid_identifier(node): role for (node, role) in
+            graph.metadata['excluded_node_roles']
+        }
+    except KeyError:
+        excluded_node_roles = []
 
-        # do not add the controller as a normal node
-        try:
-            controller_name = list(composition_dict[MODEL_SPEC_ID_METADATA]['controller'].keys())[0]
-        except (AttributeError, KeyError, TypeError):
-            controller_name = None
+    # do not add the controller as a normal node
+    try:
+        controller_name = graph.metadata['controller']['id']
+    except (AttributeError, KeyError, TypeError):
+        controller_name = None
+
+    for node in sorted(
+        graph.nodes,
+        key=lambda item: node_order[parse_valid_identifier(item.id)]
+    ):
+        name = node.id
+        if (
+            name not in implicit_names
+            and name != controller_name
        ):
-        for name in sorted(
-            composition_dict[MODEL_SPEC_ID_NODES],
-            key=lambda item: node_order[parse_valid_identifier(item)]
+            name = parse_valid_identifier(name)
            output.append(
                '{0}.add_node({1}{2})'.format(
                    comp_identifer,
                    name,
                    ', {0}'.format(
                        _parse_parameter_value(
                            node_roles[name],
                            component_identifiers
                        )
                    ) if name in node_roles else ''
                )
            )
-        if len(composition_dict[MODEL_SPEC_ID_NODES]) > 0:
-            output.append('')
-
-        if len(excluded_node_roles) > 0:
-            for node, roles in excluded_node_roles.items():
-                if name not in implicit_names and name != controller_name:
-                    output.append(
-                        f'{comp_identifer}.exclude_node_roles({node}, {_parse_parameter_value(roles, component_identifiers)})'
-                    )
-            output.append('')
+    if len(graph.nodes) > 0:
+        output.append('')
+
+    if len(excluded_node_roles) > 0:
+        for node, roles in excluded_node_roles.items():
+            if node not in implicit_names and node != controller_name:
+                output.append(
+                    f'{comp_identifer}.exclude_node_roles({node}, {_parse_parameter_value(roles, component_identifiers)})'
+                )
+        output.append('')
 
+    # generate string to add the projections
+    for proj in graph.edges:
        try:
-            edges_dict = composition_dict[MODEL_SPEC_ID_PROJECTIONS]
-        except KeyError:
-            pass
-        else:
-            # generate string to add the projections
generate string to add the projections - for name, projection_dict in edges_dict.items(): - try: - projection_type = _parse_component_type(projection_dict) - except KeyError: - projection_type = default_edge_type - - if ( - not issubclass(projection_type, implicit_types) - and projection_dict[MODEL_SPEC_ID_SENDER_MECH] not in implicit_names - and projection_dict[MODEL_SPEC_ID_RECEIVER_MECH] not in implicit_names - ): - output.append( - '{0}.add_projection(projection={1}, sender={2}, receiver={3})'.format( - comp_identifer, - _generate_component_string( - projection_dict, - component_identifiers, - component_name=name, - default_type=default_edge_type - ), - parse_valid_identifier( - projection_dict[MODEL_SPEC_ID_SENDER_MECH] - ), - parse_valid_identifier( - projection_dict[MODEL_SPEC_ID_RECEIVER_MECH] - ), - ) - ) + projection_type = _parse_component_type(proj) + except (AttributeError, KeyError): + projection_type = default_edge_type - # add controller if it exists (must happen after projections) - if controller_name is not None: + if ( + not issubclass(projection_type, implicit_types) + and proj.sender not in implicit_names + and proj.receiver not in implicit_names + ): output.append( - '{0}.add_controller({1})'.format( + '{0}.add_projection(projection={1}, sender={2}, receiver={3})'.format( comp_identifer, - parse_valid_identifier(controller_name) + _generate_component_string( + proj, + component_identifiers, + default_type=default_edge_type + ), + parse_valid_identifier(proj.sender), + parse_valid_identifier(proj.receiver), ) ) - # add schedulers - # blacklist automatically generated nodes because they will - # not exist in the script namespace - try: - conditions = composition_dict['conditions'] - except KeyError: - conditions = {} - - output.append('') + # add controller if it exists (must happen after projections) + if controller_name is not None: output.append( - _generate_scheduler_string( - f'{comp_identifer}.scheduler', - conditions, - component_identifiers, - blacklist=implicit_names + '{0}.add_controller({1})'.format( + comp_identifer, + parse_valid_identifier(controller_name) ) ) - return '\n'.join(output) + # add schedulers + # blacklist automatically generated nodes because they will + # not exist in the script namespace + output.append('') + output.append( + _generate_scheduler_string( + f'{comp_identifer}.scheduler', + graph.conditions, + component_identifiers, + blacklist=implicit_names + ) + ) + + return output def generate_script_from_json(model_input, outfile=None): @@ -1379,67 +1299,76 @@ def generate_script_from_json(model_input, outfile=None): """ + warnings.warn( + 'generate_script_from_json is replaced by generate_script_from_mdf and will be removed in a future version', + FutureWarning + ) + return generate_script_from_mdf(model_input, outfile) + + +def generate_script_from_mdf(model_input, outfile=None): + """ + Generate a Python script from MDF model **model_input** - def get_declared_identifiers(graphs_dict): - names = set() + .. warning:: + Use of `generate_script_from_mdf` to generate a Python script from a model without taking proper precautions + can introduce a security risk to the system on which the Python interpreter is running. This is because it + calls exec, which has the potential to execute non-PsyNeuLink-related code embedded in the file. Therefore, + `generate_script_from_mdf` should be used only to read models of known and secure origin.
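For orientation, a minimal usage sketch of the new entry point (hedged: the filename is hypothetical, and per the loading logic below the function also accepts a modeci_mdf Model object or a JSON/YAML string):

```python
# Minimal sketch, assuming an MDF file 'model.yml' of known, trusted origin
# and an installed modeci_mdf package.
import psyneulink as pnl

script_text = pnl.generate_script_from_mdf('model.yml')        # returns the script as a str
pnl.generate_script_from_mdf('model.yml', outfile='model.py')  # or write it to a file
```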
- for comp_name, composition_dict in graphs_dict.items(): - try: - assert comp_name == composition_dict['name'] - except KeyError: - pass + Arguments + --------- - names.add(parse_valid_identifier(comp_name)) - for name, node in composition_dict[MODEL_SPEC_ID_NODES].items(): - if MODEL_SPEC_ID_COMPOSITION in node: - names.update( - get_declared_identifiers( - node[MODEL_SPEC_ID_COMPOSITION] - ) - ) + model_input : modeci_mdf.Model - names.add(parse_valid_identifier(name)) + Returns + ------- + + Text of Python script : str + """ + import modeci_mdf.mdf as mdf + from modeci_mdf.utils import load_mdf + + def get_declared_identifiers(model): + names = set() + + for graph in model.graphs: + names.add(parse_valid_identifier(graph.id)) + for node in graph.nodes: + if isinstance(node, mdf.Graph): + names.update(get_declared_identifiers(graph)) + + names.add(parse_valid_identifier(node.id)) return names # accept either json string or filename try: - model_input = open(model_input, 'r').read() - except (FileNotFoundError, OSError): - pass - - try: - model_input = json.loads(model_input) - except json.decoder.JSONDecodeError: - raise ValueError( - f'{model_input} is neither valid JSON nor a file containing JSON' - ) - - assert len(model_input.keys()) == 1 - model_input = model_input[list(model_input.keys())[0]] + model = load_mdf(model_input) + except (FileNotFoundError, OSError, ValueError): + try: + model = mdf.Model.from_json(model_input) + except json.decoder.JSONDecodeError: + # assume yaml + # delete=False because of problems with reading file on windows + with tempfile.NamedTemporaryFile(mode='w', suffix='.yml', delete=False) as f: + f.write(model_input) + model = load_mdf(f.name) imports_str = '' - if MODEL_SPEC_ID_COMPOSITION in model_input: - # maps declared names to whether they are accessible in the script - # locals. that is, each of these will be names specified in the - # composition and subcomposition nodes, and their value in this dict - # will correspond to True if they can be referenced by this name in the - # script - component_identifiers = { - i: False - for i in get_declared_identifiers(model_input[MODEL_SPEC_ID_COMPOSITION]) - } + comp_strs = [] + # maps declared names to whether they are accessible in the script + # locals. 
that is, each of these will be names specified in the + # composition and subcomposition nodes, and their value in this dict + # will correspond to True if they can be referenced by this name in the + # script + component_identifiers = { + i: False + for i in get_declared_identifiers(model) + } - comp_str = _generate_composition_string( - model_input[MODEL_SPEC_ID_COMPOSITION], - component_identifiers - ) - else: - comp_str = _generate_component_string( - model_input, - component_identifiers={}, - assignment=True - ) + for graph in model.graphs: + comp_strs.append(_generate_composition_string(graph, component_identifiers)) module_friendly_name_mapping = { 'psyneulink': 'pnl', @@ -1447,92 +1376,98 @@ def get_declared_identifiers(graphs_dict): 'numpy': 'np' } + potential_module_names = set() module_names = set() + model_output = [] + + for i in range(len(comp_strs)): + # greedy and non-greedy + for cs in comp_strs[i]: + potential_module_names = set([ + *re.findall(r'([A-Za-z_\.]+)\.', cs), + *re.findall(r'([A-Za-z_\.]+?)\.', cs) + ]) - # greedy and non-greedy - potential_module_names = set([ - *re.findall(r'([A-Za-z_\.]+)\.', comp_str), - *re.findall(r'([A-Za-z_\.]+?)\.', comp_str) - ]) - for module in potential_module_names: - if module not in component_identifiers: - try: - exec(f'import {module}') - module_names.add(module) - except (ImportError, ModuleNotFoundError, SyntaxError): - pass + for module in potential_module_names: + if module not in component_identifiers: + try: + exec(f'import {module}') + module_names.add(module) + except (ImportError, ModuleNotFoundError, SyntaxError): + pass - for module in module_names.copy(): - try: - friendly_name = module_friendly_name_mapping[module] - comp_str = re.sub(f'{module}\\.', f'{friendly_name}.', comp_str) - except KeyError: - friendly_name = module + for j in range(len(comp_strs[i])): + for module in module_names.copy(): + try: + friendly_name = module_friendly_name_mapping[module] + comp_strs[i][j] = re.sub(f'{module}\\.', f'{friendly_name}.', comp_strs[i][j]) + except KeyError: + pass - if not re.findall(rf'[^\.]{friendly_name}\.', comp_str): - module_names.remove(module) + for m in module_names.copy(): + for n in module_names.copy(): + # remove potential modules that are substrings of another + if m is not n and m in n: + module_names.remove(m) - for m in module_names.copy(): - for n in module_names.copy(): - # remove potential modules that are substrings of another - if m is not n and m in n: - module_names.remove(m) + for module in sorted(module_names): + try: + friendly_name = module_friendly_name_mapping[module] + except KeyError: + friendly_name = module - for module in sorted(module_names): - try: - friendly_name = module_friendly_name_mapping[module] - except KeyError: - friendly_name = module + imports_str += 'import {0}{1}\n'.format( + module, + f' as {friendly_name}' if friendly_name != module else '' + ) - imports_str += 'import {0}{1}\n'.format( - module, - f' as {friendly_name}' if friendly_name != module else '' - ) + comp_strs[i] = '\n'.join(comp_strs[i]) model_output = '{0}{1}{2}'.format( imports_str, '\n' if len(imports_str) > 0 else '', - comp_str + '\n'.join(comp_strs) ) if outfile is not None: # pass through any file exceptions with open(outfile, 'w') as outfile: outfile.write(model_output) - print(f'Wrote JSON to {outfile.name}') + print(f'Wrote script to {outfile.name}') else: return model_output -def generate_script_from_mdf(model_input, outfile=None): +def generate_json(*compositions, simple_edge_format=True): 
""" - Generate a Python script from MDF model **model_input** - - .. warning:: - Use of `generate_script_from_mdf` to generate a Python script from a model without taking proper precautions - can introduce a security risk to the system on which the Python interpreter is running. This is because it - calls exec, which has the potential to execute non-PsyNeuLink-related code embedded in the file. Therefore, - `generate_script_from_mdf` should be used to read only model of known and secure origin. - - Arguments - --------- - - model_input : modeci_mdf.Model + Generate the `general JSON format ` + for one or more `Compositions ` and associated + objects. + .. _MDF_Write_Multiple_Compositions_Note: - Returns - ------- + .. note:: + At present, if more than one Composition is specified, all + must be fully disjoint; that is, they must not share any + `Components ` (e.g., `Mechanism`, `Projections` + etc.). This limitation will be addressed in a future update. - Text of Python script : str + Arguments: + *compositions : Composition + specifies `Composition` or iterable of ones to be output + in JSON """ - return generate_script_from_json(model_input.to_json(), outfile) + warnings.warn( + 'generate_json is replaced by get_mdf_serialized and will be removed in a future version', + FutureWarning + ) + return get_mdf_serialized(*compositions, fmt='json', simple_edge_format=simple_edge_format) -def generate_json(*compositions, simple_edge_format=True): +def get_mdf_serialized(*compositions, fmt='json', simple_edge_format=True): """ - Generate the `general JSON format ` + Generate the `general MDF serialized format ` for one or more `Compositions ` and associated objects. - .. _JSON_Write_Multiple_Compositions_Note: .. note:: At present, if more than one Composition is specified, all @@ -1543,28 +1478,23 @@ def generate_json(*compositions, simple_edge_format=True): Arguments: *compositions : Composition specifies `Composition` or iterable of ones to be output - in JSON - """ - import modeci_mdf - import modeci_mdf.mdf as mdf - from psyneulink.core.compositions.composition import Composition + in **fmt** - model_name = "_".join([c.name for c in compositions]) + fmt : str + specifies file format of output. Current options ('json', 'yml'/'yaml') - model = mdf.Model( - id=model_name, - format=f'ModECI MDF v{modeci_mdf.__version__}', - generating_application=f'PsyNeuLink v{psyneulink.__version__}', - ) - - for c in compositions: - if not isinstance(c, Composition): - raise PNLJSONError( - f'Item in compositions arg of {__name__}() is not a Composition: {c}.' - ) - model.graphs.append(c.as_mdf_model(simple_edge_format=simple_edge_format)) + simple_edge_format : bool + specifies use of + `simple edge format ` or not + """ + model = get_mdf_model(*compositions, simple_edge_format=simple_edge_format) - return model.to_json() + try: + return getattr(model, f'to_{supported_formats[fmt]}')() + except AttributeError as e: + raise ValueError( + f'Unsupported MDF output format "{fmt}". Supported formats: {gen_friendly_comma_str(supported_formats.keys())}' + ) from e def write_json_file(compositions, filename:str, path:str=None, simple_edge_format=True): @@ -1572,7 +1502,7 @@ def write_json_file(compositions, filename:str, path:str=None, simple_edge_forma Write one or more `Compositions ` and associated objects to file in the `general JSON format ` - .. _JSON_Write_Multiple_Compositions_Note: + .. _MDF_Write_Multiple_Compositions_Note: .. 
note:: At present, if more than one Composition is specified, all must be fully disjoint; that is, they must not @@ -1592,8 +1522,103 @@ def write_json_file(compositions, filename:str, path:str=None, simple_edge_forma specifies path of file for JSON specification; if it is not specified then the current directory is used. """ + warnings.warn( + 'write_json_file is replaced by write_mdf_file and will be removed in a future version', + FutureWarning + ) + write_mdf_file(compositions, filename, path, 'json', simple_edge_format) + + +def write_mdf_file(compositions, filename: str, path: str = None, fmt: str = None, simple_edge_format: bool = True): + """ + Write the `general MDF serialized format ` + for one or more `Compositions ` and associated + objects to file. + + .. note:: + At present, if more than one Composition is specified, all + must be fully disjoint; that is, they must not share any + `Components ` (e.g., `Mechanism`, `Projections` + etc.). This limitation will be addressed in a future update. + + Arguments: + compositions : Composition or list + specifies `Composition` or list of ones to be written to + **filename** + + filename : str + specifies name of file in which to write MDF + specification of `Composition(s) ` and + associated objects. + + path : str : default None + specifies path of file for MDF specification; if it is + not specified then the current directory is used. + fmt : str + specifies file format of output. Current options ('json', 'yml'/'yaml') + + simple_edge_format : bool + specifies use of + `simple edge format ` or not + """ compositions = convert_to_list(compositions) + model = get_mdf_model(*compositions, simple_edge_format=simple_edge_format) + + if fmt is None: + try: + fmt = re.match(r'(.*)\.(.*)$', filename).group(2) + except AttributeError: + fmt = 'json' + + if path is not None: + filename = os.path.join(path, filename) + + try: + return getattr(model, f'to_{supported_formats[fmt]}_file')(filename) + except AttributeError as e: + raise ValueError( + f'Unsupported MDF output format "{fmt}". Supported formats: {gen_friendly_comma_str(supported_formats.keys())}' + ) from e + + +def get_mdf_model(*compositions, simple_edge_format=True): + """ + Generate the MDF Model object for one or more + `Compositions ` and associated objects. + + .. note:: + At present, if more than one Composition is specified, all + must be fully disjoint; that is, they must not share any + `Components ` (e.g., `Mechanism`, `Projections` + etc.). This limitation will be addressed in a future update. + + Arguments: + *compositions : Composition + specifies `Composition` or iterable of ones to be output + in the Model + + simple_edge_format : bool + specifies use of + `simple edge format ` or not + """ + import modeci_mdf + import modeci_mdf.mdf as mdf + from psyneulink.core.compositions.composition import Composition + + model_name = "_".join([c.name for c in compositions]) + + model = mdf.Model( + id=model_name, + format=f'ModECI MDF v{modeci_mdf.__version__}', + generating_application=f'PsyNeuLink v{psyneulink.__version__}', + ) + + for c in compositions: + if not isinstance(c, Composition): + raise MDFError( + f'Item in compositions arg of {__name__}() is not a Composition: {c}.'
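A hedged usage sketch of the serialization helpers added above (filenames are hypothetical; when fmt is omitted, write_mdf_file infers it from the filename extension, falling back to 'json'):

```python
# Sketch only; assumes modeci_mdf is installed.
import psyneulink as pnl

comp = pnl.Composition(name='comp', pathways=[pnl.ProcessingMechanism(name='A')])

json_text = pnl.get_mdf_serialized(comp, fmt='json')  # or fmt='yml'
pnl.write_mdf_file(comp, 'comp.yml')                  # fmt inferred from '.yml'
model = pnl.get_mdf_model(comp)                       # the underlying mdf.Model object
```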
+ ) + model.graphs.append(c.as_mdf_model(simple_edge_format=simple_edge_format)) - with open(filename, 'w') as json_file: - json_file.write(generate_json(*compositions, simple_edge_format=simple_edge_format)) + return model diff --git a/psyneulink/core/globals/parameters.py b/psyneulink/core/globals/parameters.py index d7cc7233a38..819db375349 100644 --- a/psyneulink/core/globals/parameters.py +++ b/psyneulink/core/globals/parameters.py @@ -99,7 +99,10 @@ class B(A): class Parameters(A.Parameters): p = 1.0 - q = Parameter(1.0, modulable=True) + q = Parameter() + + def __init__(p=None, q=1.0): + super(p=p, q=q) - create an inner class Parameters on the Component, inheriting from the parent Component's Parameters class @@ -108,6 +111,8 @@ class Parameters(A.Parameters): - as with *p*, specifying only a value uses default values for the attributes of the Parameter - as with *q*, specifying an explicit instance of the Parameter class allows you to modify the `Parameter attributes ` +- default values for the parameters can be specified in the Parameters class body, or in the + arguments for *B*.__init__. If both are specified and the values differ, an exception will be raised - if you want assignments to parameter *p* to be validated, add a method _validate_p(value), that returns None if value is a valid assignment, or an error string if value is not a valid assignment - if you want all values set to *p* to be parsed beforehand, add a method _parse_p(value) that returns the parsed value @@ -295,6 +300,8 @@ def _recurrent_transfer_mechanism_matrix_setter(value, owning_component=None, co import collections import copy +import functools +import inspect import itertools import logging import types @@ -307,7 +314,7 @@ def _recurrent_transfer_mechanism_matrix_setter(value, owning_component=None, co from psyneulink.core.globals.context import time as time_object from psyneulink.core.globals.log import LogCondition, LogEntry, LogError from psyneulink.core.globals.utilities import call_with_pruned_args, copy_iterable_with_shared, \ - get_alias_property_getter, get_alias_property_setter, get_deepcopy_with_shared, unproxy_weakproxy, create_union_set + get_alias_property_getter, get_alias_property_setter, get_deepcopy_with_shared, unproxy_weakproxy, create_union_set, safe_equals, get_function_sig_default_value from psyneulink.core.rpc.graph_pb2 import Entry, ndArray __all__ = [ @@ -392,6 +399,92 @@ def copy_parameter_value(value, shared_types=None, memo=None): return value +def get_init_signature_default_value(obj, parameter): + """ + Returns: + the default value of the **parameter** argument of + the __init__ method of **obj** if it exists, or inspect._empty + """ + # only use the signature if it's on the owner class, not a parent + if '__init__' in obj.__dict__: + return get_function_sig_default_value(obj.__init__, parameter) + else: + return inspect._empty + + +def check_user_specified(func): + @functools.wraps(func) + def check_user_specified_wrapper(self, *args, **kwargs): + if 'params' in kwargs and kwargs['params'] is not None: + orig_kwargs = copy.copy(kwargs) + kwargs = {**kwargs, **kwargs['params']} + del kwargs['params'] + else: + orig_kwargs = kwargs + + # find the corresponding constructor in chained wrappers + constructor = func + while '__init__' not in constructor.__qualname__: + constructor = constructor.__wrapped__ + + for k, v in kwargs.items(): + try: + p = getattr(self.parameters, k) + except AttributeError: + pass + else: + if k == p.constructor_argument: + kwargs[p.name] = v + + try: 
+ self._user_specified_args + except AttributeError: + self._prev_constructor = constructor if '__init__' in type(self).__dict__ else None + self._user_specified_args = copy.copy(kwargs) + else: + # add args determined in constructor to user_specified. + # since some args are set by the values of other + # user_specified args in a constructor, we label these as + # user_specified also (ex. LCAMechanism hetero/competition) + for k, v in kwargs.items(): + # we only know changes in passed parameter values after + # calling the next __init__ in the hierarchy, so can + # only check _prev_constructor + if k not in self._user_specified_args and self._prev_constructor is not None: + prev_constructor_default = get_function_sig_default_value( + self._prev_constructor, k + ) + if ( + # arg value passed through constructor is + # different than default arg in signature + ( + type(prev_constructor_default) != type(v) + or not safe_equals(prev_constructor_default, v) + ) + # arg value is different than the value given + # from the previous constructor in the class + # hierarchy + and ( + k not in self._prev_kwargs + or ( + type(self._prev_kwargs[k]) != type(v) + or not safe_equals(self._prev_kwargs[k], v) + ) + ) + ): + # NOTE: this is a good place to identify + # potentially unnecessary/inconsistent default + # parameter settings in body of constructors + self._user_specified_args[k] = v + + self._prev_constructor = constructor + + self._prev_kwargs = kwargs + return func(self, *args, **orig_kwargs) + + return check_user_specified_wrapper + + class ParametersTemplate: _deepcopy_shared_keys = ['_parent', '_params', '_owner_ref', '_children'] _values_default_excluded_attrs = {'user': False} @@ -572,7 +665,6 @@ def __getattr__(self, attr): def __setattr__(self, attr, value): if (attr[:1] != '_'): param = getattr(self._owner.parameters, attr) - param._inherited = False param.default_value = value else: super().__setattr__(attr, value) @@ -796,6 +888,12 @@ class Parameter(ParameterBase): :default: None + specify_none + if True, a user-specified value of None for this Parameter + will set the _user_specified flag to True + + :default: False + """ # The values of these attributes will never be inherited from parent Parameters # KDM 7/12/18: consider inheriting ONLY default_value? @@ -823,7 +921,6 @@ class Parameter(ParameterBase): 'default_value', 'history_max_length', 'log_condition', - 'delivery_condition', 'spec', } @@ -861,6 +958,7 @@ def __init__( initializer=None, port=None, mdf_name=None, + specify_none=False, _owner=None, _inherited=False, # this stores a reference to the Parameter object that is the @@ -925,9 +1023,11 @@ def __init__( initializer=initializer, port=port, mdf_name=mdf_name, + specify_none=specify_none, _inherited=_inherited, _inherited_source=_inherited_source, _user_specified=_user_specified, + _temp_uninherited=set(), **kwargs ) @@ -1012,10 +1112,15 @@ def __getattr__(self, attr): def __setattr__(self, attr, value): if attr in self._additional_param_attr_properties: + self._temp_uninherited.add(attr) + self._inherited = False + try: getattr(self, '_set_{0}'.format(attr))(value) except AttributeError: super().__setattr__(attr, value) + + self._temp_uninherited.remove(attr) else: super().__setattr__(attr, value) @@ -1024,20 +1129,34 @@ def reset(self): Resets *default_value* to the value specified in its `Parameters` class declaration, or inherits from parent `Parameters` classes if it is not explicitly specified.
""" - try: - self.default_value = self._owner.__class__.__dict__[self.name].default_value - except (AttributeError, KeyError): + # check for default in Parameters class + cls_param_value = inspect._empty + if self._owner._param_is_specified_in_class(self.name): try: - self.default_value = self._owner.__class__.__dict__[self.name] + cls_param_value = self._owner.__class__.__dict__[self.name] except KeyError: - if self._parent is not None: - self._inherited = True - else: - raise ParameterError( - 'Parameter {0} cannot be reset, as it does not have a default specification ' - 'or a parent. This may occur if it was added dynamically rather than in an' - 'explict Parameters inner class on a Component' - ) + pass + else: + try: + cls_param_value = cls_param_value.default_value + except AttributeError: + pass + + # check for default in __init__ signature + value = self._owner._reconcile_value_with_init_default(self.name, cls_param_value) + if value is not inspect._empty: + self.default_value = value + return + + # no default specified, must be inherited or invalid + if self._parent is not None: + self._inherited = True + else: + raise ParameterError( + 'Parameter {0} cannot be reset, as it does not have a default specification ' + 'or a parent. This may occur if it was added dynamically rather than in an' + 'explict Parameters inner class on a Component' + ) def _register_alias(self, name): if self.aliases is None: @@ -1054,6 +1173,7 @@ def _inherited(self, value): if value is not self._inherited: # invalid if set to inherited self._is_invalid_source = value + self.__inherited = value if value: self._cache_inherited_attrs() @@ -1079,14 +1199,14 @@ def _inherited(self, value): self._restore_inherited_attrs() - self.__inherited = value - def _inherit_from(self, parent): self._inherited_source = weakref.ref(parent) def _cache_inherited_attrs(self, exclusions=None): if exclusions is None: - exclusions = self._uninherited_attrs + exclusions = set() + + exclusions = self._uninherited_attrs.union(self._temp_uninherited).union(exclusions) for attr in self._param_attrs: if attr not in exclusions: @@ -1095,7 +1215,9 @@ def _cache_inherited_attrs(self, exclusions=None): def _restore_inherited_attrs(self, exclusions=None): if exclusions is None: - exclusions = self._uninherited_attrs + exclusions = set() + + exclusions = self._uninherited_attrs.union(self._temp_uninherited).union(exclusions) for attr in self._param_attrs: if ( @@ -1787,12 +1909,12 @@ def __setattr__(self, attr, value): def _cache_inherited_attrs(self): super()._cache_inherited_attrs( - exclusions=self._uninherited_attrs.union(self._sourced_attrs) + exclusions=self._sourced_attrs ) def _restore_inherited_attrs(self): super()._restore_inherited_attrs( - exclusions=self._uninherited_attrs.union(self._sourced_attrs) + exclusions=self._sourced_attrs ) def _set_name(self, name): @@ -1944,16 +2066,20 @@ class ParametersBase(ParametersTemplate): _validation_method_prefix = '_validate_' def __init__(self, owner, parent=None): + self._initializing = True + super().__init__(owner=owner, parent=parent) aliases_to_create = set() for param_name, param_value in self.values(show_all=True).items(): + constructor_default = get_init_signature_default_value(self._owner, param_name) + if ( - param_name in self.__class__.__dict__ - and ( - param_name not in self._parent.__class__.__dict__ - or self._parent.__class__.__dict__[param_name] is not self.__class__.__dict__[param_name] + ( + constructor_default is not None + and constructor_default is not 
inspect._empty ) + or self._param_is_specified_in_class(param_name) ): # KDM 6/25/18: NOTE: this may need special handling if you're creating a ParameterAlias directly # in a class's Parameters class @@ -1979,6 +2105,8 @@ def __init__(self, owner, parent=None): for param, value in self.values(show_all=True).items(): self._validate(param, value.default_value) + self._initializing = False + def __getattr__(self, attr): def throw_error(): try: @@ -2017,10 +2145,20 @@ def __setattr__(self, attr, value): super().__setattr__(attr, value) else: if isinstance(value, Parameter): + if value._owner is None: + value._owner = self + elif value._owner is not self and self._initializing: + # case where no Parameters class defined on subclass + # but default value overridden in __init__ + value = copy.deepcopy(value) + value._owner = self + if value.name is None: value.name = attr - value._owner = self + if self._initializing and not value._inherited: + value.default_value = self._reconcile_value_with_init_default(attr, value.default_value) + super().__setattr__(attr, value) if value.aliases is not None: @@ -2072,6 +2210,9 @@ def __setattr__(self, attr, value): except AttributeError: current_value = None + if self._initializing: + value = self._reconcile_value_with_init_default(attr, value) + # assign value to default_value if isinstance(current_value, (Parameter, ParameterAlias)): # construct a copy because the original may be used as a base for reset() @@ -2092,6 +2233,39 @@ def __setattr__(self, attr, value): self._validate(attr, getattr(self, attr).default_value) self._register_parameter(attr) + def _reconcile_value_with_init_default(self, attr, value): + constructor_default = get_init_signature_default_value(self._owner, attr) + if constructor_default is not None and constructor_default is not inspect._empty: + if ( + value is None + or not self._param_is_specified_in_class(attr) + or ( + type(constructor_default) == type(value) + and safe_equals(constructor_default, value) + ) + ): + # TODO: consider placing a developer-focused warning here? + return constructor_default + else: + assert False, ( + 'PROGRAM ERROR: ' + f'Conflicting default parameter values assigned for Parameter {attr} of {self._owner} in:' + f'\n\t{self._owner}.Parameters: {value}' + f'\n\t{self._owner}.__init__: {constructor_default}' + f'\nRemove one of these assignments. 
Prefer removing the default_value of {attr} in {self._owner}.Parameters' + ) + + return value + + def _param_is_specified_in_class(self, param_name): + return ( + param_name in self.__class__.__dict__ + and ( + param_name not in self._parent.__class__.__dict__ + or self._parent.__class__.__dict__[param_name] is not self.__class__.__dict__[param_name] + ) + ) + def _get_prefixed_method( self, parse=False, diff --git a/psyneulink/core/globals/utilities.py b/psyneulink/core/globals/utilities.py index a77f319061c..84fd6a73f93 100644 --- a/psyneulink/core/globals/utilities.py +++ b/psyneulink/core/globals/utilities.py @@ -144,6 +144,7 @@ ] logger = logging.getLogger(__name__) +_signature_cache = weakref.WeakKeyDictionary() class UtilitiesError(Exception): @@ -1672,9 +1673,6 @@ def _get_arg_from_stack(arg_name:str): return arg_val -_unused_args_sig_cache = weakref.WeakKeyDictionary() - - def prune_unused_args(func, args=None, kwargs=None): """ Arguments @@ -1695,10 +1693,10 @@ def prune_unused_args(func, args=None, kwargs=None): """ # use the func signature to filter out arguments that aren't compatible try: - sig = _unused_args_sig_cache[func] + sig = _signature_cache[func] except KeyError: sig = inspect.signature(func) - _unused_args_sig_cache[func] = sig + _signature_cache[func] = sig has_args_param = False has_kwargs_param = False @@ -1943,3 +1941,24 @@ def _is_module_class(class_: type, module: types.ModuleType) -> bool: pass return False + + +def get_function_sig_default_value( + function: typing.Union[types.FunctionType, types.MethodType], + parameter: str +): + """ + Returns: + the default value of the **parameter** argument of + **function** if it exists, or inspect._empty + """ + try: + sig = _signature_cache[function] + except KeyError: + sig = inspect.signature(function) + _signature_cache[function] = sig + + try: + return sig.parameters[parameter].default + except KeyError: + return inspect._empty diff --git a/psyneulink/core/llvm/__init__.py b/psyneulink/core/llvm/__init__.py index f59a46e4dde..a62f8c875e3 100644 --- a/psyneulink/core/llvm/__init__.py +++ b/psyneulink/core/llvm/__init__.py @@ -158,12 +158,15 @@ def cuda_max_block_size(self, override): def cuda_call(self, *args, threads=1, block_size=None): block_size = self.cuda_max_block_size(block_size) grid = ((threads + block_size - 1) // block_size, 1) - self._cuda_kernel(*args, np.int32(threads), - block=(block_size, 1, 1), grid=grid) + ktime = self._cuda_kernel(*args, np.int32(threads), time_kernel="time_stat" in debug_env, + block=(block_size, 1, 1), grid=grid) + if "time_stat" in debug_env: + print("Time to run kernel '{}' using {} threads: {}".format( + self.name, threads, ktime)) - def cuda_wrap_call(self, *args, threads=1, block_size=None): + def cuda_wrap_call(self, *args, **kwargs): wrap_args = (jit_engine.pycuda.driver.InOut(a) if isinstance(a, np.ndarray) else a for a in args) - self.cuda_call(*wrap_args, threads=threads, block_size=block_size) + self.cuda_call(*wrap_args, **kwargs) @staticmethod @functools.lru_cache(maxsize=32) diff --git a/psyneulink/core/llvm/builder_context.py b/psyneulink/core/llvm/builder_context.py index 6fa5af54287..8695d1e5347 100644 --- a/psyneulink/core/llvm/builder_context.py +++ b/psyneulink/core/llvm/builder_context.py @@ -55,8 +55,9 @@ def module_count(): _BUILTIN_PREFIX = "__pnl_builtin_" -_builtin_intrinsics = frozenset(('pow', 'log', 'exp', 'tanh', 'coth', 'csch', 'is_close', 'mt_rand_init', - 'philox_rand_init')) +_builtin_intrinsics = frozenset(('pow', 'log', 'exp', 'tanh', 
'coth', 'csch', + 'is_close_float', 'is_close_double', + 'mt_rand_init', 'philox_rand_init')) class _node_wrapper(): @@ -198,7 +199,7 @@ def get_builtin(self, name: str, args=[], function_type=None): if name in _builtin_intrinsics: return self.import_llvm_function(_BUILTIN_PREFIX + name) if name in ('maxnum'): - function_type = pnlvm.ir.FunctionType(args[0], [args[0], args[0]]) + function_type = ir.FunctionType(args[0], [args[0], args[0]]) return self.module.declare_intrinsic("llvm." + name, args, function_type) def create_llvm_function(self, args, component, name=None, *, return_type=ir.VoidType(), tags:frozenset=frozenset()): @@ -206,21 +207,20 @@ def create_llvm_function(self, args, component, name=None, *, return_type=ir.Voi # Builtins are already unique and need to keep their special name func_name = name if name.startswith(_BUILTIN_PREFIX) else self.get_unique_name(name) - func_ty = pnlvm.ir.FunctionType(return_type, args) - llvm_func = pnlvm.ir.Function(self.module, func_ty, name=func_name) + func_ty = ir.FunctionType(return_type, args) + llvm_func = ir.Function(self.module, func_ty, name=func_name) llvm_func.attributes.add('argmemonly') for a in llvm_func.args: if isinstance(a.type, ir.PointerType): a.attributes.add('nonnull') metadata = self.get_debug_location(llvm_func, component) - if metadata is not None: - scope = dict(metadata.operands)["scope"] - llvm_func.set_metadata("dbg", scope) + scope = dict(metadata.operands)["scope"] + llvm_func.set_metadata("dbg", scope) # Create entry block block = llvm_func.append_basic_block(name="entry") - builder = pnlvm.ir.IRBuilder(block) + builder = ir.IRBuilder(block) builder.debug_metadata = metadata return builder @@ -262,12 +262,9 @@ def get_random_state_ptr(self, builder, component, state, params): used_seed = builder.load(used_seed_ptr) seed_ptr = helpers.get_param_ptr(builder, component, params, "seed") - if isinstance(seed_ptr.type.pointee, ir.ArrayType): - # Modulated params are usually single element arrays - seed_ptr = builder.gep(seed_ptr, [self.int32_ty(0), self.int32_ty(0)]) - new_seed = builder.load(seed_ptr) + new_seed = pnlvm.helpers.load_extract_scalar_array_one(builder, seed_ptr) # FIXME: The seed should ideally be integer already. - # However, it can be modulated and we don't support, + # However, it can be modulated and we don't support # passing integer values as computed results. new_seed = builder.fptoui(new_seed, used_seed.type) @@ -286,9 +283,6 @@ def get_random_state_ptr(self, builder, component, state, params): @staticmethod def get_debug_location(func: ir.Function, component): - if "debug_info" not in debug_env: - return - mod = func.module path = inspect.getfile(component.__class__) if component is not None else "" d_version = mod.add_metadata([ir.IntType(32)(2), "Dwarf Version", ir.IntType(32)(4)]) @@ -327,6 +321,14 @@ def get_debug_location(func: ir.Function, component): }) return di_loc + @staticmethod + def update_debug_loc_position(di_loc: ir.DIValue, line:int, column:int): + di_func = dict(di_loc.operands)["scope"] + + return di_loc.parent.add_debug_info("DILocation", { + "line": line, "column": column, "scope": di_func, + }) + @_comp_cached def get_input_struct_type(self, component): self._stats["input_structs_generated"] += 1 @@ -615,6 +617,10 @@ def _convert_llvm_ir_to_ctype(t: ir.Type): return ctypes.c_double elif type_t is ir.FloatType: return ctypes.c_float + elif type_t is ir.HalfType: + # There's no half type in ctypes. Use uint16 instead. + # User will need to do the necessary casting. 
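Because ctypes has no half-precision type, fp16 results surface as raw uint16 bits (the return just below completes this branch); a sketch of the cast a user would perform, assuming numpy is available:

```python
import ctypes
import numpy as np

# An fp16 value arrives as c_uint16; reinterpret the bits as float16.
raw = ctypes.c_uint16(0x3C00)  # 0x3C00 encodes 1.0 in IEEE-754 half precision
val = np.array([raw.value], dtype=np.uint16).view(np.float16)[0]
assert val == np.float16(1.0)
```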
+ return ctypes.c_uint16 elif type_t is ir.PointerType: pointee = _convert_llvm_ir_to_ctype(t.pointee) ret_t = ctypes.POINTER(pointee) diff --git a/psyneulink/core/llvm/builtins.py b/psyneulink/core/llvm/builtins.py index a64be3d4c6a..30973992713 100644 --- a/psyneulink/core/llvm/builtins.py +++ b/psyneulink/core/llvm/builtins.py @@ -11,14 +11,10 @@ from llvmlite import ir -from . import debug from . import helpers from .builder_context import LLVMBuilderContext, _BUILTIN_PREFIX -debug_env = debug.debug_env - - def _setup_builtin_func_builder(ctx, name, args, *, return_type=ir.VoidType()): builder = ctx.create_llvm_function(args, None, _BUILTIN_PREFIX + name, return_type=return_type) @@ -389,24 +385,27 @@ def setup_mat_add(ctx): def setup_is_close(ctx): - builder = _setup_builtin_func_builder(ctx, "is_close", [ctx.float_ty, - ctx.float_ty, - ctx.float_ty, - ctx.float_ty], - return_type=ctx.bool_ty) - val1, val2, rtol, atol = builder.function.args + # Make sure we always have fp64 variant + for float_ty in {ctx.float_ty, ir.DoubleType()}: + name = "is_close_{}".format(float_ty) + builder = _setup_builtin_func_builder(ctx, name, [float_ty, + float_ty, + float_ty, + float_ty], + return_type=ctx.bool_ty) + val1, val2, rtol, atol = builder.function.args - fabs_f = ctx.get_builtin("fabs", [val2.type]) + fabs_f = ctx.get_builtin("fabs", [val2.type]) - diff = builder.fsub(val1, val2, "is_close_diff") - abs_diff = builder.call(fabs_f, [diff], "is_close_abs") + diff = builder.fsub(val1, val2, "is_close_diff") + abs_diff = builder.call(fabs_f, [diff], "is_close_abs") - abs2 = builder.call(fabs_f, [val2], "abs_val2") + abs2 = builder.call(fabs_f, [val2], "abs_val2") - rtol = builder.fmul(rtol, abs2, "is_close_rtol") - tol = builder.fadd(rtol, atol, "is_close_atol") - res = builder.fcmp_ordered("<=", abs_diff, tol, "is_close_cmp") - builder.ret(res) + rtol = builder.fmul(rtol, abs2, "is_close_rtol") + tol = builder.fadd(rtol, atol, "is_close_atol") + res = builder.fcmp_ordered("<=", abs_diff, tol, "is_close_cmp") + builder.ret(res) def setup_csch(ctx): @@ -415,11 +414,14 @@ def setup_csch(ctx): x = builder.function.args[0] exp_f = ctx.get_builtin("exp", [x.type]) # (2e**x)/(e**2x - 1) + # 2/(e**x - e**-x) ex = builder.call(exp_f, [x]) - num = builder.fmul(ex.type(2), ex) - _2x = builder.fmul(x.type(2), x) - e2x = builder.call(exp_f, [_2x]) - den = builder.fsub(e2x, e2x.type(1)) + + nx = helpers.fneg(builder, x) + enx = builder.call(exp_f, [nx]) + den = builder.fsub(ex, enx) + num = den.type(2) + res = builder.fdiv(num, den) builder.ret(res) @@ -429,12 +431,13 @@ def setup_tanh(ctx): return_type=ctx.float_ty) x = builder.function.args[0] exp_f = ctx.get_builtin("exp", [x.type]) - # (e**2x - 1)/(e**2x + 1) + # (e**2x - 1)/(e**2x + 1) is faster but doesn't handle large inputs (exp -> Inf) well (Inf/Inf = NaN) + # (1 - (2/(exp(2*x) + 1))) is a bit slower but handles large inputs better _2x = builder.fmul(x.type(2), x) e2x = builder.call(exp_f, [_2x]) - num = builder.fsub(e2x, e2x.type(1)) den = builder.fadd(e2x, e2x.type(1)) - res = builder.fdiv(num, den) + res = builder.fdiv(den.type(2), den) + res = builder.fsub(res.type(1), res) builder.ret(res) @@ -443,12 +446,14 @@ def setup_coth(ctx): return_type=ctx.float_ty) x = builder.function.args[0] exp_f = ctx.get_builtin("exp", [x.type]) + # (e**2x + 1)/(e**2x - 1) is faster but doesn't handle large inputs (exp -> Inf) well (Inf/Inf = NaN) + # (1 + (2/(exp(2*x) - 1))) is a bit slower but handles large inputs better # (e**2x + 1)/(e**2x - 1) _2x = 
builder.fmul(x.type(2), x) e2x = builder.call(exp_f, [_2x]) - num = builder.fadd(e2x, e2x.type(1)) den = builder.fsub(e2x, e2x.type(1)) - res = builder.fdiv(num, den) + res = builder.fdiv(den.type(2), den) + res = builder.fadd(res.type(1), res) builder.ret(res) diff --git a/psyneulink/core/llvm/codegen.py b/psyneulink/core/llvm/codegen.py index 7b8258e077b..76f29f8bbfb 100644 --- a/psyneulink/core/llvm/codegen.py +++ b/psyneulink/core/llvm/codegen.py @@ -75,6 +75,10 @@ def np_cmp(builder, x, y): self.name_constants = name_constants super().__init__() + def _update_debug_metadata(self, builder: ir.IRBuilder, node:ast.AST): + builder.debug_metadata = self.ctx.update_debug_loc_position(builder.debug_metadata, + node.lineno, + node.col_offset) def get_rval(self, val): if helpers.is_pointer(val): return self.builder.load(val) @@ -99,18 +103,22 @@ def visit_arguments(self, node): else: self.register[param.arg] = self.func_params[param.arg] - def visit_FunctionDef(self, node): + def visit_FunctionDef(self, node:ast.AST): # the current position will be used to create temp space # for local variables. This block dominates all others # generated by this visitor. self.var_builder = self.builder + self._update_debug_metadata(self.var_builder, node) # Create a new basic block to house the generated code udf_block = self.builder.append_basic_block(name="udf_body") self.builder = ir.IRBuilder(udf_block) + self.builder.debug_metadata = self.var_builder.debug_metadata super().generic_visit(node) + self._update_debug_metadata(self.builder, node) + if not self.builder.block.is_terminated: # the function didn't use return as the last statement # e.g. only includes 'return' statements in if blocks @@ -120,10 +128,11 @@ def visit_FunctionDef(self, node): self.var_builder.branch(udf_block) return self.builder - def visit_Lambda(self, node): + def visit_Lambda(self, node:ast.AST): self.visit(node.args) expr = self.visit(node.body) + self._update_debug_metadata(self.builder, node) # store the lambda expression in the result and terminate self.builder.store(expr, self.arg_out) self.builder.ret_void() @@ -197,9 +206,10 @@ def _not(builder, x): def visit_Name(self, node): return self.register.get(node.id, None) - def visit_Attribute(self, node): + def visit_Attribute(self, node:ast.AST): val = self.visit(node.value) + self._update_debug_metadata(self.builder, node) # special case numpy attributes if node.attr == "shape": shape = helpers.get_array_shape(val) @@ -233,11 +243,17 @@ def visit_Num(self, node): return self.ctx.float_ty(node.n) def visit_Assign(self, node): - value = self.get_rval(self.visit(node.value)) + value = self.visit(node.value) + + self._update_debug_metadata(self.builder, node) + value = self.get_rval(value) for t in node.targets: target = self.visit(t) + # Visiting 't' might have changed code location metadata + self._update_debug_metadata(self.builder, node) if target is None: # Allocate space for new variable + self._update_debug_metadata(self.var_builder, node) target = self.var_builder.alloca(value.type, name=str(t.id) + '_local_variable') self.register[t.id] = target assert self.is_lval(target) @@ -248,10 +264,13 @@ def visit_NameConstant(self, node): assert val, f"Failed to convert NameConstant {node.value}" return val - def visit_Tuple(self, node): + def visit_Tuple(self, node:ast.AST): elements = (self.visit(element) for element in node.elts) + + self._update_debug_metadata(self.builder, node) element_values = [self.builder.load(element) if helpers.is_pointer(element) else element for 
element in elements] element_types = [element.type for element in element_values] + if len(element_types) > 0 and all(x == element_types[0] for x in element_types): result = ir.ArrayType(element_types[0], len(element_types))(ir.Undefined) else: @@ -277,9 +296,12 @@ def _do_unary_op(self, builder, x, scalar_op): return result - def visit_UnaryOp(self, node): + def visit_UnaryOp(self, node:ast.AST): operator = self.visit(node.op) - operand = self.get_rval(self.visit(node.operand)) + + operand = self.visit(node.operand) + self._update_debug_metadata(self.builder, node) + operand = self.get_rval(operand) return self._do_unary_op(self.builder, operand, operator) def _do_bin_op(self, builder, x, y, scalar_op): @@ -308,15 +330,22 @@ def _do_bin_op(self, builder, x, y, scalar_op): return res - def visit_BinOp(self, node): + def visit_BinOp(self, node:ast.AST): operator = self.visit(node.op) - lhs = self.get_rval(self.visit(node.left)) - rhs = self.get_rval(self.visit(node.right)) + lhs = self.visit(node.left) + rhs = self.visit(node.right) + + self._update_debug_metadata(self.builder, node) + lhs = self.get_rval(lhs) + rhs = self.get_rval(rhs) return self._do_bin_op(self.builder, lhs, rhs, operator) - def visit_BoolOp(self, node): + def visit_BoolOp(self, node:ast.AST): operator = self.visit(node.op) - values = (self.get_rval(self.visit(value)) for value in node.values) + values = list(self.visit(value) for value in node.values) + + self._update_debug_metadata(self.builder, node) + values = (self.get_rval(v) for v in values) ret_val = next(values) for value in values: assert ret_val.type == value.type, "Don't know how to mix types in boolean expressions!" @@ -342,7 +371,11 @@ def _or(builder, x, y): return _or def visit_List(self, node): - element_values = [self.get_rval(self.visit(element)) for element in node.elts] + elements = list(self.visit(element) for element in node.elts) + + self._update_debug_metadata(self.builder, node) + element_values = [self.get_rval(e) for e in elements] + element_types = [element.type for element in element_values] assert all(e_type == element_types[0] for e_type in element_types), f"Unable to convert {node} into a list! 
(Elements differ in type!)" result = ir.ArrayType(element_types[0], len(element_types))(ir.Undefined) @@ -382,17 +415,22 @@ def visit_GtE(self, node): return self._generate_fcmp_handler(self.ctx, self.builder, ">=") def visit_Compare(self, node): - result = self.get_rval(self.visit(node.left)) + res = self.visit(node.left) + comparators = list(self.visit(comparator) for comparator in node.comparators) + ops = list(self.visit(op) for op in node.ops) - comparators = (self.visit(comparator) for comparator in node.comparators) + self._update_debug_metadata(self.builder, node) + result = self.get_rval(res) values = (self.builder.load(val) if helpers.is_pointer(val) else val for val in comparators) - ops = (self.visit(op) for op in node.ops) for val, op in zip(values, ops): result = self._do_bin_op(self.builder, result, val, op) return result - def visit_If(self, node): - cond_val = self.get_rval(self.visit(node.test)) + def visit_If(self, node:ast.AST): + cond = self.visit(node.test) + + self._update_debug_metadata(self.builder, node) + cond_val = self.get_rval(cond) predicate = helpers.convert_type(self.builder, cond_val, self.ctx.bool_ty) with self.builder.if_else(predicate) as (then, otherwise): @@ -403,10 +441,11 @@ def visit_If(self, node): for child in node.orelse: self.visit(child) - def visit_Return(self, node): + def visit_Return(self, node:ast.AST): ret_val = self.visit(node.value) arg_out = self.arg_out + self._update_debug_metadata(self.builder, node) # dereference pointer if helpers.is_pointer(ret_val): ret_val = self.builder.load(ret_val) @@ -423,9 +462,11 @@ def visit_Return(self, node): self.builder.store(ret_val, arg_out) self.builder.ret_void() - def visit_Subscript(self, node): + def visit_Subscript(self, node:ast.AST): node_val = self.visit(node.value) index = self.visit(node.slice) + + self._update_debug_metadata(self.builder, node) node_slice_val = helpers.convert_type(self.builder, index, self.ctx.int32_ty) if not self.is_lval(node_val): temp_node_val = self.builder.alloca(node_val.type) @@ -434,7 +475,7 @@ def visit_Subscript(self, node): return self.builder.gep(node_val, [self.ctx.int32_ty(0), node_slice_val]) - def visit_Index(self, node): + def visit_Index(self, node:ast.AST): """ Returns the wrapped value. @@ -442,12 +483,13 @@ def visit_Index(self, node): """ return self.visit(node.value) - def visit_Call(self, node): + def visit_Call(self, node:ast.AST): node_args = [self.visit(arg) for arg in node.args] call_func = self.visit(node.func) assert callable(call_func), f"Uncallable function {node.func}!" 
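For context, the visitor methods above compile ordinary Python user-defined functions; a hypothetical example of the kind of function they consume (names are illustrative, not taken from this patch):

```python
import psyneulink as pnl

def my_udf(variable):
    # subscripts, comparisons, and calls all route through the visitors above
    if variable[0] > 0.0:
        return variable[0]
    return 0.0

mech = pnl.ProcessingMechanism(
    function=pnl.UserDefinedFunction(custom_function=my_udf, default_variable=[0.0])
)
```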
+ self._update_debug_metadata(self.builder, node) return call_func(self.builder, *node_args) # Python builtins @@ -513,7 +555,7 @@ def call_builtin_np_max(self, builder, x): x = self.get_rval(x) if helpers.is_scalar(x): return x - res = self.ctx.float_ty("-Inf") + res = self.ctx.float_ty(float("-Inf")) def find_max(builder, x): nonlocal res # to propagate NaNs we use unordered >, diff --git a/psyneulink/core/llvm/debug.py b/psyneulink/core/llvm/debug.py index 0a6788f838d..02038133f0a 100644 --- a/psyneulink/core/llvm/debug.py +++ b/psyneulink/core/llvm/debug.py @@ -23,7 +23,6 @@ * "print_values" -- Enabled printfs in llvm code (from ctx printf helper) Compilation modifiers: - * "debug_info" -- emit line debugging information when generating LLVM IR * "const_data" -- hardcode initial output values into generated code, instead of loading them from the data argument * "const_input" -- hardcode input values for composition runs diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index ff7a2defdd6..ab96adfafd4 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -48,10 +48,23 @@ def _tupleize(x): return x if x is not None else tuple() def _element_dtype(x): + """ + Extract base builtin type from aggregate type. + + Throws assertion failure if the aggregate type includes more than one base type. + The assumption is that array of builtin type has the same binary layout as + the original aggregate and it's easier to construct + """ dt = np.dtype(x) while dt.subdtype is not None: dt = dt.subdtype[0] + if not dt.isbuiltin: + fdts = (_element_dtype(f[0]) for f in dt.fields.values()) + dt = next(fdts) + assert all(dt == fdt for fdt in fdts) + + assert dt.isbuiltin, "Element type is not builtin: {} from {}".format(dt, np.dtype(x)) return dt def _pretty_size(size): @@ -683,7 +696,7 @@ def _prepare_evaluate(self, variable, num_evaluations): # Construct input variable var_dty = _element_dtype(bin_func.byref_arg_types[5]) - converted_variable = np.asfarray(np.concatenate(variable), dtype=var_dty) + converted_variable = np.concatenate(variable, dtype=var_dty) # Output ctype out_ty = bin_func.byref_arg_types[4] * num_evaluations @@ -715,17 +728,27 @@ def thread_evaluate(self, variable, num_evaluations): ct_results = out_ty() ct_variable = converted_variale.ctypes.data_as(self.__bin_func.c_func.argtypes[5]) - # There are 7 arguments to evaluate_alloc_range: - # comp_param, comp_state, from, to, results, input, comp_data jobs = min(os.cpu_count(), num_evaluations) evals_per_job = (num_evaluations + jobs - 1) // jobs - executor = concurrent.futures.ThreadPoolExecutor(max_workers=jobs) - for i in range(jobs): - start = i * evals_per_job - stop = min((i + 1) * evals_per_job, num_evaluations) - executor.submit(self.__bin_func, ct_param, ct_state, int(start), - int(stop), ct_results, ct_variable, ct_data) - - executor.shutdown() + + parallel_start = time.time() + with concurrent.futures.ThreadPoolExecutor(max_workers=jobs) as ex: + # There are 7 arguments to evaluate_alloc_range: + # comp_param, comp_state, from, to, results, input, comp_data + results = [ex.submit(self.__bin_func, ct_param, ct_state, + int(i * evals_per_job), + min((i + 1) * evals_per_job, num_evaluations), + ct_results, ct_variable, ct_data) + for i in range(jobs)] + + parallel_stop = time.time() + if "time_stat" in self._debug_env: + print("Time to run {} executions of '{}' in {} threads: {}".format( + num_evaluations, self.__bin_func.name, jobs, + parallel_stop - 
parallel_start)) + + + exceptions = [r.exception() for r in results] + assert all(e is None for e in exceptions), "Not all jobs finished successfully: {}".format(exceptions) return ct_results diff --git a/psyneulink/core/llvm/helpers.py b/psyneulink/core/llvm/helpers.py index 6b54aaf4bde..bdb887e7ddc 100644 --- a/psyneulink/core/llvm/helpers.py +++ b/psyneulink/core/llvm/helpers.py @@ -144,15 +144,14 @@ def get_state_ptr(builder, component, state_ptr, stateful_name, hist_idx=0): return ptr -def push_state_val(builder, component, state_ptr, name, new_val): +def get_state_space(builder, component, state_ptr, name): val_ptr = get_state_ptr(builder, component, state_ptr, name, None) for i in range(len(val_ptr.type.pointee) - 1, 0, -1): dest_ptr = get_state_ptr(builder, component, state_ptr, name, i) src_ptr = get_state_ptr(builder, component, state_ptr, name, i - 1) builder.store(builder.load(src_ptr), dest_ptr) - dest_ptr = get_state_ptr(builder, component, state_ptr, name) - builder.store(builder.load(new_val), dest_ptr) + return get_state_ptr(builder, component, state_ptr, name) def unwrap_2d_array(builder, element): @@ -219,7 +218,8 @@ def csch(ctx, builder, x): def is_close(ctx, builder, val1, val2, rtol=1e-05, atol=1e-08): - is_close_f = ctx.get_builtin("is_close") + assert val1.type == val2.type + is_close_f = ctx.get_builtin("is_close_{}".format(val1.type)) rtol_val = val1.type(rtol) atol_val = val1.type(atol) return builder.call(is_close_f, [val1, val2, rtol_val, atol_val]) @@ -304,6 +304,24 @@ def convert_type(builder, val, t): # Python integers are signed return builder.fptosi(val, t) + if is_floating_point(val) and is_floating_point(t): + if isinstance(val.type, ir.HalfType) or isinstance(t, ir.DoubleType): + return builder.fpext(val, t) + elif isinstance(val.type, ir.DoubleType) or isinstance(t, ir.HalfType): + # FIXME: Direct conversion from double to half needs a runtime + # function (__truncdfhf2). llvmlite MCJIT fails to provide + # it and instead generates invocation of a NULL pointer. + # Use double conversion (double->float->half) instead.
+ # Both steps can be done in one CPU instruction, + # but the result can be slightly different + # see: https://github.com/numba/llvmlite/issues/834 + if isinstance(val.type, ir.DoubleType) and isinstance(t, ir.HalfType): + val = builder.fptrunc(val, ir.FloatType()) + return builder.fptrunc(val, t) + else: + assert val.type == t + return val + assert False, "Unknown type conversion: {} -> {}".format(val.type, t) @@ -463,18 +481,24 @@ def get_private_condition_initializer(self, composition): return ((0, 0, 0), tuple((0, (-1, -1, -1)) for _ in composition.nodes)) - def get_condition_struct_type(self, composition=None): - composition = self.composition if composition is None else composition - structs = [self.get_private_condition_struct_type(composition)] - for node in composition.nodes: - structs.append(self.get_condition_struct_type(node) if isinstance(node, type(self.composition)) else ir.LiteralStructType([])) + def get_condition_struct_type(self, node=None): + node = self.composition if node is None else node + + subnodes = getattr(node, 'nodes', []) + structs = [self.get_condition_struct_type(n) for n in subnodes] + if len(structs) != 0: + structs.insert(0, self.get_private_condition_struct_type(node)) + return ir.LiteralStructType(structs) - def get_condition_initializer(self, composition=None): - composition = self.composition if composition is None else composition - data = [self.get_private_condition_initializer(composition)] - for node in composition.nodes: - data.append(self.get_condition_initializer(node) if isinstance(node, type(self.composition)) else tuple()) + def get_condition_initializer(self, node=None): + node = self.composition if node is None else node + + subnodes = getattr(node, 'nodes', []) + data = [self.get_condition_initializer(n) for n in subnodes] + if len(data) != 0: + data.insert(0, self.get_private_condition_initializer(node)) + return tuple(data) def bump_ts(self, builder, cond_ptr, count=(0, 0, 1)): diff --git a/psyneulink/core/scheduling/condition.py b/psyneulink/core/scheduling/condition.py index aba519892b7..2d0e1fbfdf3 100644 --- a/psyneulink/core/scheduling/condition.py +++ b/psyneulink/core/scheduling/condition.py @@ -20,7 +20,7 @@ import numpy as np from psyneulink.core.globals.context import handle_external_context -from psyneulink.core.globals.json import JSONDumpable +from psyneulink.core.globals.mdf import MDFSerializable from psyneulink.core.globals.keywords import MODEL_SPEC_ID_TYPE, comparison_operators from psyneulink.core.globals.parameters import parse_context from psyneulink.core.globals.utilities import parse_valid_identifier @@ -58,7 +58,7 @@ def _create_as_pnl_condition(condition): return res -class Condition(graph_scheduler.Condition, JSONDumpable): +class Condition(graph_scheduler.Condition, MDFSerializable): @handle_external_context() def is_satisfied(self, *args, context=None, execution_id=None, **kwargs): if execution_id is None: @@ -293,6 +293,6 @@ def as_mdf_model(self): m = super().as_mdf_model() if self.parameter == 'value': - m.args['parameter'] = f'{self.dependency.name}_OutputPort_0' + m.kwargs['parameter'] = f'{self.dependency.name}_OutputPort_0' return m diff --git a/psyneulink/core/scheduling/scheduler.py b/psyneulink/core/scheduling/scheduler.py index f163dc19900..3db80e0551a 100644 --- a/psyneulink/core/scheduling/scheduler.py +++ b/psyneulink/core/scheduling/scheduler.py @@ -8,6 +8,7 @@ # ********************************************* Scheduler ************************************************************** import copy 
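The two-step double-to-half truncation described in the helpers.py change above can be sketched in plain llvmlite (a sketch under the same assumption about __truncdfhf2, not code from this patch):

```python
from llvmlite import ir

f64, f32, f16 = ir.DoubleType(), ir.FloatType(), ir.HalfType()
module = ir.Module()
fn = ir.Function(module, ir.FunctionType(f16, [f64]), name="trunc_f64_to_f16")
builder = ir.IRBuilder(fn.append_basic_block("entry"))
# A direct f64->f16 fptrunc would need the __truncdfhf2 runtime helper,
# which llvmlite's MCJIT does not provide; go through f32 instead.
res = builder.fptrunc(builder.fptrunc(fn.args[0], f32), f16)
builder.ret(res)
```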
+import logging import typing import graph_scheduler @@ -15,7 +16,7 @@ from psyneulink import _unit_registry from psyneulink.core.globals.context import Context, handle_external_context -from psyneulink.core.globals.json import JSONDumpable +from psyneulink.core.globals.mdf import MDFSerializable from psyneulink.core.globals.utilities import parse_valid_identifier from psyneulink.core.scheduling.condition import _create_as_pnl_condition @@ -24,10 +25,11 @@ ] +logger = logging.getLogger(__name__) SchedulingMode = graph_scheduler.scheduler.SchedulingMode -class Scheduler(graph_scheduler.Scheduler, JSONDumpable): +class Scheduler(graph_scheduler.Scheduler, MDFSerializable): def __init__( self, composition=None, @@ -50,7 +52,7 @@ def __init__( default_execution_id = composition.default_execution_id # TODO: consider integrating something like this into graph-scheduler? - self._user_specified_conds = copy.copy(conditions) + self._user_specified_conds = copy.copy(conditions) if conditions is not None else {} super().__init__( graph=graph, @@ -70,19 +72,51 @@ def replace_term_conds(term_conds): self.default_termination_conds = replace_term_conds(self.default_termination_conds) self.termination_conds = replace_term_conds(self.termination_conds) + def _validate_conditions(self): + unspecified_nodes = [] + for node in self.nodes: + if node not in self.conditions: + dependencies = list(self.dependency_dict[node]) + if len(dependencies) == 0: + cond = graph_scheduler.Always() + elif len(dependencies) == 1: + cond = graph_scheduler.EveryNCalls(dependencies[0], 1) + else: + cond = graph_scheduler.All(*[graph_scheduler.EveryNCalls(x, 1) for x in dependencies]) + + # TODO: replace this call in graph-scheduler if adding _user_specified_conds + self._add_condition(node, cond) + unspecified_nodes.append(node) + if len(unspecified_nodes) > 0: + logger.info( + 'These nodes have no Conditions specified, and will be scheduled with conditions: {0}'.format( + {node: self.conditions[node] for node in unspecified_nodes} + ) + ) + def add_condition(self, owner, condition): - super().add_condition(owner, _create_as_pnl_condition(condition)) + self._user_specified_conds[owner] = condition + self._add_condition(owner, condition) + + def _add_condition(self, owner, condition): + condition = _create_as_pnl_condition(condition) + super().add_condition(owner, condition) def add_condition_set(self, conditions): + self._user_specified_conds.update(conditions) + self._add_condition_set(conditions) + + def _add_condition_set(self, conditions): try: conditions = conditions.conditions except AttributeError: pass - super().add_condition_set({ + conditions = { node: _create_as_pnl_condition(cond) for node, cond in conditions.items() - }) + } + super().add_condition_set(conditions) @handle_external_context(fallback_default=True) def run( diff --git a/psyneulink/library/components/mechanisms/modulatory/control/agt/agtcontrolmechanism.py b/psyneulink/library/components/mechanisms/modulatory/control/agt/agtcontrolmechanism.py index f97e9a80003..92c245d3275 100644 --- a/psyneulink/library/components/mechanisms/modulatory/control/agt/agtcontrolmechanism.py +++ b/psyneulink/library/components/mechanisms/modulatory/control/agt/agtcontrolmechanism.py @@ -169,6 +169,7 @@ from psyneulink.core.components.ports.outputport import OutputPort from psyneulink.core.globals.keywords import \ INIT_EXECUTE_METHOD_ONLY, MECHANISM, OBJECTIVE_MECHANISM +from psyneulink.core.globals.parameters import check_user_specified from 
psyneulink.core.globals.preferences.basepreferenceset import is_pref_set from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel @@ -244,6 +245,7 @@ class AGTControlMechanism(ControlMechanism): # PREFERENCE_SET_NAME: 'ControlMechanismClassPreferences', # PREFERENCE_KEYWORD: ...} + @check_user_specified @tc.typecheck def __init__(self, monitored_output_ports=None, diff --git a/psyneulink/library/components/mechanisms/modulatory/control/agt/lccontrolmechanism.py b/psyneulink/library/components/mechanisms/modulatory/control/agt/lccontrolmechanism.py index bcee443a12e..ae6b0af7a9d 100644 --- a/psyneulink/library/components/mechanisms/modulatory/control/agt/lccontrolmechanism.py +++ b/psyneulink/library/components/mechanisms/modulatory/control/agt/lccontrolmechanism.py @@ -307,7 +307,7 @@ from psyneulink.core.components.ports.outputport import OutputPort from psyneulink.core.globals.keywords import \ INIT_EXECUTE_METHOD_ONLY, MULTIPLICATIVE_PARAM, PROJECTIONS -from psyneulink.core.globals.parameters import Parameter, ParameterAlias +from psyneulink.core.globals.parameters import Parameter, ParameterAlias, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel from psyneulink.core.globals.utilities import is_iterable, convert_to_list @@ -662,6 +662,7 @@ class Parameters(ControlMechanism.Parameters): modulated_mechanisms = Parameter(None, stateful=False, loggable=False) + @check_user_specified @tc.typecheck def __init__(self, default_variable=None, @@ -821,8 +822,7 @@ def _execute( ): """Updates LCControlMechanism's ControlSignal based on input and mode parameter value """ - # IMPLEMENTATION NOTE: skip ControlMechanism._execute since it is a stub method that returns input_values - output_values = super(ControlMechanism, self)._execute( + output_values = super()._execute( variable=variable, context=context, runtime_params=runtime_params, @@ -834,58 +834,54 @@ def _execute( return gain_t, output_values[0], output_values[1], output_values[2] - def _gen_llvm_invoke_function(self, ctx, builder, function, params, state, variable, *, tags:frozenset): - assert function is self.function - mf_out, builder = super()._gen_llvm_invoke_function(ctx, builder, function, params, state, variable, tags=tags) + def _gen_llvm_mechanism_functions(self, ctx, builder, m_base_params, m_params, m_state, m_in, + m_val, ip_output, *, tags:frozenset): + mf_out, builder = super()._gen_llvm_mechanism_functions(ctx, builder, m_base_params, + m_params, m_state, m_in, + None, ip_output, tags=tags) # prepend gain type (matches output[1] type) gain_ty = mf_out.type.pointee.elements[1] - elements = gain_ty, *mf_out.type.pointee.elements - elements_ty = pnlvm.ir.LiteralStructType(elements) - # allocate new output type - new_out = builder.alloca(elements_ty, name="function_out") + assert all(e == gain_ty for e in mf_out.type.pointee.elements) + mech_out_ty = pnlvm.ir.ArrayType(gain_ty, len(mf_out.type.pointee.elements) + 1) + + # allocate a new output location if the type doesn't match the one + # provided by the caller. 
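The loop in the next hunk applies the same elementwise affine transform the Python path uses for the LC gain: out[i] = in[i] * scaling_factor_gain + base_level_gain. A numpy sketch of that arithmetic (parameter names follow the diff; the values here are illustrative, not PsyNeuLink defaults):

    import numpy as np

    def lc_gain(w, scaling_factor_gain, base_level_gain):
        # gain(t) = scaling_factor_gain * w(t) + base_level_gain, applied elementwise
        return np.asarray(w, dtype=float) * scaling_factor_gain + base_level_gain

    lc_gain([0.1, 0.2], scaling_factor_gain=3.0, base_level_gain=0.5)  # -> array([0.8, 1.1])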
+ if mech_out_ty != m_val.type.pointee: + m_val = builder.alloca(mech_out_ty, name="mechanism_out") # Load mechanism parameters - params = builder.function.args[0] - scaling_factor_ptr = pnlvm.helpers.get_param_ptr(builder, self, params, + scaling_factor_ptr = pnlvm.helpers.get_param_ptr(builder, self, m_params, "scaling_factor_gain") - base_factor_ptr = pnlvm.helpers.get_param_ptr(builder, self, params, + base_factor_ptr = pnlvm.helpers.get_param_ptr(builder, self, m_params, "base_level_gain") - # If modulated, scaling factor is a single element array - if isinstance(scaling_factor_ptr.type.pointee, pnlvm.ir.ArrayType): - assert len(scaling_factor_ptr.type.pointee) == 1 - scaling_factor_ptr = builder.gep(scaling_factor_ptr, - [ctx.int32_ty(0), ctx.int32_ty(0)]) - # If modulated, base factor is a single element array - if isinstance(base_factor_ptr.type.pointee, pnlvm.ir.ArrayType): - assert len(base_factor_ptr.type.pointee) == 1 - base_factor_ptr = builder.gep(base_factor_ptr, - [ctx.int32_ty(0), ctx.int32_ty(0)]) - scaling_factor = builder.load(scaling_factor_ptr) - base_factor = builder.load(base_factor_ptr) - - # Apply to the entire vector + # If modulated, parameters are single element array + scaling_factor = pnlvm.helpers.load_extract_scalar_array_one(builder, scaling_factor_ptr) + base_factor = pnlvm.helpers.load_extract_scalar_array_one(builder, base_factor_ptr) + + # Apply to the entire first subvector vi = builder.gep(mf_out, [ctx.int32_ty(0), ctx.int32_ty(1)]) - vo = builder.gep(new_out, [ctx.int32_ty(0), ctx.int32_ty(0)]) + vo = builder.gep(m_val, [ctx.int32_ty(0), ctx.int32_ty(0)]) with pnlvm.helpers.array_ptr_loop(builder, vi, "LC_gain") as (b1, index): in_ptr = b1.gep(vi, [ctx.int32_ty(0), index]) + out_ptr = b1.gep(vo, [ctx.int32_ty(0), index]) + val = b1.load(in_ptr) val = b1.fmul(val, scaling_factor) val = b1.fadd(val, base_factor) - out_ptr = b1.gep(vo, [ctx.int32_ty(0), index]) b1.store(val, out_ptr) # copy the main function return value for i, _ in enumerate(mf_out.type.pointee.elements): ptr = builder.gep(mf_out, [ctx.int32_ty(0), ctx.int32_ty(i)]) - out_ptr = builder.gep(new_out, [ctx.int32_ty(0), ctx.int32_ty(i + 1)]) + out_ptr = builder.gep(m_val, [ctx.int32_ty(0), ctx.int32_ty(i + 1)]) val = builder.load(ptr) builder.store(val, out_ptr) - return new_out, builder + return m_val, builder # 5/8/20: ELIMINATE SYSTEM # SEEMS TO STILL BE USED BY SOME MODELS; DELETE WHEN THOSE ARE UPDATED diff --git a/psyneulink/library/components/mechanisms/modulatory/learning/autoassociativelearningmechanism.py b/psyneulink/library/components/mechanisms/modulatory/learning/autoassociativelearningmechanism.py index ec540c8764e..80e0e5fb43a 100644 --- a/psyneulink/library/components/mechanisms/modulatory/learning/autoassociativelearningmechanism.py +++ b/psyneulink/library/components/mechanisms/modulatory/learning/autoassociativelearningmechanism.py @@ -104,7 +104,7 @@ from psyneulink.core.globals.context import ContextFlags from psyneulink.core.globals.keywords import \ ADDITIVE, AUTOASSOCIATIVE_LEARNING_MECHANISM, LEARNING, LEARNING_PROJECTION, LEARNING_SIGNAL, NAME, OWNER_VALUE, VARIABLE -from psyneulink.core.globals.parameters import Parameter +from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel from psyneulink.core.globals.utilities import is_numeric, parameter_spec @@ -319,6 +319,7 @@ class 
Parameters(LearningMechanism.Parameters): classPreferenceLevel = PreferenceLevel.TYPE + @check_user_specified @tc.typecheck def __init__(self, default_variable:tc.any(list, np.ndarray), diff --git a/psyneulink/library/components/mechanisms/modulatory/learning/kohonenlearningmechanism.py b/psyneulink/library/components/mechanisms/modulatory/learning/kohonenlearningmechanism.py index d6717abd1d9..9122b97282b 100644 --- a/psyneulink/library/components/mechanisms/modulatory/learning/kohonenlearningmechanism.py +++ b/psyneulink/library/components/mechanisms/modulatory/learning/kohonenlearningmechanism.py @@ -108,7 +108,7 @@ from psyneulink.core.globals.keywords import \ ADDITIVE, KOHONEN_LEARNING_MECHANISM, \ LEARNING, LEARNING_PROJECTION, LEARNING_SIGNAL -from psyneulink.core.globals.parameters import Parameter +from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel from psyneulink.core.globals.utilities import is_numeric, parameter_spec @@ -320,6 +320,7 @@ class Parameters(LearningMechanism.Parameters): learning_timing = LearningTiming.EXECUTION_PHASE modulation = ADDITIVE + @check_user_specified @tc.typecheck def __init__(self, default_variable:tc.any(list, np.ndarray), diff --git a/psyneulink/library/components/mechanisms/processing/integrator/ddm.py b/psyneulink/library/components/mechanisms/processing/integrator/ddm.py index c3bac361a1d..5f790fc9200 100644 --- a/psyneulink/library/components/mechanisms/processing/integrator/ddm.py +++ b/psyneulink/library/components/mechanisms/processing/integrator/ddm.py @@ -380,7 +380,7 @@ from psyneulink.core.globals.keywords import \ ALLOCATION_SAMPLES, FUNCTION, FUNCTION_PARAMS, INPUT_PORT_VARIABLES, NAME, OWNER_VALUE, \ THRESHOLD, VARIABLE, PREFERENCE_SET_NAME -from psyneulink.core.globals.parameters import Parameter +from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set, REPORT_OUTPUT_PREF from psyneulink.core.globals.preferences.preferenceset import PreferenceEntry, PreferenceLevel from psyneulink.core.globals.utilities import convert_all_elements_to_np_array, is_numeric, is_same_function_spec, object_has_single_value, get_global_seed @@ -753,6 +753,7 @@ class Parameters(ProcessingMechanism.Parameters): ] standard_output_port_names = [i['name'] for i in standard_output_ports] + @check_user_specified @tc.typecheck def __init__(self, default_variable=None, @@ -1099,25 +1100,20 @@ def _execute( return_value[self.DECISION_VARIABLE_INDEX] = threshold return return_value - def _gen_llvm_invoke_function(self, ctx, builder, function, params, state, variable, *, tags:frozenset): - - mf_out, builder = super()._gen_llvm_invoke_function(ctx, builder, function, params, state, variable, tags=tags) - - mech_out_ty = ctx.convert_python_struct_to_llvm_ir(self.defaults.value) - mech_out = builder.alloca(mech_out_ty, name="mech_out") + def _gen_llvm_invoke_function(self, ctx, builder, function, params, state, + variable, m_val, *, tags:frozenset): if isinstance(self.function, IntegratorFunction): - # Integrator version of the DDM mechanism converts the - # second element to a 2d array - builder.store(builder.load(builder.gep(mf_out, [ctx.int32_ty(0), - ctx.int32_ty(0)])), - builder.gep(mech_out, [ctx.int32_ty(0), - ctx.int32_ty(0)])) - builder.store(builder.load(builder.gep(mf_out, 
[ctx.int32_ty(0), - ctx.int32_ty(1)])), - builder.gep(mech_out, [ctx.int32_ty(0), - ctx.int32_ty(1)])) + # Integrator based DDM works like other mechanisms + return super()._gen_llvm_invoke_function(ctx, builder, function, + params, state, variable, + m_val, tags=tags) + elif isinstance(self.function, DriftDiffusionAnalytical): + mf_out, builder = super()._gen_llvm_invoke_function(ctx, builder, function, + params, state, variable, + None, tags=tags) + # The order and number of returned values is different for DDA for res_idx, idx in enumerate((self.RESPONSE_TIME_INDEX, self.PROBABILITY_LOWER_THRESHOLD_INDEX, self.RT_CORRECT_MEAN_INDEX, @@ -1127,47 +1123,68 @@ def _gen_llvm_invoke_function(self, ctx, builder, function, params, state, varia self.RT_INCORRECT_VARIANCE_INDEX, self.RT_INCORRECT_SKEW_INDEX)): src = builder.gep(mf_out, [ctx.int32_ty(0), ctx.int32_ty(res_idx)]) - dst = builder.gep(mech_out, [ctx.int32_ty(0), ctx.int32_ty(idx)]) + dst = builder.gep(m_val, [ctx.int32_ty(0), ctx.int32_ty(idx)]) builder.store(builder.load(src), dst) - # Handle upper threshold probability - src = builder.gep(mf_out, [ctx.int32_ty(0), ctx.int32_ty(1), - ctx.int32_ty(0)]) - dst = builder.gep(mech_out, [ctx.int32_ty(0), - ctx.int32_ty(self.PROBABILITY_UPPER_THRESHOLD_INDEX), - ctx.int32_ty(0)]) + # Handle upper threshold probability (1 - Lower Threshold) + src = builder.gep(m_val, [ctx.int32_ty(0), + ctx.int32_ty(self.PROBABILITY_LOWER_THRESHOLD_INDEX), + ctx.int32_ty(0)]) + dst = builder.gep(m_val, [ctx.int32_ty(0), + ctx.int32_ty(self.PROBABILITY_UPPER_THRESHOLD_INDEX), + ctx.int32_ty(0)]) prob_lower_thr = builder.load(src) - prob_upper_thr = builder.fsub(prob_lower_thr.type(1), - prob_lower_thr) + prob_upper_thr = builder.fsub(prob_lower_thr.type(1), prob_lower_thr) builder.store(prob_upper_thr, dst) - # Load function threshold + # Store threshold as decision variable output + # this will be used by the mechanism to return the right decision threshold_ptr = pnlvm.helpers.get_param_ptr(builder, self.function, params, THRESHOLD) - threshold = pnlvm.helpers.load_extract_scalar_array_one(builder, - threshold_ptr) - # Load mechanism state to generate random numbers - mech_params = builder.function.args[0] - mech_state = builder.function.args[1] - random_state = ctx.get_random_state_ptr(builder, self, mech_state, mech_params) + threshold = pnlvm.helpers.load_extract_scalar_array_one(builder, threshold_ptr) + decision_ptr = builder.gep(m_val, [ctx.int32_ty(0), + ctx.int32_ty(self.DECISION_VARIABLE_INDEX), + ctx.int32_ty(0)]) + builder.store(threshold, decision_ptr) + else: + assert False, "Unknown mode in compiled DDM!" 
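Taken together, the DriftDiffusionAnalytical branch stores P(lower threshold), its complement, and the (possibly modulated) threshold; the mechanism-level hunk that follows then samples which boundary was hit. A plain-Python sketch of that conversion (the function name and sample output are illustrative):

    import numpy as np

    rng = np.random.default_rng()

    def dda_decision(p_lower_threshold, threshold):
        # Upper-threshold probability is the complement, as computed above.
        p_upper_threshold = 1.0 - p_lower_threshold
        # A uniform draw picks the boundary; the decision variable is the
        # threshold value with the matching sign.
        decision = -threshold if rng.uniform() < p_lower_threshold else threshold
        return decision, p_upper_threshold

    dda_decision(p_lower_threshold=0.25, threshold=1.0)  # (1.0, 0.75) about 75% of the time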
+ + return m_val, builder + + def _gen_llvm_mechanism_functions(self, ctx, builder, m_base_params, m_params, m_state, m_in, + m_val, ip_output, *, tags:frozenset): + + mf_out, builder = super()._gen_llvm_mechanism_functions(ctx, builder, m_base_params, + m_params, m_state, m_in, m_val, + ip_output, tags=tags) + assert mf_out is m_val + + if isinstance(self.function, DriftDiffusionAnalytical): + random_state = ctx.get_random_state_ptr(builder, self, m_state, m_params) random_f = ctx.get_uniform_dist_function_by_state(random_state) random_val_ptr = builder.alloca(random_f.args[1].type.pointee, name="random_out") builder.call(random_f, [random_state, random_val_ptr]) random_val = builder.load(random_val_ptr) # Convert ER to decision variable: - dst = builder.gep(mech_out, [ctx.int32_ty(0), - ctx.int32_ty(self.DECISION_VARIABLE_INDEX), - ctx.int32_ty(0)]) + prob_lthr_ptr = builder.gep(m_val, [ctx.int32_ty(0), + ctx.int32_ty(self.PROBABILITY_LOWER_THRESHOLD_INDEX), + ctx.int32_ty(0)]) + prob_lower_thr = builder.load(prob_lthr_ptr) thr_cmp = builder.fcmp_ordered("<", random_val, prob_lower_thr) + + # The correct (modulated) threshold value is passed as + # decision variable output + decision_ptr = builder.gep(m_val, [ctx.int32_ty(0), + ctx.int32_ty(self.DECISION_VARIABLE_INDEX), + ctx.int32_ty(0)]) + threshold = builder.load(decision_ptr) neg_threshold = pnlvm.helpers.fneg(builder, threshold) res = builder.select(thr_cmp, neg_threshold, threshold) - builder.store(res, dst) - else: - assert False, "Unknown mode in compiled DDM!" + builder.store(res, decision_ptr) - return mech_out, builder + return m_val, builder @handle_external_context(fallback_most_recent=True) def reset(self, *args, force=False, context=None, **kwargs): diff --git a/psyneulink/library/components/mechanisms/processing/integrator/episodicmemorymechanism.py b/psyneulink/library/components/mechanisms/processing/integrator/episodicmemorymechanism.py index 268ba986a5b..3286eff87c8 100644 --- a/psyneulink/library/components/mechanisms/processing/integrator/episodicmemorymechanism.py +++ b/psyneulink/library/components/mechanisms/processing/integrator/episodicmemorymechanism.py @@ -415,7 +415,7 @@ from psyneulink.core.components.mechanisms.processing.processingmechanism import ProcessingMechanism_Base from psyneulink.core.components.ports.inputport import InputPort from psyneulink.core.globals.keywords import EPISODIC_MEMORY_MECHANISM, INITIALIZER, NAME, OWNER_VALUE, VARIABLE -from psyneulink.core.globals.parameters import Parameter +from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set from psyneulink.core.globals.utilities import deprecation_warning, convert_to_np_array, convert_all_elements_to_np_array @@ -512,6 +512,7 @@ class Parameters(ProcessingMechanism_Base.Parameters): variable = Parameter([[0,0]], pnl_internal=True, constructor_argument='default_variable') function = Parameter(ContentAddressableMemory, stateful=False, loggable=False) + @check_user_specified def __init__(self, default_variable:Union[int, list, np.ndarray]=None, size:Optional[Union[int, list, np.ndarray]]=None, diff --git a/psyneulink/library/components/mechanisms/processing/leabramechanism.py b/psyneulink/library/components/mechanisms/processing/leabramechanism.py index 16cbc400030..ff78c9f3f39 100644 --- a/psyneulink/library/components/mechanisms/processing/leabramechanism.py +++ b/psyneulink/library/components/mechanisms/processing/leabramechanism.py @@ 
-106,7 +106,7 @@ from psyneulink.core.components.functions.function import Function_Base from psyneulink.core.components.mechanisms.processing.processingmechanism import ProcessingMechanism_Base from psyneulink.core.globals.keywords import LEABRA_FUNCTION, LEABRA_FUNCTION_TYPE, LEABRA_MECHANISM, NETWORK, PREFERENCE_SET_NAME -from psyneulink.core.globals.parameters import FunctionParameter, Parameter +from psyneulink.core.globals.parameters import FunctionParameter, Parameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import REPORT_OUTPUT_PREF from psyneulink.core.globals.preferences.preferenceset import PreferenceEntry, PreferenceLevel from psyneulink.core.scheduling.time import TimeScale @@ -212,6 +212,7 @@ class Parameters(Function_Base.Parameters): variable = Parameter(np.array([[0], [0]]), read_only=True, pnl_internal=True, constructor_argument='default_variable') network = None + @check_user_specified def __init__(self, default_variable=None, network=None, @@ -471,6 +472,7 @@ class Parameters(ProcessingMechanism_Base.Parameters): network = FunctionParameter(None) training_flag = Parameter(False, setter=_training_flag_setter, dependencies='network') + @check_user_specified def __init__(self, network=None, input_size=None, diff --git a/psyneulink/library/components/mechanisms/processing/objective/comparatormechanism.py b/psyneulink/library/components/mechanisms/processing/objective/comparatormechanism.py index 50381e88984..3a8e169b380 100644 --- a/psyneulink/library/components/mechanisms/processing/objective/comparatormechanism.py +++ b/psyneulink/library/components/mechanisms/processing/objective/comparatormechanism.py @@ -153,7 +153,7 @@ from psyneulink.core.components.ports.port import _parse_port_spec from psyneulink.core.globals.keywords import \ COMPARATOR_MECHANISM, FUNCTION, INPUT_PORTS, NAME, OUTCOME, SAMPLE, TARGET, VARIABLE, PREFERENCE_SET_NAME, MSE, SSE -from psyneulink.core.globals.parameters import Parameter +from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set, REPORT_OUTPUT_PREF from psyneulink.core.globals.preferences.preferenceset import PreferenceEntry, PreferenceLevel from psyneulink.core.globals.utilities import \ @@ -323,6 +323,7 @@ class Parameters(ObjectiveMechanism.Parameters): standard_output_port_names = ObjectiveMechanism.standard_output_port_names.copy() standard_output_port_names.extend([SSE, MSE]) + @check_user_specified @tc.typecheck def __init__(self, default_variable=None, diff --git a/psyneulink/library/components/mechanisms/processing/objective/predictionerrormechanism.py b/psyneulink/library/components/mechanisms/processing/objective/predictionerrormechanism.py index c106444d7f6..548d0a40d3a 100644 --- a/psyneulink/library/components/mechanisms/processing/objective/predictionerrormechanism.py +++ b/psyneulink/library/components/mechanisms/processing/objective/predictionerrormechanism.py @@ -172,7 +172,7 @@ from psyneulink.core.components.mechanisms.mechanism import Mechanism_Base from psyneulink.core.components.ports.outputport import OutputPort from psyneulink.core.globals.keywords import PREDICTION_ERROR_MECHANISM, SAMPLE, TARGET -from psyneulink.core.globals.parameters import Parameter +from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set, REPORT_OUTPUT_PREF from 
psyneulink.core.globals.preferences.preferenceset import PreferenceEntry, PreferenceLevel, PREFERENCE_SET_NAME from psyneulink.core.globals.utilities import is_numeric @@ -283,6 +283,7 @@ class Parameters(ComparatorMechanism.Parameters): sample = None target = None + @check_user_specified @tc.typecheck def __init__(self, sample: tc.optional(tc.any(OutputPort, Mechanism_Base, dict, diff --git a/psyneulink/library/components/mechanisms/processing/transfer/contrastivehebbianmechanism.py b/psyneulink/library/components/mechanisms/processing/transfer/contrastivehebbianmechanism.py index 69a48ef6fc5..d4e8482ebbc 100644 --- a/psyneulink/library/components/mechanisms/processing/transfer/contrastivehebbianmechanism.py +++ b/psyneulink/library/components/mechanisms/processing/transfer/contrastivehebbianmechanism.py @@ -342,7 +342,7 @@ from psyneulink.core.globals.keywords import \ CONTRASTIVE_HEBBIAN_MECHANISM, COUNT, FUNCTION, HARD_CLAMP, HOLLOW_MATRIX, MAX_ABS_DIFF, NAME, \ SIZE, SOFT_CLAMP, TARGET, VARIABLE -from psyneulink.core.globals.parameters import Parameter, SharedParameter +from psyneulink.core.globals.parameters import Parameter, SharedParameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set from psyneulink.core.globals.utilities import is_numeric_or_none, parameter_spec from psyneulink.library.components.mechanisms.processing.transfer.recurrenttransfermechanism import \ @@ -977,6 +977,7 @@ class Parameters(RecurrentTransferMechanism.Parameters): standard_output_port_names = RecurrentTransferMechanism.standard_output_port_names.copy() standard_output_port_names = [i['name'] for i in standard_output_ports] + @check_user_specified @tc.typecheck def __init__(self, input_size:int, diff --git a/psyneulink/library/components/mechanisms/processing/transfer/kohonenmechanism.py b/psyneulink/library/components/mechanisms/processing/transfer/kohonenmechanism.py index 9339c89b1d5..ba0d3840a41 100644 --- a/psyneulink/library/components/mechanisms/processing/transfer/kohonenmechanism.py +++ b/psyneulink/library/components/mechanisms/processing/transfer/kohonenmechanism.py @@ -90,7 +90,7 @@ from psyneulink.core.globals.keywords import \ DEFAULT_MATRIX, FUNCTION, GAUSSIAN, IDENTITY_MATRIX, KOHONEN_MECHANISM, \ LEARNING_SIGNAL, MATRIX, MAX_INDICATOR, NAME, OWNER_VALUE, OWNER_VARIABLE, RESULT, VARIABLE -from psyneulink.core.globals.parameters import Parameter, SharedParameter +from psyneulink.core.globals.parameters import Parameter, SharedParameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set from psyneulink.core.globals.utilities import is_numeric_or_none, parameter_spec from psyneulink.library.components.mechanisms.modulatory.learning.kohonenlearningmechanism import KohonenLearningMechanism @@ -274,6 +274,7 @@ class Parameters(TransferMechanism.Parameters): FUNCTION: OneHot(mode=MAX_INDICATOR)} ]) + @check_user_specified @tc.typecheck def __init__(self, default_variable=None, diff --git a/psyneulink/library/components/mechanisms/processing/transfer/kwtamechanism.py b/psyneulink/library/components/mechanisms/processing/transfer/kwtamechanism.py index 12c1369996e..2ffe285dfae 100644 --- a/psyneulink/library/components/mechanisms/processing/transfer/kwtamechanism.py +++ b/psyneulink/library/components/mechanisms/processing/transfer/kwtamechanism.py @@ -187,10 +187,11 @@ from psyneulink.core.components.functions.nonstateful.transferfunctions import Logistic from psyneulink.core.globals.keywords 
import KWTA_MECHANISM, K_VALUE, RATIO, RESULT, THRESHOLD -from psyneulink.core.globals.parameters import Parameter +from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set from psyneulink.core.globals.utilities import is_numeric_or_none from psyneulink.library.components.mechanisms.processing.transfer.recurrenttransfermechanism import RecurrentTransferMechanism +from psyneulink.library.components.projections.pathway.autoassociativeprojection import get_auto_matrix, get_hetero_matrix __all__ = [ 'KWTAMechanism', 'KWTAError', @@ -342,6 +343,7 @@ class Parameters(RecurrentTransferMechanism.Parameters): average_based = False inhibition_only = True + @check_user_specified @tc.typecheck def __init__(self, default_variable=None, @@ -414,6 +416,17 @@ def _instantiate_attributes_before_function(self, function=None, context=None): # so it shouldn't be a problem) self.indexOfInhibitionInputPort = len(self.input_ports) - 1 + # NOTE: this behavior matches what kwta tests assert. Values for + # auto and hetero were ALWAYS "user_specified" due to using + # values set in KWTAMechanism.__init__. To change this and use + # default RecurrentTransferMechanism behavior, the test values + # must be changed + matrix = ( + get_auto_matrix(self.defaults.auto, self.recurrent_size) + + get_hetero_matrix(self.defaults.hetero, self.recurrent_size) + ) + self.parameters.matrix._set(matrix, context) + def _kwta_scale(self, current_input, context=None): k_value = self._get_current_parameter_value(self.parameters.k_value, context) threshold = self._get_current_parameter_value(self.parameters.threshold, context) diff --git a/psyneulink/library/components/mechanisms/processing/transfer/lcamechanism.py b/psyneulink/library/components/mechanisms/processing/transfer/lcamechanism.py index 9402929ec4c..31dd42b52d4 100644 --- a/psyneulink/library/components/mechanisms/processing/transfer/lcamechanism.py +++ b/psyneulink/library/components/mechanisms/processing/transfer/lcamechanism.py @@ -199,7 +199,7 @@ from psyneulink.core.globals.keywords import \ CONVERGENCE, FUNCTION, GREATER_THAN_OR_EQUAL, LCA_MECHANISM, LESS_THAN_OR_EQUAL, MATRIX, NAME, \ RESULT, TERMINATION_THRESHOLD, TERMINATION_MEASURE, TERMINATION_COMPARISION_OP, VALUE, INVERSE_HOLLOW_MATRIX, AUTO -from psyneulink.core.globals.parameters import FunctionParameter, Parameter +from psyneulink.core.globals.parameters import FunctionParameter, Parameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set from psyneulink.library.components.mechanisms.processing.transfer.recurrenttransfermechanism import \ RecurrentTransferMechanism, _recurrent_transfer_mechanism_matrix_getter, _recurrent_transfer_mechanism_matrix_setter @@ -437,6 +437,7 @@ def _validate_integration_rate(self, integration_rate): {NAME:MAX_VS_AVG, FUNCTION:max_vs_avg}]) + @check_user_specified @tc.typecheck def __init__(self, default_variable=None, diff --git a/psyneulink/library/components/mechanisms/processing/transfer/recurrenttransfermechanism.py b/psyneulink/library/components/mechanisms/processing/transfer/recurrenttransfermechanism.py index bd300b0c98c..3724b53f732 100644 --- a/psyneulink/library/components/mechanisms/processing/transfer/recurrenttransfermechanism.py +++ b/psyneulink/library/components/mechanisms/processing/transfer/recurrenttransfermechanism.py @@ -210,7 +210,7 @@ from psyneulink.core.globals.context import handle_external_context from 
psyneulink.core.globals.keywords import \ AUTO, ENERGY, ENTROPY, HETERO, HOLLOW_MATRIX, INPUT_PORT, MATRIX, NAME, RECURRENT_TRANSFER_MECHANISM, RESULT -from psyneulink.core.globals.parameters import Parameter, SharedParameter +from psyneulink.core.globals.parameters import Parameter, SharedParameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set from psyneulink.core.globals.registry import register_instance, remove_instance_from_registry from psyneulink.core.globals.socket import ConnectionInfo @@ -644,6 +644,7 @@ class Parameters(TransferMechanism.Parameters): standard_output_port_names = TransferMechanism.standard_output_port_names.copy() standard_output_port_names.extend([ENERGY_OUTPUT_PORT_NAME, ENTROPY_OUTPUT_PORT_NAME]) + @check_user_specified @tc.typecheck def __init__(self, default_variable=None, @@ -1340,6 +1341,8 @@ def _gen_llvm_input_ports(self, ctx, builder, params, state, arg_in): # input builder.call(recurrent_f, [recurrent_params, recurrent_state, recurrent_in, recurrent_out]) + assert not self.has_recurrent_input_port, "Configuration using combination function is not supported!" + return super()._gen_llvm_input_ports(ctx, builder, params, state, arg_in) def _gen_llvm_output_ports(self, ctx, builder, value, diff --git a/psyneulink/library/components/projections/pathway/autoassociativeprojection.py b/psyneulink/library/components/projections/pathway/autoassociativeprojection.py index 011106e512e..98c9948ca5d 100644 --- a/psyneulink/library/components/projections/pathway/autoassociativeprojection.py +++ b/psyneulink/library/components/projections/pathway/autoassociativeprojection.py @@ -112,7 +112,7 @@ from psyneulink.core.components.shellclasses import Mechanism from psyneulink.core.components.ports.outputport import OutputPort from psyneulink.core.globals.keywords import AUTO_ASSOCIATIVE_PROJECTION, DEFAULT_MATRIX, HOLLOW_MATRIX, FUNCTION, OWNER_MECH -from psyneulink.core.globals.parameters import SharedParameter, Parameter +from psyneulink.core.globals.parameters import SharedParameter, Parameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel @@ -236,6 +236,7 @@ class Parameters(MappingProjection.Parameters): classPreferenceLevel = PreferenceLevel.TYPE + @check_user_specified @tc.typecheck def __init__(self, owner=None, diff --git a/psyneulink/library/components/projections/pathway/maskedmappingprojection.py b/psyneulink/library/components/projections/pathway/maskedmappingprojection.py index c521d123319..7fd93defa26 100644 --- a/psyneulink/library/components/projections/pathway/maskedmappingprojection.py +++ b/psyneulink/library/components/projections/pathway/maskedmappingprojection.py @@ -73,6 +73,7 @@ from psyneulink.core.components.projections.pathway.mappingprojection import MappingProjection from psyneulink.core.components.projections.projection import projection_keywords from psyneulink.core.globals.keywords import MASKED_MAPPING_PROJECTION, MATRIX +from psyneulink.core.globals.parameters import check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel @@ -170,6 +171,7 @@ def _validate_mask_operation(self, mode): classPreferenceLevel = PreferenceLevel.TYPE + @check_user_specified @tc.typecheck def __init__(self, sender=None, diff --git 
a/psyneulink/library/compositions/autodiffcomposition.py b/psyneulink/library/compositions/autodiffcomposition.py index 81142b6f4f2..28b4e6cf81d 100644 --- a/psyneulink/library/compositions/autodiffcomposition.py +++ b/psyneulink/library/compositions/autodiffcomposition.py @@ -150,7 +150,7 @@ from psyneulink.core.globals.context import Context, ContextFlags, handle_external_context from psyneulink.core.globals.keywords import AUTODIFF_COMPOSITION, SOFT_CLAMP from psyneulink.core.scheduling.scheduler import Scheduler -from psyneulink.core.globals.parameters import Parameter +from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.scheduling.time import TimeScale from psyneulink.core import llvm as pnlvm @@ -222,6 +222,7 @@ class Parameters(Composition.Parameters): pytorch_representation = None # TODO (CW 9/28/18): add compositions to registry so default arg for name is no longer needed + @check_user_specified def __init__(self, learning_rate=None, optimizer_type='sgd', diff --git a/psyneulink/library/compositions/gymforagercfa.py b/psyneulink/library/compositions/gymforagercfa.py index 64250e035f6..e8b0e3f535b 100644 --- a/psyneulink/library/compositions/gymforagercfa.py +++ b/psyneulink/library/compositions/gymforagercfa.py @@ -81,7 +81,7 @@ from psyneulink.library.compositions.regressioncfa import RegressionCFA from psyneulink.core.components.functions.nonstateful.learningfunctions import BayesGLM from psyneulink.core.globals.keywords import DEFAULT_VARIABLE -from psyneulink.core.globals.parameters import Parameter +from psyneulink.core.globals.parameters import Parameter, check_user_specified __all__ = ['GymForagerCFA'] @@ -108,6 +108,7 @@ class GymForagerCFA(RegressionCFA): class Parameters(RegressionCFA.Parameters): update_weights = Parameter(BayesGLM, stateful=False, loggable=False) + @check_user_specified def __init__(self, name=None, update_weights=BayesGLM, diff --git a/psyneulink/library/compositions/pytorchcomponents.py b/psyneulink/library/compositions/pytorchcomponents.py index 27f72292951..43122730437 100644 --- a/psyneulink/library/compositions/pytorchcomponents.py +++ b/psyneulink/library/compositions/pytorchcomponents.py @@ -131,7 +131,9 @@ def _gen_llvm_execute_derivative_func(self, ctx, builder, state, params, arg_in) self._mechanism.function, f_params_ptr, ctx, builder, mech_params, mech_state, mech_input) f_state = pnlvm.helpers.get_state_ptr(builder, self._mechanism, mech_state, "function") - output, _ = self._mechanism._gen_llvm_invoke_function(ctx, builder, self._mechanism.function, f_params, f_state, mech_input, tags=frozenset({"derivative"})) + output, _ = self._mechanism._gen_llvm_invoke_function(ctx, builder, self._mechanism.function, + f_params, f_state, mech_input, None, + tags=frozenset({"derivative"})) return builder.gep(output, [ctx.int32_ty(0), ctx.int32_ty(0)]) diff --git a/psyneulink/library/compositions/regressioncfa.py b/psyneulink/library/compositions/regressioncfa.py index 7682d9ecbba..5d1f3eef154 100644 --- a/psyneulink/library/compositions/regressioncfa.py +++ b/psyneulink/library/compositions/regressioncfa.py @@ -85,7 +85,7 @@ from psyneulink.core.components.ports.port import _parse_port_spec from psyneulink.core.compositions.compositionfunctionapproximator import CompositionFunctionApproximator from psyneulink.core.globals.keywords import ALL, CONTROL_SIGNALS, DEFAULT_VARIABLE, VARIABLE -from psyneulink.core.globals.parameters import Parameter +from psyneulink.core.globals.parameters import Parameter, 
check_user_specified from psyneulink.core.globals.utilities import get_deepcopy_with_shared, powerset, tensor_power __all__ = ['PREDICTION_TERMS', 'PV', 'RegressionCFA'] @@ -246,6 +246,7 @@ class Parameters(CompositionFunctionApproximator.Parameters): previous_state = None regression_weights = None + @check_user_specified def __init__(self, name=None, update_weights=None, diff --git a/requirements.txt b/requirements.txt index 02d2dd607e5..302ecce50e9 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,22 +1,22 @@ -autograd<=1.3 -graph-scheduler>=0.2.0, <1.1.1 +autograd<1.5 +graph-scheduler>=0.2.0, <1.1.2 dill<=0.32 -elfi<0.8.4 -graphviz<0.20.0 +elfi<0.8.5 +graphviz<0.21.0 grpcio<1.43.0 grpcio-tools<1.43.0 -llvmlite<0.39 -matplotlib<3.5.2 -modeci_mdf>=0.3.2, <0.3.4 -modelspec<0.2.0 -networkx<2.8 -numpy<1.21.4, >=1.17.0 -pillow<9.1.0 -pint<0.18 +llvmlite<0.40 +matplotlib<3.5.3 +modeci_mdf>=0.3.4, <0.4.2 +modelspec<0.2.6 +networkx<2.9 +numpy<1.21.7, >=1.17.0 +pillow<9.3.0 +pint<0.20.0 toposort<1.8 -torch>=1.8.0, <2.0.0; (platform_machine == 'AMD64' or platform_machine == 'x86_64') and platform_python_implementation == 'CPython' and implementation_name == 'cpython' +torch>=1.8.0, <1.12.0; (platform_machine == 'AMD64' or platform_machine == 'x86_64') and platform_python_implementation == 'CPython' and implementation_name == 'cpython' typecheck-decorator<=1.2 leabra-psyneulink<=0.3.2 rich>=10.1, <10.13 -pandas<=1.4.1 -fastkde==1.0.19 \ No newline at end of file +pandas<1.4.4 +fastkde==1.0.19 diff --git a/setup.cfg b/setup.cfg index 9bc862ce6b8..fbd3d06c3bb 100644 --- a/setup.cfg +++ b/setup.cfg @@ -34,6 +34,7 @@ markers = cuda: Tests using LLVM runtime compiler and CUDA GPGPU backend control: Tests including control mechanism and/or control projection state_features: Tests for OptimizationControlMechanism state_features specifications + pathways: Tests for pathway arg of Composition constructor and node Roles projection nested: Tests including nested compositions function: Tests of Function classes diff --git a/tests/components/test_general.py b/tests/components/test_general.py index 762bf894a07..dd4c8f23de7 100644 --- a/tests/components/test_general.py +++ b/tests/components/test_general.py @@ -55,33 +55,12 @@ def test_function_parameters_stateless(class_): pass -@pytest.mark.parametrize( - 'class_', - component_classes -) -def test_parameters_user_specified(class_): - violators = set() - constructor_parameters = inspect.signature(class_.__init__).parameters - for name, param in constructor_parameters.items(): - if ( - param.kind in { - inspect.Parameter.POSITIONAL_OR_KEYWORD, - inspect.Parameter.KEYWORD_ONLY - } - and name in class_.parameters.names() - and param.default is not inspect.Parameter.empty - and param.default is not None - ): - violators.add(name) - - message = ( - "If a value other than None is used as the default value in a class's" - + ' constructor/__init__, for an argument corresponding to a Parameter,' - + ' _user_specified will always be True. The default value should be' - + " specified in the class's Parameters inner class. 
Violators for" - + f' {class_.__name__}: {violators}' +@pytest.mark.parametrize("class_", component_classes) +def test_constructors_have_check_user_specified(class_): + assert "check_user_specified" in inspect.getsource(class_.__init__), ( + f"The __init__ method of Component {class_.__name__} must be wrapped by" + f" check_user_specified in {pnl.core.globals.parameters.check_user_specified.__module__}" ) - assert violators == set(), message @pytest.fixture(scope='module') diff --git a/tests/composition/test_autodiffcomposition.py b/tests/composition/test_autodiffcomposition.py index 6eab099d8fc..2bc81653862 100644 --- a/tests/composition/test_autodiffcomposition.py +++ b/tests/composition/test_autodiffcomposition.py @@ -333,8 +333,14 @@ def test_optimizer_specs(self, learning_rate, weight_decay, optimizer_type, expe "targets": {xor_out:xor_targets}, "epochs": 10}, execution_mode=autodiff_mode) + # fp32 results are different due to rounding + if pytest.helpers.llvm_current_fp_precision() == 'fp32' and \ + autodiff_mode != pnl.ExecutionMode.Python and \ + optimizer_type == 'sgd' and \ + learning_rate == 10: + expected = [[[0.9918830394744873]], [[0.9982172846794128]], [[0.9978305697441101]], [[0.9994590878486633]]] # FIXME: LLVM version is broken with learning rate == 1.5 - if learning_rate != 1.5 or autodiff_mode is pnl.ExecutionMode.Python: + if learning_rate != 1.5 or autodiff_mode == pnl.ExecutionMode.Python: assert np.allclose(results_before_proc, expected) if benchmark.enabled: diff --git a/tests/composition/test_composition.py b/tests/composition/test_composition.py index 1f06cce8a50..230af9f4c34 100644 --- a/tests/composition/test_composition.py +++ b/tests/composition/test_composition.py @@ -575,6 +575,7 @@ def test_unused_projections_warning(self): assert repr(warning[1].message.args[0]) == '"\\nThe following Projections were specified but are not being used by Nodes in \'COMP_2\':\\n\\tMappingProjection from A[OutputPort-0] to C[InputPort-0] (to \'C\' from \'A\')."' +@pytest.mark.pathways class TestPathway: def test_pathway_standalone_object(self): @@ -616,7 +617,47 @@ def test_pathway_illegal_arg_error(self): assert "Illegal argument(s) used in constructor for Pathway: foo." 
in str(error_text.value) -class TestCompositionPathwayAdditionMethods: +@pytest.mark.pathways +class TestCompositionPathwayArgsAndAdditionMethods: + + def test_add_pathways_with_all_types(self): + A = ProcessingMechanism(name='A') + B = ProcessingMechanism(name='B') + C = ProcessingMechanism(name='C') + D = ProcessingMechanism(name='D') + E = ProcessingMechanism(name='E') + X = ProcessingMechanism(name='X') + Y = ProcessingMechanism(name='Y') + F = ProcessingMechanism(name='F') + G = ProcessingMechanism(name='G') + H = ProcessingMechanism(name='H') + J = ProcessingMechanism(name='J') + K = ProcessingMechanism(name='K') + L = ProcessingMechanism(name='L') + M = ProcessingMechanism(name='M') + + # FIX: 4/9/22 - ADD SET SPEC + p = Pathway(pathway=[L,M], name='P') + c = Composition() + c.add_pathways(pathways=[A, + [B,C], + (D,E), + {X,Y}, + {'DICT PATHWAY': F}, + ([G, H], BackPropagation), + {'LEARNING PATHWAY': ([J,K], Reinforcement)}, + p]) + assert len(c.pathways) == 8 + assert isinstance(c.pathways[0].pathway, list) and len(c.pathways[0].pathway) == 1 + assert isinstance(c.pathways[1].pathway, list) and len(c.pathways[1].pathway) == 3 + assert isinstance(c.pathways[2].pathway, list) and len(c.pathways[2].pathway) == 3 + assert isinstance(c.pathways[3].pathway[0], set) and len(c.pathways[3].pathway) == 1 + assert c.pathways['P'].input == L + assert c.pathways['DICT PATHWAY'].input == F + assert c.pathways['DICT PATHWAY'].output == F + assert c.pathways['LEARNING PATHWAY'].output == K + assert [p for p in c.pathways if p.input == G][0].learning_function == BackPropagation + assert c.pathways['LEARNING PATHWAY'].learning_function == Reinforcement def test_pathway_attributes(self): c = Composition() @@ -837,44 +878,148 @@ def test_add_td_learning_pathway_arg_pathway(self): PathwayRole.LEARNING, PathwayRole.OUTPUT} - def test_add_pathways_with_all_types(self): + config = [ + ('[A,{B,C}]', 's1'), # SEQUENTIAL: A->{B,C} + ('[A,[B,C]]', 'p1'), # PARALLEL: A, B->C + ('[{A},{B,C}]', 's1'), # SEQUENTIAL: A->{B,C} + ('[[A],{B,C}]', 'p2'), # PARALLEL: A, B, C + ('[[A,B],{C,D}]', 'p3'), # PARALLEL: A->B, C, D + ('[[A,B],C,D ]', 'p3'), # PARALLEL: A->B, C, D + ('[[A,B],[C,D]]', 'p5'), # PARALLEL: A->B, C->D + ('[{A,B}, MapProj(B,D), C, D]', 's2'), # SEQUENTIAL: A, B->D, C->D + ('[{A,B}, [MapProj(B,D)], C, D]', 's2'), # SEQUENTIAL: A, B->D, C->D + ('[{A,B}, {MapProj(B,D)}, C, D]', 's2'), # SEQUENTIAL: A, B->D, C->D + ('[{A,B}, [[C,D]]]', 'p4'), # PARALLEL: A, B, C->D (FORGIVES EMBEDDED LIST OF [C,D]) + ('[[A,B], [[C,D]]]', 'p5'), # PARALLEL: A->B, C->D (FORGIVES EMBEDDED LIST OF [C,D]) + ('[[[A,B]], [[C,D]]]','p5'), # PARALLEL: A->B, C->D (FORGIVES EMBEDDED LISTS OF [A, B] and [C,D]) + ('[A, "B"]','e1'), # BAD ITEM ERROR + ('[[A,B, [C,D]],[E,F]]','e2'), # EMBEDDED LIST ERROR + ('[{A,B}, [MapProj(B,D)], [C,D]]', 'e3') # BAD ITEM ERROR, FIX: SHOULD ALLOW EMBEDDED PER ABOVE + ] + @pytest.mark.parametrize('config', config, ids=[x[0] for x in config]) + def test_various_pathway_configurations_in_constructor(self, config): + """Test combinations of sets and lists in the pathways specification of the Composition constructor + Principles: + if the outer list (pathways spec) contains: + - a single item or only sets, treat it as a single (sequential) pathway + - one or more lists within it, treat each item as a separate (parallel) pathway + - one or more lists each with a single list within it ([[[A,B]],[[C,D]]]), each is treated as a pathway + - any list with more than a single list within it ([[[A,B],[C,D]]]), an error is generated
+ - any bad items (strings, misplaced items), an error is generated + """ + A = ProcessingMechanism(name='A') B = ProcessingMechanism(name='B') + # B_comparator = ComparatorMechanism(name='B COMPARATOR') C = ProcessingMechanism(name='C') D = ProcessingMechanism(name='D') E = ProcessingMechanism(name='E') F = ProcessingMechanism(name='F') - G = ProcessingMechanism(name='G') - H = ProcessingMechanism(name='H') - J = ProcessingMechanism(name='J') - K = ProcessingMechanism(name='K') - L = ProcessingMechanism(name='L') - M = ProcessingMechanism(name='M') - p = Pathway(pathway=[L,M], name='P') - c = Composition() - c.add_pathways(pathways=[A, - [B,C], - (D,E), - {'DICT PATHWAY': F}, - ([G, H], BackPropagation), - {'LEARNING PATHWAY': ([J,K], Reinforcement)}, - p]) - assert len(c.pathways) == 7 - assert c.pathways['P'].input == L - assert c.pathways['DICT PATHWAY'].input == F - assert c.pathways['DICT PATHWAY'].output == F - assert c.pathways['LEARNING PATHWAY'].output == K - [p for p in c.pathways if p.input == G][0].learning_function == BackPropagation - assert c.pathways['LEARNING PATHWAY'].learning_function == Reinforcement + # LEGAL: + if config[0] == '[A,{B,C}]': # SEQUENTIAL A->{B,C}) (s1) + comp = Composition([A,{B,C}]) + elif config[0] == '[A,[B,C]]': # PARALLEL: A, B->C (p1) + comp = Composition([A,[B,C]]) + elif config[0] == '[{A},{B,C}]': # SEQUENTIAL: A->{B,C} (s1) + comp = Composition([{A},{B,C}]) + elif config[0] == '[[A],{B,C}]': # PARALLEL: A, B, C (p2) + comp = Composition([[A],{B,C}]) + elif config[0] == '[[A,B],{C,D}]': # PARALLEL: A->B, C, D (p3) + comp = Composition([[A,B],{C,D}]) + elif config[0] == '[[A,B],C,D ]': # PARALLEL: A->B, C, D (p3) + comp = Composition([[A,B],C,D ]) + elif config[0] == '[[A,B],[C,D]]': # PARALLEL: A->B, C->D {p5) + comp = Composition([[A,B],[C,D]]) + elif config[0] == '[{A,B}, MapProj(B,D), C, D]': # SEQUENTIAL: A, B->D, C->D (s2) + comp = Composition([{A,B}, MappingProjection(B,D), C, D]) + elif config[0] == '[{A,B}, [MapProj(B,D)], C, D]': # SEQUENTIAL: A, B->D, C->D (s2) + comp = Composition([{A,B}, [MappingProjection(B,D)], C, D]) + elif config[0] == '[{A,B}, {MapProj(B,D)}, C, D]': # SEQUENTIAL: A, B->D, C->D (s2) + comp = Composition([{A,B}, {MappingProjection(B,D)}, C, D]) + elif config[0] == '[{A,B}, [[C,D]]]': # PARALLEL: A, B, C->D (FORGIVES EMBEDDED LIST [C,D]) (p4) + comp = Composition([{A,B}, [[C,D]]]) + elif config[0] == '[[A,B], [[C,D]]]': # PARALLEL: A->B, C->D (SINGLE EMBEDDED LIST OK [C,D]) (p5) + comp = Composition([[A,B], [[C,D]]]) + elif config[0] == '[[[A,B]], [[C,D]]]': # PARALLEL: A->B, C->D (FORGIVES EMBEDDED LISTS [A,B] & [C,D]) (p5) + comp = Composition([[[A,B]], [[C,D]]]) + + # ERRORS: + elif config[0] == '[A, "B"]': # BAD ITEM ERROR (e1) + with pytest.raises(CompositionError) as error_text: + comp = Composition([A, "B"]) + assert f"Every item in the 'pathways' arg of the constructor for Composition-0 must be " \ + f"a Node, list, set, tuple or dict; the following are not: 'B'" in str(error_text.value) + elif config[0] == '[[A,B, [C,D]],[E,F]]': # EMBEDDED LIST ERROR (e2) + with pytest.raises(CompositionError) as error_text: + comp = Composition([[A,B, [C,D]],[E,F]]) + assert f"The following entries in a pathway specified for \'Composition-0\' are not " \ + f"a Node (Mechanism or Composition) or a Projection nor a set of either: " \ + f"[(ProcessingMechanism C), (ProcessingMechanism D)]" in str(error_text.value) + elif config[0] == '[{A,B}, [MapProj(B,D)], [C,D]]': # BAD ITEM ERROR (e3) + with 
pytest.raises(CompositionError) as error_text: + comp = Composition([{A,B}, [MappingProjection(B,D)], [C,D]]) + assert f"Every item in the 'pathways' arg of the constructor for Composition-0 must be " \ + f"a Node, list, set, tuple or dict; the following are not: " \ + f"(MappingProjection MappingProjection from B[OutputPort-0] to D[InputPort-0])" \ + in str(error_text.value) + + else: + assert False, f"BAD CONFIG ARG: {config}" + + # Tests: + if config[1] == 's1': + assert len(A.efferents) == 2 + assert all(len(receiver.path_afferents) == 1 for receiver in {B,C}) + assert all(receiver in [p.receiver.owner for p in A.efferents] for receiver in {B,C}) + assert [A] == comp.get_nodes_by_role(NodeRole.INPUT) + assert all(node in comp.get_nodes_by_role(NodeRole.OUTPUT) for node in {B,C}) + if config[1] == 's2': + assert all(len(sender.efferents) == 1 for sender in {B,C}) + assert len(D.path_afferents) == 2 + assert all(D in [p.receiver.owner for p in receiver.efferents] for receiver in {B,C}) + assert [A] == comp.get_nodes_by_role(NodeRole.SINGLETON) + assert all(node in comp.get_nodes_by_role(NodeRole.INPUT) for node in {B,C}) + assert all(node in comp.get_nodes_by_role(NodeRole.OUTPUT) for node in {A,D}) + if config[1] == 'p1': + assert len(B.efferents) == 1 + assert len(C.path_afferents) == 1 + assert B.efferents[0].receiver.owner == C + assert [A] == comp.get_nodes_by_role(NodeRole.SINGLETON) + assert all(node in comp.get_nodes_by_role(NodeRole.INPUT) for node in {A,B}) + assert all(node in comp.get_nodes_by_role(NodeRole.OUTPUT) for node in {A,C}) + if config[1] == 'p2': + assert all(node in comp.get_nodes_by_role(NodeRole.SINGLETON) for node in {A,B,C}) + if config[1] == 'p3': + assert len(A.efferents) == 1 + assert len(B.path_afferents) == 1 + assert A.efferents[0].receiver.owner == B + assert all(node in comp.get_nodes_by_role(NodeRole.SINGLETON) for node in {C,D}) + if config[1] == 'p4': + assert len(C.efferents) == 1 + assert len(D.path_afferents) == 1 + assert C.efferents[0].receiver.owner == D + assert all(node in comp.get_nodes_by_role(NodeRole.SINGLETON) for node in {A,B}) + assert all(node in comp.get_nodes_by_role(NodeRole.INPUT) for node in {A,B,C}) + assert all(node in comp.get_nodes_by_role(NodeRole.OUTPUT) for node in {A,D}) + if config[1] == 'p5': + assert len(A.efferents) == 1 + assert len(B.path_afferents) == 1 + assert A.efferents[0].receiver.owner == B + assert len(C.efferents) == 1 + assert len(D.path_afferents) == 1 + assert C.efferents[0].receiver.owner == D + assert all(node in comp.get_nodes_by_role(NodeRole.INPUT) for node in {A,C}) + assert all(node in comp.get_nodes_by_role(NodeRole.OUTPUT) for node in {B,D}) def test_add_pathways_bad_arg_error(self): I = InputPort(name='I') c = Composition() with pytest.raises(pnl.CompositionError) as error_text: c.add_pathways(pathways=I) - assert ("The \'pathways\' arg for the add_pathways method" in str(error_text.value) - and "must be a Node, list, tuple, dict or Pathway object" in str(error_text.value)) + assert f"The 'pathways' arg for the add_pathways method of Composition-0 must be a " \ + f"Node, list, set, tuple, dict or Pathway object: (InputPort I [Deferred Init])." 
\ + in str(error_text.value) def test_add_pathways_arg_pathways_list_and_item_not_list_or_dict_or_node_error(self): A = ProcessingMechanism(name='A') @@ -882,8 +1027,8 @@ def test_add_pathways_arg_pathways_list_and_item_not_list_or_dict_or_node_error( c = Composition() with pytest.raises(pnl.CompositionError) as error_text: c.add_pathways(pathways=[[A,B], 'C']) - assert ("Every item in the \'pathways\' arg for the add_pathways method" in str(error_text.value) - and "must be a Node, list, tuple or dict:" in str(error_text.value)) + assert f"Every item in the 'pathways' arg for the add_pathways method of Composition-0 must be a " \ + f"Node, list, set, tuple or dict; the following are not: 'C'" in str(error_text.value) def test_for_add_processing_pathway_recursion_error(self): A = TransferMechanism() @@ -902,6 +1047,7 @@ def test_for_add_learning_pathway_recursion_error(self): f"add_backpropagation_learning_pathway method of {C.name}." in str(error_text.value) +@pytest.mark.pathways class TestDuplicatePathwayWarnings: def test_add_processing_pathway_exact_duplicate_warning(self): @@ -936,9 +1082,10 @@ def test_add_processing_pathway_subset_duplicate_warning(self): comp.add_linear_processing_pathway(pathway=[A,B,C]) regexp = "Pathway specified in 'pathway' arg for add_linear_procesing_pathway method .*"\ - f"has same Nodes in same order as one already in {comp.name}" + f"has same Nodes in same order as one already in {comp.name}" with pytest.warns(UserWarning, match=regexp): comp.add_linear_processing_pathway(pathway=[A,B]) + assert True def test_add_backpropagation_pathway_exact_duplicate_warning(self): A = TransferMechanism() @@ -996,6 +1143,7 @@ def test_add_processing_pathway_same_nodes_but_reversed_order_is_OK(self): len(comp.pathways)==2 +@pytest.mark.pathways class TestCompositionPathwaysArg: def test_composition_pathways_arg_pathway_object(self): @@ -1042,6 +1190,202 @@ def test_composition_pathways_arg_mech(self): PathwayRole.OUTPUT, PathwayRole.TERMINAL} + def test_composition_pathways_arg_set(self): + A = ProcessingMechanism(name='A') + B = ProcessingMechanism(name='B') + C = ProcessingMechanism(name='C') + c = Composition({A,B,C}) + # # MODIFIED 4/11/22 OLD: + # # assert all(name in c.pathways.names for name in {'Pathway-0', 'Pathway-1', 'Pathway-2'}) + # MODIFIED 4/11/22 NEW: + assert all(name in c.pathways.names for name in {'Pathway-0'}) + # MODIFIED 4/11/22 END + assert all(set(c.get_roles_by_node(node)) == {NodeRole.INPUT, + NodeRole.ORIGIN, + NodeRole.SINGLETON, + NodeRole.OUTPUT, + NodeRole.TERMINAL} + for node in {A,B,C}) + # assert all(set(c.pathways[i].roles) == {PathwayRole.INPUT, + # PathwayRole.ORIGIN, + # PathwayRole.SINGLETON, + # PathwayRole.OUTPUT, + # PathwayRole.TERMINAL} + # for i in range(0,1)) + + with pytest.raises(CompositionError) as err: + d = Composition({A,B,C.input_port}) + assert f"Every item in the \'pathways\' arg of the constructor for Composition-1 must be " \ + f"a Node, list, set, tuple or dict; the following are not: (InputPort InputPort-0)" in str(err.value) + + @pytest.mark.parametrize("nodes_config", [ + "many_many", + "many_one_many", + ]) + @pytest.mark.parametrize("projs", [ + "none", + "default_proj", + "matrix_spec", + "some_projs_no_default", + "some_projs_and_matrix_spec", + "some_projs_and_default_proj" + ]) + @pytest.mark.parametrize("set_or_list", [ + "set", + "list" + ]) + def test_composition_pathways_arg_with_various_set_or_list_configurations(self, nodes_config, projs, set_or_list): + import itertools + + A = 
ProcessingMechanism(name='A') + B = ProcessingMechanism(name='B') + # FIX: 4/9/22 - INCLUDE TWO PORT MECHANISM: + # B_comparator = ComparatorMechanism(name='B COMPARATOR') + C = ProcessingMechanism(name='C') + D = ProcessingMechanism(name='D') + E = ProcessingMechanism(name='E') + F = ProcessingMechanism(name='F') + M = ProcessingMechanism(name='M') + # C = A.input_port + # proj = MappingProjection(sender=A, receiver=B) + + default_proj = MappingProjection(matrix=[2]) + default_matrix = [1] + # For many_many: + A_D = MappingProjection(sender=A, receiver=D, matrix=[2]) + B_D = MappingProjection(sender=B, receiver=D, matrix=[3]) + B_E = MappingProjection(sender=B, receiver=E, matrix=[4]) + C_E = MappingProjection(sender=C, receiver=E, matrix=[5]) + # For many_one_many: + A_M = MappingProjection(sender=A, receiver=M, matrix=[6]) + C_M = MappingProjection(sender=C, receiver=M, matrix=[7]) + M_D = MappingProjection(sender=M, receiver=D, matrix=[8]) + M_F = MappingProjection(sender=M, receiver=F, matrix=[9]) + B_M = MappingProjection(sender=B, receiver=M, matrix=[100]) + + nodes_1 = {A,B,C} + nodes_2 = {D,E,F} + # FIX: 4/9/22 - MODIFY TO INCLUDE many to first (set->list) and last to many(list->set) + # FIX: 4/9/22 - INCLUDE PORT SPECS: + # nodes_1 = {A.output_port,B,C} if set_or_list == 'set' else [A.output_port,B,C] + # nodes_2 = {D,E,F.input_port} if set_or_list == 'set' else [D,E,F.input_port] + + if projs != "none": + if nodes_config == "many_many": + projections = { + "default_proj": default_proj, + "matrix_spec": [10], + "some_projs_no_default": {A_D, B_E} if set_or_list == 'set' else [A_D, B_E], + "some_projs_and_matrix_spec": [A_D, C_E, default_matrix], # matrix spec requires list + "some_projs_and_default_proj": + {B_D, B_E, default_proj} if set_or_list == 'set' else [B_D, B_E, default_proj] + } + elif nodes_config == "many_one_many": + # Tuples with first item for nodes_1 -> M and second item M -> nodes_2 + projections = { + "default_proj": (default_proj, default_proj), + "matrix_spec": ([11], [12]), + "some_projs_no_default": + ({A_M, C_M}, {M_D, M_F}) if set_or_list == 'set' else ([A_M, C_M], [M_D, M_F]), + "some_projs_and_matrix_spec": ([A_M, C_M, default_matrix], + [M_D, M_F, default_matrix]), # matrix spec requires list + "some_projs_and_default_proj": + ({A_M, C_M, default_proj}, {M_D, M_F, default_proj}) + if set_or_list == 'set' else ([A_M, C_M, default_proj], [M_D, M_F, default_proj]) + } + else: + assert False, f"TEST ERROR: No handling for '{nodes_config}' condition." 
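What the branches below verify, in short: a set in a pathway runs its members in parallel, so a set -> set step with no Projection specification (or a default one) is wired all-to-all, while pre-specified Projections restrict the wiring to exactly the pairs given. A minimal sketch of the all-to-all case, assuming the constructor semantics this test exercises:

    import psyneulink as pnl

    A, B, C, D = (pnl.ProcessingMechanism(name=n) for n in 'ABCD')
    comp = pnl.Composition(pathways=[{A, B}, {C, D}])  # set -> set: all-to-all

    assert all(len(n.efferents) == 2 for n in (A, B))       # each sender projects to C and D
    assert all(len(n.path_afferents) == 2 for n in (C, D))  # each receiver gets A and B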
+
+        if projs in {'default_proj', 'some_projs_and_default_proj'}:
+            matrix_val = default_proj._init_args['matrix']
+        elif projs == 'matrix_spec':
+            matrix_val = projections[projs]
+        elif projs == "some_projs_and_matrix_spec":
+            matrix_val = default_matrix
+
+        if nodes_config == "many_many":
+
+            if projs == 'none':
+                comp = Composition([nodes_1, nodes_2])
+                matrix_val = default_matrix
+            else:
+                comp = Composition([nodes_1, projections[projs], nodes_2])
+
+            if projs == "some_projs_no_default":
+                assert A_D in comp.projections
+                assert B_E in comp.projections
+                # Pre-specified Projections that were not included in pathways should not be in Composition:
+                assert B_D not in comp.projections
+                assert C_E not in comp.projections
+                assert C in comp.get_nodes_by_role(NodeRole.SINGLETON)
+                assert F in comp.get_nodes_by_role(NodeRole.SINGLETON)
+
+            else:
+                # If there is no Projection specification or a default one, then there should be all-to-all Projections
+                # Each sender projects to all 3 receivers
+                assert all(len([p for p in node.efferents if p in comp.projections])==3 for node in {A,B,C})
+                # Each receiver gets Projections from all 3 senders
+                assert all(len([p for p in node.path_afferents if p in comp.projections])==3 for node in {D,E,F})
+                for sender,receiver in itertools.product([A,B,C],[D,E,F]):
+                    # Each sender projects to each of the receivers
+                    assert sender in {p.sender.owner for p in receiver.path_afferents if p in comp.projections}
+                    # Each receiver receives a Projection from each of the senders
+                    assert receiver in {p.receiver.owner for p in sender.efferents if p in comp.projections}
+
+                # Matrices for pre-specified Projections should preserve their specified value:
+                assert A_D.parameters.matrix.get() == [2]
+                assert B_D.parameters.matrix.get() == [3]
+                assert B_E.parameters.matrix.get() == [4]
+                assert C_E.parameters.matrix.get() == [5]
+                # Matrices for pairs without pre-specified Projections should be assigned value of default
+                assert [p.parameters.matrix.get() for p in A.efferents if p.receiver.owner.name == 'E'] == matrix_val
+                assert [p.parameters.matrix.get() for p in A.efferents if p.receiver.owner.name == 'F'] == matrix_val
+                assert [p.parameters.matrix.get() for p in B.efferents if p.receiver.owner.name == 'F'] == matrix_val
+                assert [p.parameters.matrix.get() for p in C.efferents if p.receiver.owner.name == 'D'] == matrix_val
+                assert [p.parameters.matrix.get() for p in C.efferents if p.receiver.owner.name == 'F'] == matrix_val
+
+        elif nodes_config == 'many_one_many':
+            if projs == 'none':
+                comp = Composition([nodes_1, M, nodes_2])
+                matrix_val = default_matrix
+
+            else:
+                comp = Composition([nodes_1, projections[projs][0], M, projections[projs][1], nodes_2])
+                if projs == 'matrix_spec':
+                    matrix_val = projections[projs][1]
+
+            if projs == "some_projs_no_default":
+                assert all(p in comp.projections for p in {A_M, C_M, M_D, M_F})
+                # Pre-specified Projections that were not included in pathways should not be in Composition:
+                assert B_M not in comp.projections
+                assert B in comp.get_nodes_by_role(NodeRole.SINGLETON)
+                assert E in comp.get_nodes_by_role(NodeRole.SINGLETON)
+
+            else:
+                # Each sender projects to just one receiver
+                assert all(len([p for p in node.efferents if p in comp.projections])==1 for node in {A,B,C})
+                # Each receiver receives from just one sender
+                assert all(len([p for p in node.path_afferents if p in comp.projections])==1 for node in {D,E,F})
+                for sender,receiver in itertools.product([A,B,C],[M]):
+                    # Each sender projects to M:
+                    assert sender in {p.sender.owner for p in receiver.path_afferents if p in comp.projections}
+                    # M receives a Projection from each sender:
+                    assert receiver in {p.receiver.owner for p in sender.efferents if p in comp.projections}
+                # Matrices for pre-specified Projections should preserve their specified value:
+                assert A_M.parameters.matrix.get() == [6]
+                assert C_M.parameters.matrix.get() == [7]
+                assert M_D.parameters.matrix.get() == [8]
+                assert M_F.parameters.matrix.get() == [9]
+                # Matrices for pairs without pre-specified Projections should be assigned value of default
+                assert [p.parameters.matrix.get() for p in B.efferents if p.receiver.owner.name == 'M'] == [100]
+                assert [p.parameters.matrix.get() for p in M.efferents if p.receiver.owner.name == 'E'] == matrix_val
+
+        else:
+            assert False, f"TEST ERROR: No handling for '{nodes_config}' condition."
+
+    def test_pathways_examples(self):
+        pass
+
     def test_composition_pathways_arg_dict_and_list_and_pathway_roles(self):
         A = ProcessingMechanism(name='A')
         B = ProcessingMechanism(name='B')
@@ -1107,8 +1451,8 @@ def test_composition_pathways_arg_pathways_list_and_item_not_list_or_dict_or_nod
         B = ProcessingMechanism(name='B')
         with pytest.raises(pnl.CompositionError) as error_text:
             c = Composition(pathways=[[A,B], 'C'])
-        assert ("Every item in the \'pathways\' arg of the constructor" in str(error_text.value) and
-                "must be a Node, list, tuple or dict:" in str(error_text.value))
+        assert ("Every item in the 'pathways' arg of the constructor for Composition-0 must be "
+                "a Node, list, set, tuple or dict; the following are not: 'C'" in str(error_text.value))

     def test_composition_pathways_arg_pathways_dict_and_item_not_list_dict_or_node_error(self):
         A = ProcessingMechanism(name='A')
@@ -1117,8 +1461,8 @@ def test_composition_pathways_arg_pathways_dict_and_item_not_list_dict_or_node_e
         D = ProcessingMechanism(name='D')
         with pytest.raises(pnl.CompositionError) as error_text:
             c = Composition(pathways=[{'P1':[A,B]}, 'C'])
-        assert ("Every item in the \'pathways\' arg of the constructor" in str(error_text.value) and
-                "must be a Node, list, tuple or dict:" in str(error_text.value))
+        assert ("Every item in the 'pathways' arg of the constructor for Composition-0 must be "
+                "a Node, list, set, tuple or dict; the following are not: 'C'" in str(error_text.value))

     def test_composition_pathways_arg_dict_with_more_than_one_entry_error(self):
         A = ProcessingMechanism(name='A')
@@ -1212,16 +1556,17 @@ def test_composition_pathways_bad_arg_error(self):
         I = InputPort(name='I')
         with pytest.raises(pnl.CompositionError) as error_text:
             c = Composition(pathways=I)
-        assert ("The \'pathways\' arg of the constructor" in str(error_text.value) and
-                "must be a Node, list, tuple, dict or Pathway object" in str(error_text.value))
+        assert ("The 'pathways' arg of the constructor for Composition-0 must be a Node, list, "
+                "set, tuple, dict or Pathway object: (InputPort I [Deferred Init])."
+ in str(error_text.value)) def test_composition_arg_pathways_list_and_item_not_list_or_dict_or_node_error(self): A = ProcessingMechanism(name='A') B = ProcessingMechanism(name='B') with pytest.raises(pnl.CompositionError) as error_text: c = Composition(pathways=[[A,B], 'C']) - assert ("Every item in the \'pathways\' arg of the constructor" in str(error_text.value) and - "must be a Node, list, tuple or dict:" in str(error_text.value)) + assert ("Every item in the 'pathways' arg of the constructor for Composition-0 must be a " + "Node, list, set, tuple or dict; the following are not: 'C'" in str(error_text.value)) def test_composition_learning_pathway_dict_and_list_error(self): A = ProcessingMechanism(name='A') @@ -1695,7 +2040,7 @@ def test_recurrent_transfer_mechanisms(self): output = comp.run(inputs={R1: [1.0]}, num_trials=3) assert np.allclose(output, [[np.array([22.])]]) - +@pytest.mark.pathways class TestExecutionOrder: def test_2_node_loop(self): A = ProcessingMechanism(name="A") @@ -2406,7 +2751,7 @@ def test_exact_time(self): assert comp.scheduler.execution_list[comp.default_execution_id] == [{A, B}] assert comp.scheduler.execution_timestamps[comp.default_execution_id][0].absolute == 1 * pnl._unit_registry.ms - +@pytest.mark.pathways class TestGetMechanismsByRole: def test_multiple_roles(self): @@ -3283,8 +3628,9 @@ def test_lpp_invalid_matrix_keyword(self): with pytest.raises(CompositionError) as error_text: # Typo in IdentityMatrix comp.add_linear_processing_pathway([A, "IdntityMatrix", B]) - assert ("An entry in \'pathway\' arg for add_linear_procesing_pathway method" in str(error_text.value) and - "is not a Node (Mechanism or Composition) or a Projection: \'IdntityMatrix\'." in str(error_text.value)) + assert (f"The following entries in a pathway specified for 'Composition-0' are not a Node " + f"(Mechanism or Composition) or a Projection nor a set of either: 'IdntityMatrix'" + in str(error_text.value)) @pytest.mark.composition def test_LPP_two_origins_one_terminal(self, comp_mode): @@ -4585,153 +4931,7 @@ def test_combine_two_overlapping_trees(self): assert len(terminals) == 1 assert myMech5 in terminals - # MODIFIED 5/8/20 OLD: ELIMINATE SYSTEM: - # FIX SHOULD THESE BE RE-WRITTEN WITH STANDARD NESTED COMPOSITIONS AND PATHWAYS? 
- # def test_one_pathway_inside_one_system(self): - # # create a PathwayComposition | blank slate for composition - # myPath = PathwayComposition() - # - # # create mechanisms to add to myPath - # myMech1 = TransferMechanism(function=Linear(slope=2.0)) # 1 x 2 = 2 - # myMech2 = TransferMechanism(function=Linear(slope=2.0)) # 2 x 2 = 4 - # myMech3 = TransferMechanism(function=Linear(slope=2.0)) # 4 x 2 = 8 - # - # # add mechanisms to myPath with default MappingProjections between them - # myPath.add_linear_processing_pathway([myMech1, myMech2, myMech3]) - # - # # assign input to origin mech - # stimulus = {myMech1: [[1]]} - # - # # execute path (just for comparison) - # myPath.run(inputs=stimulus) - # - # # create a SystemComposition | blank slate for composition - # sys = SystemComposition() - # - # # add a PathwayComposition [myPath] to the SystemComposition [sys] - # sys.add_pathway(myPath) - # - # # execute the SystemComposition - # output = sys.run(inputs=stimulus) - # assert np.allclose([8], output) - # - # def test_two_paths_converge_one_system(self): - # - # # mech1 ---> mech2 -- - # # --> mech3 - # # mech4 ---> mech5 -- - # - # # 1x2=2 ---> 2x2=4 -- - # # --> (4+4)x2=16 - # # 1x2=2 ---> 2x2=4 -- - # - # # create a PathwayComposition | blank slate for composition - # myPath = PathwayComposition() - # - # # create mechanisms to add to myPath - # myMech1 = TransferMechanism(function=Linear(slope=2.0)) # 1 x 2 = 2 - # myMech2 = TransferMechanism(function=Linear(slope=2.0)) # 2 x 2 = 4 - # myMech3 = TransferMechanism(function=Linear(slope=2.0)) # 4 x 2 = 8 - # - # # add mechanisms to myPath with default MappingProjections between them - # myPath.add_linear_processing_pathway([myMech1, myMech2, myMech3]) - # - # myPath2 = PathwayComposition() - # myMech4 = TransferMechanism(function=Linear(slope=2.0)) # 1 x 2 = 2 - # myMech5 = TransferMechanism(function=Linear(slope=2.0)) # 2 x 2 = 4 - # myPath2.add_linear_processing_pathway([myMech4, myMech5, myMech3]) - # - # sys = SystemComposition() - # sys.add_pathway(myPath) - # sys.add_pathway(myPath2) - # # assign input to origin mechs - # stimulus = {myMech1: [[1]], myMech4: [[1]]} - # - # # schedule = Scheduler(composition=sys) - # output = sys.run(inputs=stimulus) - # assert np.allclose(16, output) - # - # def test_two_paths_in_series_one_system(self): - # - # # [ mech1 --> mech2 --> mech3 ] --> [ mech4 --> mech5 --> mech6 ] - # # 1x2=2 --> 2x2=4 --> 4x2=8 --> (8+1)x2=18 --> 18x2=36 --> 36*2=64 - # # X - # # | - # # 1 - # # (if mech4 were recognized as an origin mech, and used SOFT_CLAMP, we would expect the final result to be 72) - # # create a PathwayComposition | blank slate for composition - # myPath = PathwayComposition() - # - # # create mechanisms to add to myPath - # myMech1 = TransferMechanism(function=Linear(slope=2.0)) # 1 x 2 = 2 - # myMech2 = TransferMechanism(function=Linear(slope=2.0)) # 2 x 2 = 4 - # myMech3 = TransferMechanism(function=Linear(slope=2.0)) # 4 x 2 = 8 - # - # # add mechanisms to myPath with default MappingProjections between them - # myPath.add_linear_processing_pathway([myMech1, myMech2, myMech3]) - # - # myPath2 = PathwayComposition() - # myMech4 = TransferMechanism(function=Linear(slope=2.0)) - # myMech5 = TransferMechanism(function=Linear(slope=2.0)) - # myMech6 = TransferMechanism(function=Linear(slope=2.0)) - # myPath2.add_linear_processing_pathway([myMech4, myMech5, myMech6]) - # - # sys = SystemComposition() - # sys.add_pathway(myPath) - # sys.add_pathway(myPath2) - # 
sys.add_projection(projection=MappingProjection(sender=myMech3, - # receiver=myMech4), sender=myMech3, receiver=myMech4) - # - # # assign input to origin mechs - # # myMech4 ignores its input from the outside world because it is no longer considered an origin! - # stimulus = {myMech1: [[1]]} - # - # # schedule = Scheduler(composition=sys) - # output = sys.run(inputs=stimulus) - # - # assert np.allclose([64], output) - # - # def test_two_paths_converge_one_system_scheduling_matters(self): - # - # # mech1 ---> mech2 -- - # # --> mech3 - # # mech4 ---> mech5 -- - # - # # 1x2=2 ---> 2x2=4 -- - # # --> (4+4)x2=16 - # # 1x2=2 ---> 2x2=4 -- - # - # # create a PathwayComposition | blank slate for composition - # myPath = PathwayComposition() - # - # # create mechanisms to add to myPath - # myMech1 = IntegratorMechanism(function=Linear(slope=2.0)) # 1 x 2 = 2 - # myMech2 = TransferMechanism(function=Linear(slope=2.0)) # 2 x 2 = 4 - # myMech3 = TransferMechanism(function=Linear(slope=2.0)) # 4 x 2 = 8 - # - # # add mechanisms to myPath with default MappingProjections between them - # myPath.add_linear_processing_pathway([myMech1, myMech2, myMech3]) - # - # myPathScheduler = Scheduler(composition=myPath) - # myPathScheduler.add_condition(myMech2, AfterNCalls(myMech1, 2)) - # - # myPath.run(inputs={myMech1: [[1]]}, scheduler=myPathScheduler) - # myPath.run(inputs={myMech1: [[1]]}, scheduler=myPathScheduler) - # myPath2 = PathwayComposition() - # myMech4 = TransferMechanism(function=Linear(slope=2.0)) # 1 x 2 = 2 - # myMech5 = TransferMechanism(function=Linear(slope=2.0)) # 2 x 2 = 4 - # myPath2.add_linear_processing_pathway([myMech4, myMech5, myMech3]) - # - # sys = SystemComposition() - # sys.add_pathway(myPath) - # sys.add_pathway(myPath2) - # # assign input to origin mechs - # stimulus = {myMech1: [[1]], myMech4: [[1]]} - # - # # schedule = Scheduler(composition=sys) - # output = sys.run(inputs=stimulus) - # assert np.allclose(16, output) - # MODIFIED 5/8/20 END + @pytest.mark.pathways def test_three_level_deep_pathway_routing_single_mech(self): p2 = ProcessingMechanism(name='p2') p0 = ProcessingMechanism(name='p0') @@ -4744,6 +4944,7 @@ def test_three_level_deep_pathway_routing_single_mech(self): result = c0.run([5]) assert result == [5] + @pytest.mark.pathways def test_three_level_deep_pathway_routing_two_mech(self): p3a = ProcessingMechanism(name='p3a') p3b = ProcessingMechanism(name='p3b') @@ -4759,6 +4960,7 @@ def test_three_level_deep_pathway_routing_two_mech(self): result = c1.run([5]) assert result == [5, 5] + @pytest.mark.pathways def test_three_level_deep_modulation_routing_single_mech(self): p3 = ProcessingMechanism(name='p3') ctrl1 = ControlMechanism(name='ctrl1', @@ -4771,6 +4973,7 @@ def test_three_level_deep_modulation_routing_single_mech(self): result = c1.run({c2: 2, ctrl1: 5}) assert result == [10] + @pytest.mark.pathways def test_three_level_deep_modulation_routing_two_mech(self): p3a = ProcessingMechanism(name='p3a') p3b = ProcessingMechanism(name='p3b') @@ -4787,6 +4990,7 @@ def test_three_level_deep_modulation_routing_two_mech(self): result = c1.run({c2: [[2], [2]], ctrl1: [5]}) assert result == [10, 10] + @pytest.mark.pathways @pytest.mark.state_features def test_four_level_nested_transfer_mechanism_composition_parallel(self): # mechanisms @@ -7147,6 +7351,31 @@ def test_controller_role(self): assert comp.get_nodes_by_role(NodeRole.CONTROLLER) == [comp.controller] assert comp.nodes_to_roles[comp.controller] == {NodeRole.CONTROLLER} + def 
test_inactive_terminal_projection(self): + A = pnl.ProcessingMechanism(name='A') + B = pnl.ProcessingMechanism(name='B') + C = pnl.ProcessingMechanism(name='C') + D = pnl.ProcessingMechanism(name='D') + + pnl.MappingProjection(sender=A, receiver=D) + comp = pnl.Composition([[A],[B,C]]) + + assert comp.nodes_to_roles[A] == {NodeRole.INPUT, NodeRole.OUTPUT, NodeRole.SINGLETON, NodeRole.ORIGIN, NodeRole.TERMINAL} + + def test_feedback_projection_added_by_pathway(self): + A = pnl.ProcessingMechanism(name='A') + B = pnl.ProcessingMechanism(name='B') + C = pnl.ProcessingMechanism(name='C') + + icomp = pnl.Composition(pathways=[C]) + ocomp = pnl.Composition(pathways=[A, icomp, (B, pnl.NodeRole.FEEDBACK_SENDER), A]) + + assert ocomp.nodes_to_roles == { + A: {NodeRole.ORIGIN, NodeRole.INPUT, NodeRole.FEEDBACK_RECEIVER}, + icomp: {NodeRole.INTERNAL}, + B: {NodeRole.TERMINAL, NodeRole.OUTPUT, NodeRole.FEEDBACK_SENDER}, + } + class TestMisc: @@ -7386,6 +7615,52 @@ def test_remove_node_learning(self): comp.remove_node(D) comp.learn(inputs={n: [0] for n in comp.get_nodes_by_role(pnl.NodeRole.INPUT)}) + def test_rebuild_scheduler_after_add_node(self): + A = ProcessingMechanism(name='A') + B = ProcessingMechanism(name='B') + C = ProcessingMechanism(name='C') + + comp = Composition(pathways=[A, C]) + + comp.scheduler.add_condition(C, pnl.EveryNCalls(A, 2)) + comp.add_node(B) + comp.scheduler.add_condition(B, pnl.EveryNCalls(A, 2)) + + comp.run(inputs={A: [0], B: [0]}) + + assert type(comp.scheduler.conditions[A]) is pnl.Always + assert( + type(comp.scheduler.conditions[B]) is pnl.EveryNCalls + and comp.scheduler.conditions[B].args == (A, 2) + ) + assert( + type(comp.scheduler.conditions[C]) is pnl.EveryNCalls + and comp.scheduler.conditions[C].args == (A, 2) + ) + assert comp.scheduler.execution_list[comp.default_execution_id] == [{A}, {A, B}, {C}] + assert set(comp.scheduler._user_specified_conds.keys()) == {B, C} + + def test_rebuild_scheduler_after_remove_node(self): + A = ProcessingMechanism(name='A') + B = ProcessingMechanism(name='B') + C = ProcessingMechanism(name='C') + + comp = Composition(pathways=[[A, C], [B, C]]) + + comp.scheduler.add_condition(C, pnl.EveryNCalls(A, 2)) + comp.remove_node(B) + + comp.run(inputs={A: [0]}) + + assert type(comp.scheduler.conditions[A]) is pnl.Always + assert B not in comp.scheduler.conditions + assert( + type(comp.scheduler.conditions[C]) is pnl.EveryNCalls + and comp.scheduler.conditions[C].args == (A, 2) + ) + assert comp.scheduler.execution_list[comp.default_execution_id] == [{A}, {A}, {C}] + assert set(comp.scheduler._user_specified_conds.keys()) == {C} + class TestInputSpecsDocumentationExamples: diff --git a/tests/composition/test_control.py b/tests/composition/test_control.py index 26169e34515..706dc08ef19 100644 --- a/tests/composition/test_control.py +++ b/tests/composition/test_control.py @@ -2464,10 +2464,15 @@ def test_modulation_of_random_state_direct(self, comp_mode, benchmark, prng): if prng == 'Default': prngs = {s:np.random.RandomState([s]) for s in seeds} + def get_val(s, dty): + return prngs[s].uniform() elif prng == 'Philox': prngs = {s:_SeededPhilox([s]) for s in seeds} + def get_val(s, dty): + return prngs[s].random(dtype=dty) - expected = [prngs[s].uniform() for s in seeds] * 2 + dty = np.float32 if pytest.helpers.llvm_current_fp_precision() == 'fp32' else np.float64 + expected = [get_val(s, dty) for s in seeds] * 2 assert np.allclose(np.squeeze(comp.results[:len(seeds) * 2]), expected) @pytest.mark.benchmark @@ -2496,10 +2501,15 @@ def 
test_modulation_of_random_state_DDM(self, comp_mode, benchmark, prng): # cycle over the seeds twice setting and resetting the random state benchmark(comp.run, inputs={ctl_mech:seeds, mech:5.0}, num_trials=len(seeds) * 2, execution_mode=comp_mode) + precision = pytest.helpers.llvm_current_fp_precision() if prng == 'Default': assert np.allclose(np.squeeze(comp.results[:len(seeds) * 2]), [[100, 21], [100, 23], [100, 20]] * 2) - elif prng == 'Philox': + elif prng == 'Philox' and precision == 'fp64': assert np.allclose(np.squeeze(comp.results[:len(seeds) * 2]), [[100, 19], [100, 21], [100, 21]] * 2) + elif prng == 'Philox' and precision == 'fp32': + assert np.allclose(np.squeeze(comp.results[:len(seeds) * 2]), [[100, 17], [100, 22], [100, 20]] * 2) + else: + assert False, "Unknown PRNG!" @pytest.mark.benchmark @pytest.mark.control @@ -2525,10 +2535,15 @@ def test_modulation_of_random_state_DDM_Analytical(self, comp_mode, benchmark, p # cycle over the seeds twice setting and resetting the random state benchmark(comp.run, inputs={ctl_mech:seeds, mech:0.1}, num_trials=len(seeds) * 2, execution_mode=comp_mode) + precision = pytest.helpers.llvm_current_fp_precision() if prng == 'Default': assert np.allclose(np.squeeze(comp.results[:len(seeds) * 2]), [[-1, 3.99948962], [1, 3.99948962], [-1, 3.99948962]] * 2) - elif prng == 'Philox': + elif prng == 'Philox' and precision == 'fp64': assert np.allclose(np.squeeze(comp.results[:len(seeds) * 2]), [[-1, 3.99948962], [-1, 3.99948962], [1, 3.99948962]] * 2) + elif prng == 'Philox' and precision == 'fp32': + assert np.allclose(np.squeeze(comp.results[:len(seeds) * 2]), [[1, 3.99948978], [-1, 3.99948978], [1, 3.99948978]] * 2) + else: + assert False, "Unknown PRNG!" @pytest.mark.control @pytest.mark.composition @@ -2608,41 +2623,27 @@ def test_ocm_default_function(self): assert type(comp.controller.function) == pnl.GridSearch assert comp.run([1]) == [10] - def test_ocm_searchspace_arg(self): - a = pnl.ProcessingMechanism() - comp = pnl.Composition( - controller_mode=pnl.BEFORE, - nodes=[a], - controller=pnl.OptimizationControlMechanism( - control=pnl.ControlSignal( - modulates=(pnl.SLOPE, a), - intensity_cost_function=lambda x: 0, - adjustment_cost_function=lambda x: 0, - ), - state_features=[a.input_port], - objective_mechanism=pnl.ObjectiveMechanism( - monitor=[a.output_port] - ), - search_space=[pnl.SampleIterator([1, 10])] - ) - ) - assert type(comp.controller.function) == pnl.GridSearch - assert comp.run([1]) == [10] + @pytest.mark.parametrize("nested", [True, False]) + @pytest.mark.parametrize("format", ["list", "tuple", "SampleIterator", "SampleIteratorArray", "SampleSpec", "ndArray"]) + @pytest.mark.parametrize("mode", pytest.helpers.get_comp_execution_modes() + + [pytest.helpers.cuda_param('Python-PTX'), + pytest.param('Python-LLVM', marks=pytest.mark.llvm)]) + def test_ocm_searchspace_format_equivalence(self, format, nested, mode): + if str(mode).startswith('Python-'): + ocm_mode = mode.split('-')[1] + mode = pnl.ExecutionMode.Python + else: + # OCM default mode is Python + ocm_mode = 'Python' - @pytest.mark.parametrize("format,nested", - [("list", True), ("list", False), - ("tuple", True), ("tuple", False), - ("SampleIterator", True), ("SampleIterator", False), - ("SampleSpec", True), ("SampleSpec", False), - ("ndArray", True), ("ndArray", False), - ],) - def test_ocm_searchspace_format_equivalence(self, format, nested): if format == "list": search_space = [1, 10] elif format == "tuple": search_space = (1, 10) elif format == "SampleIterator": - 
search_space = SampleIterator((1,10)) + search_space = SampleIterator((1, 10)) + elif format == "SampleIteratorArray": + search_space = SampleIterator([1, 10]) elif format == "SampleSpec": search_space = SampleSpec(1, 10, 9) elif format == "ndArray": @@ -2658,8 +2659,7 @@ def test_ocm_searchspace_format_equivalence(self, format, nested): controller=pnl.OptimizationControlMechanism( control=pnl.ControlSignal( modulates=(pnl.SLOPE, a), - intensity_cost_function=lambda x: 0, - adjustment_cost_function=lambda x: 0, + cost_options=None ), state_features=[a.input_port], objective_mechanism=pnl.ObjectiveMechanism( @@ -2668,8 +2668,10 @@ def test_ocm_searchspace_format_equivalence(self, format, nested): search_space=search_space ) ) + comp.controller.comp_execution_mode = ocm_mode + assert type(comp.controller.function) == pnl.GridSearch - assert comp.run([1]) == [10] + assert comp.run([1], execution_mode=mode) == [[10]] def test_evc(self): # Mechanisms diff --git a/tests/composition/test_learning.py b/tests/composition/test_learning.py index cdf3c289165..cbba3e2d0c8 100644 --- a/tests/composition/test_learning.py +++ b/tests/composition/test_learning.py @@ -2291,9 +2291,9 @@ def test_backprop_with_various_intersecting_pathway_configurations(self, configu @pytest.mark.parametrize('order', [ - 'color_full', - 'word_partial', - 'word_full', + # 'color_full', + # 'word_partial', + # 'word_full', 'full_overlap' ]) def test_stroop_model_learning(self, order): diff --git a/tests/functions/test_default_allocation.py b/tests/functions/test_default_allocation.py deleted file mode 100644 index 4486e7d7042..00000000000 --- a/tests/functions/test_default_allocation.py +++ /dev/null @@ -1,16 +0,0 @@ -import numpy as np -import pytest - -import psyneulink.core.llvm as pnlvm -from psyneulink.core.components.mechanisms.modulatory.control.controlmechanism import DefaultAllocationFunction - -@pytest.mark.function -@pytest.mark.identity_function -@pytest.mark.benchmark(group="IdentityFunction") -def test_basic(benchmark, func_mode): - variable = np.random.rand(1) - f = DefaultAllocationFunction() - EX = pytest.helpers.get_func_execution(f, func_mode) - - res = benchmark(EX, variable) - assert np.allclose(res, variable) diff --git a/tests/functions/test_distribution.py b/tests/functions/test_distribution.py index dcdf066e092..2b0d111d2c3 100644 --- a/tests/functions/test_distribution.py +++ b/tests/functions/test_distribution.py @@ -1,5 +1,6 @@ import numpy as np import pytest +import sys import psyneulink.core.llvm as pnlvm import psyneulink.core.components.functions.nonstateful.distributionfunctions as Functions @@ -14,61 +15,119 @@ RAND4 = np.random.rand() RAND5 = np.random.rand() +dda_expected_default = (1.9774974807292212, 0.012242689689501842, + 1.9774974807292207, 1.3147677945132479, 1.7929299891370192, + 1.9774974807292207, 1.3147677945132479, 1.7929299891370192) +dda_expected_random = (0.4236547993389047, -2.7755575615628914e-17, + 0.5173675420165031, 0.06942854144616283, 6.302631815990666, + 1.4934079600147951, 0.4288991185241868, 1.7740760781361433) +dda_expected_negative = (0.42365479933890504, 0.0, + 0.5173675420165031, 0.06942854144616283, 6.302631815990666, + 1.4934079600147951, 0.4288991185241868, 1.7740760781361433) +dda_expected_small = (0.5828813465336954, 0.04801236718458773, + 0.532471083815943, 0.09633801362499317, 6.111833139205608, + 1.5821207676710864, 0.5392724012504414, 1.8065252817609618) +# Different libm implementations produce slightly different results +if 
sys.platform.startswith("win") or sys.platform.startswith("darwin"):
+    dda_expected_small = (0.5828813465336954, 0.04801236718458773,
+                          0.5324710838150166, 0.09633802135385469, 6.119380538293901,
+                          1.58212076767016, 0.5392724012504414, 1.8065252817609618)
+
+normal_expected_mt = (1.0890232855122397)
+uniform_expected_mt = (0.6879771504250405)
+normal_expected_philox = (0.5910357654927911)
+uniform_expected_philox = (0.6043448764869507)
+
+llvm_expected = {'fp64': {}, 'fp32': {}}
+llvm_expected['fp64'][dda_expected_small] = (0.5828813465336954, 0.04801236718458773,
+                                             0.5324710838085324, 0.09633787836991654, 6.0158766570416775,
+                                             1.5821207675877176, 0.5392731045768397, 1.8434859117411773)
+
+# add fp32 results
+llvm_expected['fp32'][dda_expected_random] = (0.42365485429763794, 0.0,
+                                              0.5173675417900085, 0.06942801177501678, 6.302331447601318,
+                                              1.4934077262878418, 0.428894966840744, 1.7738982439041138)
+llvm_expected['fp32'][dda_expected_negative] = (0.4236549735069275, 5.960464477539063e-08,
+                                                0.5173678398132324, 0.06942889094352722, 6.303247451782227,
+                                                1.4934080839157104, 0.42889583110809326, 1.7739603519439697)
+llvm_expected['fp32'][dda_expected_small] = None
+llvm_expected['fp32'][normal_expected_philox] = (0.5655658841133118)
+llvm_expected['fp32'][uniform_expected_philox] = (0.6180108785629272)
+
 test_data = [
-    (Functions.DriftDiffusionAnalytical, test_var, {}, None,
-     (1.9774974807292212, 0.012242689689501842, 1.9774974807292207, 1.3147677945132479, 1.7929299891370192, 1.9774974807292207, 1.3147677945132479, 1.7929299891370192)),
-    (Functions.DriftDiffusionAnalytical, test_var, {"drift_rate": RAND1, "threshold": RAND2, "starting_value": RAND3, "non_decision_time":RAND4, "noise": RAND5}, None,
-     (0.4236547993389047, -2.7755575615628914e-17, 0.5173675420165031, 0.06942854144616283, 6.302631815990666, 1.4934079600147951, 0.4288991185241868, 1.7740760781361433)),
-    (Functions.DriftDiffusionAnalytical, -test_var, {"drift_rate": RAND1, "threshold": RAND2, "starting_value": RAND3, "non_decision_time":RAND4, "noise": RAND5}, None,
-     (0.42365479933890504, 0.0, 0.5173675420165031, 0.06942854144616283, 6.302631815990666, 1.4934079600147951, 0.4288991185241868, 1.7740760781361433)),
-# FIXME: Rounding errors result in different behaviour on different platforms
-#    (Functions.DriftDiffusionAnalytical, 1e-4, {"drift_rate": 1e-5, "threshold": RAND2, "starting_value": RAND3, "non_decision_time":RAND4, "noise": RAND5}, "Rounding errors",
-#     (0.5828813465336954, 0.04801236718458773, 0.532471083815943, 0.09633801362499317, 6.111833139205608, 1.5821207676710864, 0.5392724012504414, 1.8065252817609618)),
+    pytest.param(Functions.DriftDiffusionAnalytical, test_var, {}, None, None,
+                 dda_expected_default, id="DriftDiffusionAnalytical-DefaultParameters"),
+    pytest.param(Functions.DriftDiffusionAnalytical, test_var,
+                 {"drift_rate": RAND1, "threshold": RAND2, "starting_value": RAND3,
+                  "non_decision_time":RAND4, "noise": RAND5}, None, None,
+                 dda_expected_random, id="DriftDiffusionAnalytical-RandomParameters"),
+    pytest.param(Functions.DriftDiffusionAnalytical, -test_var,
+                 {"drift_rate": RAND1, "threshold": RAND2, "starting_value": RAND3,
+                  "non_decision_time":RAND4, "noise": RAND5}, None, None,
+                 dda_expected_negative, id="DriftDiffusionAnalytical-NegInput"),
+    pytest.param(Functions.DriftDiffusionAnalytical, 1e-4,
+                 {"drift_rate": 1e-5, "threshold": RAND2, "starting_value": RAND3,
+                  "non_decision_time":RAND4, "noise": RAND5}, None, "Rounding Errors",
+                 dda_expected_small, id="DriftDiffusionAnalytical-SmallDriftRate"),
+    pytest.param(Functions.DriftDiffusionAnalytical, -1e-4,
+                 {"drift_rate": 1e-5, "threshold": RAND2, "starting_value": RAND3,
+                  "non_decision_time":RAND4, "noise": RAND5}, None, "Rounding Errors",
+                 dda_expected_small, id="DriftDiffusionAnalytical-SmallDriftRate-NegInput"),
+    pytest.param(Functions.DriftDiffusionAnalytical, 1e-4,
+                 {"drift_rate": -1e-5, "threshold": RAND2, "starting_value": RAND3,
+                  "non_decision_time":RAND4, "noise": RAND5}, None, "Rounding Errors",
+                 dda_expected_small, id="DriftDiffusionAnalytical-SmallNegDriftRate"),
 # Two tests with different inputs to show that input is ignored.
-    (Functions.NormalDist, 1e14, {"mean": RAND1, "standard_deviation": RAND2}, None, (1.0890232855122397)),
-    (Functions.NormalDist, 1e-4, {"mean": RAND1, "standard_deviation": RAND2}, None, (1.0890232855122397)),
-    (Functions.UniformDist, 1e14, {"low": min(RAND1, RAND2), "high": max(RAND1, RAND2)}, None, (0.6879771504250405)),
-    (Functions.UniformDist, 1e-4, {"low": min(RAND1, RAND2), "high": max(RAND1, RAND2)}, None, (0.6879771504250405)),
+    pytest.param(Functions.NormalDist, 1e14, {"mean": RAND1, "standard_deviation": RAND2},
+                 None, None, normal_expected_mt, id="NormalDist"),
+    pytest.param(Functions.NormalDist, 1e-4, {"mean": RAND1, "standard_deviation": RAND2},
+                 None, None, normal_expected_mt, id="NormalDist Small Input"),
+    pytest.param(Functions.UniformDist, 1e14, {"low": min(RAND1, RAND2), "high": max(RAND1, RAND2)},
+                 None, None, uniform_expected_mt, id="UniformDist"),
+    pytest.param(Functions.UniformDist, 1e-4, {"low": min(RAND1, RAND2), "high": max(RAND1, RAND2)},
+                 None, None, uniform_expected_mt, id="UniformDist Small Input"),
-# Inf inputs select Philox PRNG, test_var should never be inf
+# The same tests with the Philox PRNG selected explicitly; input is still ignored
-    (Functions.NormalDist, np.inf, {"mean": RAND1, "standard_deviation": RAND2}, None, (0.5910357654927911)),
-    (Functions.NormalDist, -np.inf, {"mean": RAND1, "standard_deviation": RAND2}, None, (0.5910357654927911)),
-    (Functions.UniformDist, np.inf, {"low": min(RAND1, RAND2), "high": max(RAND1, RAND2)}, None, (0.6043448764869507)),
-    (Functions.UniformDist, -np.inf, {"low": min(RAND1, RAND2), "high": max(RAND1, RAND2)}, None, (0.6043448764869507)),
+    pytest.param(Functions.NormalDist, 1e14, {"mean": RAND1, "standard_deviation": RAND2},
+                 _SeededPhilox, None, normal_expected_philox, id="NormalDist Philox"),
+    pytest.param(Functions.NormalDist, 1e-4, {"mean": RAND1, "standard_deviation": RAND2},
+                 _SeededPhilox, None, normal_expected_philox, id="NormalDist Small Input Philox"),
+    pytest.param(Functions.UniformDist, 1e14, {"low": min(RAND1, RAND2), "high": max(RAND1, RAND2)},
+                 _SeededPhilox, None, uniform_expected_philox, id="UniformDist Philox"),
+    pytest.param(Functions.UniformDist, 1e-4, {"low": min(RAND1, RAND2), "high": max(RAND1, RAND2)},
+                 _SeededPhilox, None, uniform_expected_philox, id="UniformDist Small Input Philox"),
 ]
-# use list, naming function produces ugly names
-names = [
-    "DriftDiffusionAnalytical-DefaultParameters",
-    "DriftDiffusionAnalytical-RandomParameters",
-    "DriftDiffusionAnalytical-NegInput",
-#    "DriftDiffusionAnalytical-SmallDriftRate",
-    "NormalDist1",
-    "NormalDist2",
-    "UniformDist1",
-    "UniformDist2",
-    "NormalDist1 Philox",
-    "NormalDist2 Philox",
-    "UniformDist1 Philox",
-    "UniformDist2 Philox",
-]
-
-
 @pytest.mark.function
 @pytest.mark.transfer_function
 @pytest.mark.benchmark
-@pytest.mark.parametrize("func, variable, params, llvm_skip, expected", test_data, ids=names)
-def test_execute(func, variable, params, llvm_skip, expected, benchmark,
func_mode): +@pytest.mark.parametrize("func, variable, params, prng, llvm_skip, expected", test_data) +def test_execute(func, variable, params, prng, llvm_skip, expected, benchmark, func_mode): benchmark.group = "TransferFunction " + func.componentName - if func_mode != 'Python' and llvm_skip: + if func_mode != 'Python': + precision = pytest.helpers.llvm_current_fp_precision() + # PTX needs only one special case, this is not worth adding + # it to the mechanism above + if func_mode == "PTX" and precision == 'fp32' and expected is dda_expected_negative: + expected = (0.4236549735069275, 5.960464477539063e-08, + 0.5173678398132324, 0.06942889094352722, 6.303247451782227, + 1.4934064149856567, 0.42889145016670227, 1.7737685441970825) + expected = llvm_expected.get(precision, {}).get(expected, expected) + + if expected is None: pytest.skip(llvm_skip) f = func(default_variable=variable, **params) - if np.isinf(variable): - f.parameters.random_state.set(_SeededPhilox([0])) + if prng is not None: + f.parameters.random_state.set(prng([0])) ex = pytest.helpers.get_func_execution(f, func_mode) res = ex(variable) - assert np.allclose(res, expected) + if pytest.helpers.llvm_current_fp_precision() == 'fp32': + assert np.allclose(res, expected) + else: + np.testing.assert_allclose(res, expected) + if benchmark.enabled: benchmark(ex, variable) diff --git a/tests/functions/test_memory.py b/tests/functions/test_memory.py index fe712bc49bb..92d736fda8a 100644 --- a/tests/functions/test_memory.py +++ b/tests/functions/test_memory.py @@ -18,16 +18,13 @@ np.random.seed(0) SIZE=10 test_var = np.random.rand(2, SIZE) -#TODO: Initializer should use different values to test recall -test_initializer = np.array([[test_var[0], test_var[1]]]) +test_initializer = np.array([[test_var[0] * 5, test_var[1] * 4]]) test_noise_arr = np.random.rand(SIZE) RAND1 = np.random.random(1) RAND2 = np.random.random() philox_var = np.random.rand(2, SIZE) -#TODO: Initializer should use different values to test recall -philox_initializer = np.array([[philox_var[0], philox_var[1]]]) test_data = [ # Default initializer does not work @@ -87,25 +84,25 @@ pytest.param(Functions.DictionaryMemory, philox_var, {'seed': module_seed}, [[0.45615033221654855, 0.5684339488686485, 0.018789800436355142, 0.6176354970758771, 0.6120957227224214, 0.6169339968747569, 0.9437480785146242, 0.6818202991034834, 0.359507900573786, 0.43703195379934145], [0.6976311959272649, 0.06022547162926983, 0.6667667154456677, 0.6706378696181594, 0.2103825610738409, 0.1289262976548533, 0.31542835092418386, 0.3637107709426226, 0.5701967704178796, 0.43860151346232035]], - id="DictionaryMemory (Philox)"), + id="DictionaryMemory Philox"), pytest.param(Functions.DictionaryMemory, philox_var, {'rate':RAND1, 'seed': module_seed}, [[0.45615033221654855, 0.5684339488686485, 0.018789800436355142, 0.6176354970758771, 0.6120957227224214, 0.6169339968747569, 0.9437480785146242, 0.6818202991034834, 0.359507900573786, 0.43703195379934145], [0.6976311959272649, 0.06022547162926983, 0.6667667154456677, 0.6706378696181594, 0.2103825610738409, 0.1289262976548533, 0.31542835092418386, 0.3637107709426226, 0.5701967704178796, 0.43860151346232035]], - id="DictionaryMemory Rate (Philox)"), + id="DictionaryMemory Rate Philox"), pytest.param(Functions.DictionaryMemory, philox_var, {'initializer':test_initializer, 'rate':RAND1, 'seed': module_seed}, [[0.45615033221654855, 0.5684339488686485, 0.018789800436355142, 0.6176354970758771, 0.6120957227224214, 0.6169339968747569, 0.9437480785146242, 
0.6818202991034834, 0.359507900573786, 0.43703195379934145], [0.6976311959272649, 0.06022547162926983, 0.6667667154456677, 0.6706378696181594, 0.2103825610738409, 0.1289262976548533, 0.31542835092418386, 0.3637107709426226, 0.5701967704178796, 0.43860151346232035]], - id="DictionaryMemory Initializer (Philox)"), - pytest.param(Functions.DictionaryMemory, philox_var, {'rate':RAND1, 'retrieval_prob':0.1, 'seed': module_seed}, + id="DictionaryMemory Initializer Philox"), + pytest.param(Functions.DictionaryMemory, philox_var, {'rate':RAND1, 'retrieval_prob':0.01, 'seed': module_seed}, [[ 0. for i in range(SIZE) ],[ 0. for i in range(SIZE) ]], - id="DictionaryMemory Low Retrieval (Philox)"), + id="DictionaryMemory Low Retrieval Philox"), pytest.param(Functions.DictionaryMemory, philox_var, {'rate':RAND1, 'storage_prob':0.01, 'seed': module_seed}, [[ 0. for i in range(SIZE) ],[ 0. for i in range(SIZE) ]], - id="DictionaryMemory Low Storage (Philox)"), - pytest.param(Functions.DictionaryMemory, philox_var, {'rate':RAND1, 'retrieval_prob':0.9, 'storage_prob':0.9, 'seed': module_seed}, + id="DictionaryMemory Low Storage Philox"), + pytest.param(Functions.DictionaryMemory, philox_var, {'rate':RAND1, 'retrieval_prob':0.95, 'storage_prob':0.95, 'seed': module_seed}, [[0.45615033221654855, 0.5684339488686485, 0.018789800436355142, 0.6176354970758771, 0.6120957227224214, 0.6169339968747569, 0.9437480785146242, 0.6818202991034834, 0.359507900573786, 0.43703195379934145], [0.6976311959272649, 0.06022547162926983, 0.6667667154456677, 0.6706378696181594, 0.2103825610738409, 0.1289262976548533, 0.31542835092418386, 0.3637107709426226, 0.5701967704178796, 0.43860151346232035]], - id="DictionaryMemory High Storage/Retrieve (Philox)"), + id="DictionaryMemory High Storage/Retrieve Philox"), # Disable noise tests for now as they trigger failure in DictionaryMemory lookup # (Functions.DictionaryMemory, philox_var, {'rate':RAND1, 'noise':RAND2}, [[ # 0.79172504, 0.52889492, 0.56804456, 0.92559664, 0.07103606, 0.0871293 , 0.0202184 , 0.83261985, 0.77815675, 0.87001215 ],[ @@ -121,18 +118,18 @@ #]]), pytest.param(Functions.ContentAddressableMemory, philox_var, {'rate':RAND1, 'retrieval_prob':0.1, 'seed': module_seed}, [[ 0. for i in range(SIZE) ],[ 0. for i in range(SIZE) ]], - id="ContentAddressableMemory Low Retrieval (Philox)"), + id="ContentAddressableMemory Low Retrieval Philox"), pytest.param(Functions.ContentAddressableMemory, philox_var, {'rate':RAND1, 'storage_prob':0.01, 'seed': module_seed}, [[ 0. for i in range(SIZE) ],[ 0. 
for i in range(SIZE) ]], - id="ContentAddressableMemory Low Storage (Philox)"), + id="ContentAddressableMemory Low Storage Philox"), pytest.param(Functions.ContentAddressableMemory, philox_var, {'rate':RAND1, 'retrieval_prob':0.9, 'storage_prob':0.9, 'seed': module_seed}, [[0.45615033221654855, 0.5684339488686485, 0.018789800436355142, 0.6176354970758771, 0.6120957227224214, 0.6169339968747569, 0.9437480785146242, 0.6818202991034834, 0.359507900573786, 0.43703195379934145], [0.6976311959272649, 0.06022547162926983, 0.6667667154456677, 0.6706378696181594, 0.2103825610738409, 0.1289262976548533, 0.31542835092418386, 0.3637107709426226, 0.5701967704178796, 0.43860151346232035]], - id="ContentAddressableMemory High Storage/Retrieval (Philox)"), + id="ContentAddressableMemory High Storage/Retrieval Philox"), pytest.param(Functions.ContentAddressableMemory, philox_var, {'initializer':test_initializer, 'rate':RAND1, 'seed': module_seed}, [[0.45615033221654855, 0.5684339488686485, 0.018789800436355142, 0.6176354970758771, 0.6120957227224214, 0.6169339968747569, 0.9437480785146242, 0.6818202991034834, 0.359507900573786, 0.43703195379934145], [0.6976311959272649, 0.06022547162926983, 0.6667667154456677, 0.6706378696181594, 0.2103825610738409, 0.1289262976548533, 0.31542835092418386, 0.3637107709426226, 0.5701967704178796, 0.43860151346232035]], - id="ContentAddressableMemory Initializer (Philox)"), + id="ContentAddressableMemory Initializer Philox"), ] @pytest.mark.function diff --git a/tests/functions/test_selection.py b/tests/functions/test_selection.py index 3ca5059706a..8fe21b1c5b2 100644 --- a/tests/functions/test_selection.py +++ b/tests/functions/test_selection.py @@ -16,20 +16,27 @@ test_philox = np.random.rand(SIZE) test_philox /= sum(test_philox) +expected_philox_prob = (0., 0.43037873274483895, 0., 0., 0., 0., 0., 0., 0., 0.) +expected_philox_ind = (0., 1., 0., 0., 0., 0., 0., 0., 0., 0.) 
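+
+# The Philox-driven PROB modes give different results when compiled at fp32, so
+# the default expected tuples above are mapped to fp32-specific values here and
+# swapped in by test_basic based on the active precision.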
+ +llvm_res = {'fp32': {}, 'fp64': {}} +llvm_res['fp32'][expected_philox_prob] = (0.09762700647115707, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0) +llvm_res['fp32'][expected_philox_ind] = (1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0) + test_data = [ - (Functions.OneHot, test_var, {'mode':kw.MAX_VAL}, [0., 0., 0., 0., 0., 0., 0., 0., 0.92732552, 0.]), - (Functions.OneHot, test_var, {'mode':kw.MAX_ABS_VAL}, [0., 0., 0., 0., 0., 0., 0., 0., 0.92732552, 0.]), - (Functions.OneHot, -test_var, {'mode':kw.MAX_ABS_VAL}, [0., 0., 0., 0., 0., 0., 0., 0., 0.92732552, 0.]), - (Functions.OneHot, test_var, {'mode':kw.MAX_INDICATOR}, [0., 0., 0., 0., 0., 0., 0., 0., 1., 0.]), - (Functions.OneHot, test_var, {'mode':kw.MAX_ABS_INDICATOR}, [0., 0., 0., 0., 0., 0., 0., 0., 1., 0.]), - (Functions.OneHot, test_var, {'mode':kw.MIN_VAL}, [0., 0., 0., 0., 0., 0., 0., 0., 0., -0.23311696]), - (Functions.OneHot, test_var, {'mode':kw.MIN_ABS_VAL}, [0., 0., 0., 0.08976637, 0., 0., 0., 0., 0., 0.]), - (Functions.OneHot, test_var, {'mode':kw.MIN_INDICATOR}, [0., 0., 0., 0., 0., 0., 0., 0., 0., 1.]), - (Functions.OneHot, test_var, {'mode':kw.MIN_ABS_INDICATOR}, [0., 0., 0., 1.,0., 0., 0., 0., 0., 0.]), - (Functions.OneHot, [test_var, test_prob], {'mode':kw.PROB}, [0., 0., 0., 0.08976636599379373, 0., 0., 0., 0., 0., 0.]), - (Functions.OneHot, [test_var, test_prob], {'mode':kw.PROB_INDICATOR}, [0., 0., 0., 1., 0., 0., 0., 0., 0., 0.]), - (Functions.OneHot, [test_var, test_philox], {'mode':kw.PROB}, [0., 0.43037873274483895, 0., 0., 0., 0., 0., 0., 0., 0.]), - (Functions.OneHot, [test_var, test_philox], {'mode':kw.PROB_INDICATOR}, [0., 1., 0., 0., 0., 0., 0., 0., 0., 0.]), + (Functions.OneHot, test_var, {'mode':kw.MAX_VAL}, (0., 0., 0., 0., 0., 0., 0., 0., 0.92732552, 0.)), + (Functions.OneHot, test_var, {'mode':kw.MAX_ABS_VAL}, (0., 0., 0., 0., 0., 0., 0., 0., 0.92732552, 0.)), + (Functions.OneHot, -test_var, {'mode':kw.MAX_ABS_VAL}, (0., 0., 0., 0., 0., 0., 0., 0., 0.92732552, 0.)), + (Functions.OneHot, test_var, {'mode':kw.MAX_INDICATOR}, (0., 0., 0., 0., 0., 0., 0., 0., 1., 0.)), + (Functions.OneHot, test_var, {'mode':kw.MAX_ABS_INDICATOR}, (0., 0., 0., 0., 0., 0., 0., 0., 1., 0.)), + (Functions.OneHot, test_var, {'mode':kw.MIN_VAL}, (0., 0., 0., 0., 0., 0., 0., 0., 0., -0.23311696)), + (Functions.OneHot, test_var, {'mode':kw.MIN_ABS_VAL}, (0., 0., 0., 0.08976637, 0., 0., 0., 0., 0., 0.)), + (Functions.OneHot, test_var, {'mode':kw.MIN_INDICATOR}, (0., 0., 0., 0., 0., 0., 0., 0., 0., 1.)), + (Functions.OneHot, test_var, {'mode':kw.MIN_ABS_INDICATOR}, (0., 0., 0., 1.,0., 0., 0., 0., 0., 0.)), + (Functions.OneHot, [test_var, test_prob], {'mode':kw.PROB}, (0., 0., 0., 0.08976636599379373, 0., 0., 0., 0., 0., 0.)), + (Functions.OneHot, [test_var, test_prob], {'mode':kw.PROB_INDICATOR}, (0., 0., 0., 1., 0., 0., 0., 0., 0., 0.)), + (Functions.OneHot, [test_var, test_philox], {'mode':kw.PROB}, expected_philox_prob), + (Functions.OneHot, [test_var, test_philox], {'mode':kw.PROB_INDICATOR}, expected_philox_ind), ] # use list, naming function produces ugly names @@ -45,8 +52,8 @@ "OneHot MIN_ABS_INDICATOR", "OneHot PROB", "OneHot PROB_INDICATOR", - "OneHot PROB PHILOX", - "OneHot PROB_INDICATOR PHILOX", + "OneHot PROB Philox", + "OneHot PROB_INDICATOR Philox", ] GROUP_PREFIX="SelectionFunction " @@ -62,10 +69,15 @@ def test_basic(func, variable, params, expected, benchmark, func_mode): if len(variable) == 2 and variable[1] is test_philox: f.parameters.random_state.set(_SeededPhilox([0])) + if func_mode != 'Python': + 
precision = pytest.helpers.llvm_current_fp_precision() + expected = llvm_res[precision].get(expected, expected) + EX = pytest.helpers.get_func_execution(f, func_mode) EX(variable) res = EX(variable) + assert np.allclose(res, expected) if benchmark.enabled: benchmark(EX, variable) diff --git a/tests/functions/test_transfer.py b/tests/functions/test_transfer.py index b526a0cb4ed..5f71ee55d35 100644 --- a/tests/functions/test_transfer.py +++ b/tests/functions/test_transfer.py @@ -7,6 +7,7 @@ from math import e, pi, sqrt SIZE=10 +np.random.seed(0) test_var = np.random.rand(SIZE) test_matrix = np.random.rand(SIZE, SIZE) test_matrix_s = np.random.rand(SIZE, SIZE // 4) @@ -35,26 +36,46 @@ def gaussian_distort_helper(seed): test_data = [ - (Functions.Linear, test_var, {'slope':RAND1, 'intercept':RAND2}, None, test_var * RAND1 + RAND2), - (Functions.Exponential, test_var, {'scale':RAND1, 'rate':RAND2}, None, RAND1 * np.exp(RAND2 * test_var)), - (Functions.Logistic, test_var, {'gain':RAND1, 'x_0':RAND2, 'offset':RAND3, 'scale':RAND4}, None, RAND4 / (1 + np.exp(-(RAND1 * (test_var - RAND2)) + RAND3))), - (Functions.Tanh, test_var, {'gain':RAND1, 'bias':RAND2, 'x_0':RAND3, 'offset':RAND4}, None, tanh_helper), - (Functions.ReLU, test_var, {'gain':RAND1, 'bias':RAND2, 'leak':RAND3}, None, np.maximum(RAND1 * (test_var - RAND2), RAND3 * RAND1 *(test_var - RAND2))), - (Functions.Angle, [0.5488135, 0.71518937, 0.60276338, 0.54488318, 0.4236548, - 0.64589411, 0.43758721, 0.891773, 0.96366276, 0.38344152], {}, None, - [0.85314409, 0.00556188, 0.01070476, 0.0214405, 0.05559454, - 0.08091079, 0.21657281, 0.19296643, 0.21343805, 0.92738261, 0.00483101]), - (Functions.Gaussian, test_var, {'standard_deviation':RAND1, 'bias':RAND2, 'scale':RAND3, 'offset':RAND4}, None, gaussian_helper), - (Functions.GaussianDistort, test_var.tolist(), {'bias': RAND1, 'variance':RAND2, 'offset':RAND3, 'scale':RAND4 }, None, gaussian_distort_helper(0)), - (Functions.GaussianDistort, test_var.tolist(), {'bias': RAND1, 'variance':RAND2, 'offset':RAND3, 'scale':RAND4, 'seed':0 }, None, gaussian_distort_helper(0)), - (Functions.SoftMax, test_var, {'gain':RAND1, 'per_item': False}, None, softmax_helper), - (Functions.SoftMax, test_var, {'gain':RAND1, 'params':{kw.OUTPUT_TYPE:kw.MAX_VAL}, 'per_item': False}, None, np.where(softmax_helper == np.max(softmax_helper), np.max(softmax_helper), 0)), - (Functions.SoftMax, test_var, {'gain':RAND1, 'params':{kw.OUTPUT_TYPE:kw.MAX_INDICATOR}, 'per_item': False}, None, np.where(softmax_helper == np.max(softmax_helper), 1, 0)), - (Functions.LinearMatrix, test_var.tolist(), {'matrix':test_matrix.tolist()}, None, np.dot(test_var, test_matrix)), - (Functions.LinearMatrix, test_var.tolist(), {'matrix':test_matrix_l.tolist()}, None, np.dot(test_var, test_matrix_l)), - (Functions.LinearMatrix, test_var.tolist(), {'matrix':test_matrix_s.tolist()}, None, np.dot(test_var, test_matrix_s)), + pytest.param(Functions.Linear, test_var, {'slope':RAND1, 'intercept':RAND2}, test_var * RAND1 + RAND2, id="LINEAR"), + pytest.param(Functions.Exponential, test_var, {'scale':RAND1, 'rate':RAND2}, RAND1 * np.exp(RAND2 * test_var), id="EXPONENTIAL"), + pytest.param(Functions.Logistic, test_var, {'gain':RAND1, 'x_0':RAND2, 'offset':RAND3, 'scale':RAND4}, RAND4 / (1 + np.exp(-(RAND1 * (test_var - RAND2)) + RAND3)), id="LOGISTIC"), + pytest.param(Functions.Tanh, test_var, {'gain':RAND1, 'bias':RAND2, 'x_0':RAND3, 'offset':RAND4}, tanh_helper, id="TANH"), + pytest.param(Functions.ReLU, test_var, {'gain':RAND1, 'bias':RAND2, 
'leak':RAND3}, np.maximum(RAND1 * (test_var - RAND2), RAND3 * RAND1 *(test_var - RAND2)), id="RELU"), + pytest.param(Functions.Angle, [0.5488135, 0.71518937, 0.60276338, 0.54488318, 0.4236548, + 0.64589411, 0.43758721, 0.891773, 0.96366276, 0.38344152], {}, + [0.85314409, 0.00556188, 0.01070476, 0.0214405, 0.05559454, + 0.08091079, 0.21657281, 0.19296643, 0.21343805, 0.92738261, 0.00483101], + id="ANGLE"), + pytest.param(Functions.Gaussian, test_var, {'standard_deviation':RAND1, 'bias':RAND2, 'scale':RAND3, 'offset':RAND4}, gaussian_helper, id="GAUSSIAN"), + pytest.param(Functions.GaussianDistort, test_var.tolist(), {'bias': RAND1, 'variance':RAND2, 'offset':RAND3, 'scale':RAND4 }, gaussian_distort_helper(0), id="GAUSSIAN DISTORT GLOBAL SEED"), + pytest.param(Functions.GaussianDistort, test_var.tolist(), {'bias': RAND1, 'variance':RAND2, 'offset':RAND3, 'scale':RAND4, 'seed':0 }, gaussian_distort_helper(0), id="GAUSSIAN DISTORT"), + pytest.param(Functions.SoftMax, test_var, {'gain':RAND1, 'per_item': False}, softmax_helper, id="SOFT_MAX ALL"), + pytest.param(Functions.SoftMax, test_var, {'gain':RAND1, 'params':{kw.OUTPUT_TYPE:kw.MAX_VAL}, 'per_item': False}, np.where(softmax_helper == np.max(softmax_helper), np.max(softmax_helper), 0), id="SOFT_MAX MAX_VAL"), + pytest.param(Functions.SoftMax, test_var, {'gain':RAND1, 'params':{kw.OUTPUT_TYPE:kw.MAX_INDICATOR}, 'per_item': False}, np.where(softmax_helper == np.max(softmax_helper), 1, 0), id="SOFT_MAX MAX_INDICATOR"), + pytest.param(Functions.SoftMax, test_var, {'gain':RAND1, 'params':{kw.OUTPUT_TYPE:kw.PROB}, 'per_item': False}, + [0.0, 0.0, 0.0, 0.0, test_var[4], 0.0, 0.0, 0.0, 0.0, 0.0], id="SOFT_MAX PROB"), + pytest.param(Functions.LinearMatrix, test_var.tolist(), {'matrix':test_matrix.tolist()}, np.dot(test_var, test_matrix), id="LINEAR_MATRIX SQUARE"), + pytest.param(Functions.LinearMatrix, test_var.tolist(), {'matrix':test_matrix_l.tolist()}, np.dot(test_var, test_matrix_l), id="LINEAR_MATRIX WIDE"), + pytest.param(Functions.LinearMatrix, test_var.tolist(), {'matrix':test_matrix_s.tolist()}, np.dot(test_var, test_matrix_s), id="LINEAR_MATRIX TALL"), ] +@pytest.mark.function +@pytest.mark.transfer_function +@pytest.mark.benchmark +@pytest.mark.parametrize("func, variable, params, expected", test_data) +def test_execute(func, variable, params, expected, benchmark, func_mode): + if 'Angle' in func.componentName and func_mode != 'Python': + pytest.skip('Angle not yet supported by LLVM or PTX') + benchmark.group = "TransferFunction " + func.componentName + f = func(default_variable=variable, **params) + ex = pytest.helpers.get_func_execution(f, func_mode) + + res = ex(variable) + assert np.allclose(res, expected) + if benchmark.enabled: + benchmark(ex, variable) + + relu_derivative_helper = lambda x : RAND1 if x > 0 else RAND1 * RAND3 logistic_helper = RAND4 / (1 + np.exp(-(RAND1 * (test_var - RAND2)) + RAND3)) tanh_derivative_helper = (RAND1 * (test_var + RAND2) + RAND3) @@ -67,25 +88,6 @@ def gaussian_distort_helper(seed): (Functions.Tanh, test_var, {'gain':RAND1, 'bias':RAND2, 'offset':RAND3, 'scale':RAND4}, tanh_derivative_helper), ] -# use list, naming function produces ugly names -names = [ - "LINEAR", - "EXPONENTIAL", - "LOGISTIC", - "TANH", - "RELU", - "ANGLE", - "GAUSIAN", - "GAUSSIAN DISTORT GLOBAL SEED", - "GAUSSIAN DISTORT", - "SOFT_MAX ALL", - "SOFT_MAX MAX_VAL", - "SOFT_MAX MAX_INDICATOR", - "LINEAR_MATRIX SQUARE", - "LINEAR_MATRIX WIDE", - "LINEAR_MATRIX TALL", -] - derivative_names = [ "LINEAR_DERIVATIVE", 
"EXPONENTIAL_DERIVATIVE", @@ -94,23 +96,6 @@ def gaussian_distort_helper(seed): "TANH_DERIVATIVE", ] -@pytest.mark.function -@pytest.mark.transfer_function -@pytest.mark.benchmark -@pytest.mark.parametrize("func, variable, params, fail, expected", test_data, ids=names) -def test_execute(func, variable, params, fail, expected, benchmark, func_mode): - if 'Angle' in func.componentName and func_mode != 'Python': - pytest.skip('Angle not yet supported by LLVM or PTX') - benchmark.group = "TransferFunction " + func.componentName - f = func(default_variable=variable, **params) - ex = pytest.helpers.get_func_execution(f, func_mode) - - res = ex(variable) - assert np.allclose(res, expected) - if benchmark.enabled: - benchmark(ex, variable) - - @pytest.mark.function @pytest.mark.transfer_function @pytest.mark.benchmark diff --git a/tests/llvm/test_builtins_intrinsics.py b/tests/llvm/test_builtins_intrinsics.py index dad65836dd8..307ccdabc5d 100644 --- a/tests/llvm/test_builtins_intrinsics.py +++ b/tests/llvm/test_builtins_intrinsics.py @@ -10,12 +10,23 @@ @pytest.mark.benchmark(group="Builtins") @pytest.mark.parametrize("op, args, builtin, result", [ (np.exp, (x,), "__pnl_builtin_exp", np.exp(x)), + #~900 is the limit after which exp returns inf + (np.exp, (900.0,), "__pnl_builtin_exp", np.exp(900.0)), (np.log, (x,), "__pnl_builtin_log", np.log(x)), (np.power, (x,y), "__pnl_builtin_pow", np.power(x, y)), (np.tanh, (x,), "__pnl_builtin_tanh", np.tanh(x)), + #~450 is the limit after which exp(2x) used in tanh formula returns inf + (np.tanh, (450.0,), "__pnl_builtin_tanh", np.tanh(450)), (lambda x: 1.0 / np.tanh(x), (x,), "__pnl_builtin_coth", 1 / np.tanh(x)), + #~450 is the limit after which exp(2x) used in coth formula returns inf + (lambda x: 1.0 / np.tanh(x), (450,), "__pnl_builtin_coth", 1 / np.tanh(450)), (lambda x: 1.0 / np.sinh(x), (x,), "__pnl_builtin_csch", 1 / np.sinh(x)), - ], ids=["EXP", "LOG", "POW", "TANH", "COTH", "CSCH"]) + #~450 is the limit after which exp(2x) used in csch formula returns inf + (lambda x: 1.0 / np.sinh(x), (450,), "__pnl_builtin_csch", 1 / np.sinh(450)), + #~900 is the limit after which exp(x) used in csch formula returns inf + (lambda x: 1.0 / np.sinh(x), (900,), "__pnl_builtin_csch", 1 / np.sinh(900)), + ], ids=["EXP", "Large EXP", "LOG", "POW", "TANH", "Large TANH", "COTH", "Large COTH", + "CSCH", "Large CSCH", "xLarge CSCH"]) def test_builtin_op(benchmark, op, args, builtin, result, func_mode): if func_mode == 'Python': f = op @@ -34,10 +45,15 @@ def test_builtin_op(benchmark, op, args, builtin, result, func_mode): builder.ret_void() bin_f = pnlvm.LLVMBinaryFunction.get(wrap_name) - ptx_res = np.asarray(type(result)(0)) + dty = np.dtype(bin_f.byref_arg_types[0]) + ptx_res = np.empty_like(result, dtype=dty) ptx_res_arg = pnlvm.jit_engine.pycuda.driver.Out(ptx_res) def f(*a): - bin_f.cuda_call(*(np.double(p) for p in a), ptx_res_arg) + bin_f.cuda_call(*(dty.type(p) for p in a), ptx_res_arg) return ptx_res res = benchmark(f, *args) - assert np.allclose(res, result) + + if pytest.helpers.llvm_current_fp_precision() == 'fp32': + assert np.allclose(res, result) + else: + np.testing.assert_allclose(res, result) diff --git a/tests/llvm/test_builtins_matrix.py b/tests/llvm/test_builtins_matrix.py index 8010f3d317c..f3b485468f5 100644 --- a/tests/llvm/test_builtins_matrix.py +++ b/tests/llvm/test_builtins_matrix.py @@ -8,9 +8,12 @@ DIM_X = 1000 DIM_Y = 2000 -u = np.random.rand(DIM_X, DIM_Y) +# These are just basic tests to check that matrix indexing and operations +# work 
correctly when compiled. The values don't matter much. +# Might as well make them representable in fp32 for single precision testing. +u = np.random.rand(DIM_X, DIM_Y).astype(np.float32).astype(np.float64) +v = np.random.rand(DIM_X, DIM_Y).astype(np.float32).astype(np.float64) trans_u = u.transpose() -v = np.random.rand(DIM_X, DIM_Y) vector = np.random.rand(DIM_X) trans_vector = np.random.rand(DIM_Y) scalar = np.random.rand() @@ -29,183 +32,87 @@ mat_sadd_res = np.add(u, scalar) mat_smul_res = np.multiply(u, scalar) - -ct_u = u.ctypes.data_as(ctypes.POINTER(ctypes.c_double)) -ct_v = v.ctypes.data_as(ctypes.POINTER(ctypes.c_double)) -ct_vec = vector.ctypes.data_as(ctypes.POINTER(ctypes.c_double)) -ct_tvec = trans_vector.ctypes.data_as(ctypes.POINTER(ctypes.c_double)) -ct_mat_res = llvm_mat_res.ctypes.data_as(ctypes.POINTER(ctypes.c_double)) -ct_vec_res = llvm_vec_res.ctypes.data_as(ctypes.POINTER(ctypes.c_double)) -ct_tvec_res = llvm_tvec_res.ctypes.data_as(ctypes.POINTER(ctypes.c_double)) - - -@pytest.mark.benchmark(group="Hadamard") -@pytest.mark.parametrize("op, builtin, result", [ - (np.add, "__pnl_builtin_mat_add", mat_add_res), - (np.subtract, "__pnl_builtin_mat_sub", mat_sub_res), - (np.multiply, "__pnl_builtin_mat_hadamard", mat_mul_res), - ], ids=["ADD", "SUB", "MUL"]) -def test_mat_hadamard(benchmark, op, builtin, result, func_mode): - if func_mode == 'Python': - def ex(): - return op(u, v) - elif func_mode == 'LLVM': - bin_f = pnlvm.LLVMBinaryFunction.get(builtin) - def ex(): - bin_f(ct_u, ct_v, DIM_X, DIM_Y, ct_mat_res) - return llvm_mat_res - elif func_mode == 'PTX': - bin_f = pnlvm.LLVMBinaryFunction.get(builtin) - cuda_u = pnlvm.jit_engine.pycuda.driver.In(u) - cuda_v = pnlvm.jit_engine.pycuda.driver.In(v) - cuda_res = pnlvm.jit_engine.pycuda.driver.Out(llvm_mat_res) - def ex(): - bin_f.cuda_call(cuda_u, cuda_v, np.int32(DIM_X), np.int32(DIM_Y), cuda_res) - return llvm_mat_res - - res = benchmark(ex) - assert np.allclose(res, result) - - -@pytest.mark.benchmark(group="Scalar") -@pytest.mark.parametrize("op, builtin, result", [ - (np.add, "__pnl_builtin_mat_scalar_add", mat_sadd_res), - (np.multiply, "__pnl_builtin_mat_scalar_mult", mat_smul_res), - ], ids=["ADD", "MUL"]) -def test_mat_scalar(benchmark, op, builtin, result, func_mode): - if func_mode == 'Python': - def ex(): - return op(u, scalar) - elif func_mode == 'LLVM': - bin_f = pnlvm.LLVMBinaryFunction.get(builtin) - def ex(): - bin_f(ct_u, scalar, DIM_X, DIM_Y, ct_mat_res) - return llvm_mat_res - elif func_mode == 'PTX': - bin_f = pnlvm.LLVMBinaryFunction.get(builtin) - cuda_u = pnlvm.jit_engine.pycuda.driver.In(u) - cuda_res = pnlvm.jit_engine.pycuda.driver.Out(llvm_mat_res) - def ex(): - bin_f.cuda_call(cuda_u, np.float64(scalar), np.int32(DIM_X), np.int32(DIM_Y), cuda_res) - return llvm_mat_res - - res = benchmark(ex) - assert np.allclose(res, result) - - -@pytest.mark.benchmark(group="Dot") -def test_dot(benchmark, func_mode): - if func_mode == 'Python': - def ex(): - return np.dot(vector, u) - elif func_mode == 'LLVM': - bin_f = pnlvm.LLVMBinaryFunction.get("__pnl_builtin_vxm") - def ex(): - bin_f(ct_vec, ct_u, DIM_X, DIM_Y, ct_vec_res) - return llvm_vec_res - elif func_mode == 'PTX': - bin_f = pnlvm.LLVMBinaryFunction.get("__pnl_builtin_vxm") - cuda_vec = pnlvm.jit_engine.pycuda.driver.In(vector) - cuda_mat = pnlvm.jit_engine.pycuda.driver.In(u) - cuda_res = pnlvm.jit_engine.pycuda.driver.Out(llvm_vec_res) - def ex(): - bin_f.cuda_call(cuda_vec, cuda_mat, np.int32(DIM_X), np.int32(DIM_Y), cuda_res) - return 
llvm_vec_res - - res = benchmark(ex) - assert np.allclose(res, dot_res) - - -@pytest.mark.llvm -@pytest.mark.benchmark(group="Dot") -@pytest.mark.parametrize('mode', ['CPU', - pytest.param('PTX', marks=pytest.mark.cuda)]) -def test_dot_llvm_constant_dim(benchmark, mode): - custom_name = None - +def _get_const_dim_func(builtin, *dims): with pnlvm.LLVMBuilderContext.get_current() as ctx: - custom_name = ctx.get_unique_name("vxsqm") - double_ptr_ty = ctx.float_ty.as_pointer() - func_ty = ir.FunctionType(ir.VoidType(), (double_ptr_ty, double_ptr_ty, double_ptr_ty)) + custom_name = ctx.get_unique_name("cont_dim" + builtin) + # get builtin function + builtin = ctx.import_llvm_function(builtin) + pointer_arg_types = [a for a in builtin.type.pointee.args if pnlvm.helpers.is_pointer(a)] + + func_ty = ir.FunctionType(ir.VoidType(), pointer_arg_types) - # get builtin IR - builtin = ctx.import_llvm_function("__pnl_builtin_vxm") # Create square vector matrix multiply - function = ir.Function(ctx.module, func_ty, name=custom_name) - _x = ctx.int32_ty(DIM_X) - _y = ctx.int32_ty(DIM_Y) - _v, _m, _o = function.args + function = ir.Function(ctx.module, builtin.type.pointee, name=custom_name) + const_dims = (ctx.int32_ty(d) for d in dims) + *inputs, output = (a for a in function.args if pnlvm.helpers.is_floating_point(a)) block = function.append_basic_block(name="entry") builder = ir.IRBuilder(block) - builder.call(builtin, [_v, _m, _x, _y, _o]) + builder.call(builtin, [*inputs, *const_dims, output]) builder.ret_void() - binf2 = pnlvm.LLVMBinaryFunction.get(custom_name) - if mode == 'CPU': - benchmark(binf2, ct_vec, ct_u, ct_vec_res) - else: - cuda_vec = pnlvm.jit_engine.pycuda.driver.In(vector) - cuda_mat = pnlvm.jit_engine.pycuda.driver.In(u) - cuda_res = pnlvm.jit_engine.pycuda.driver.Out(llvm_vec_res) - benchmark(binf2.cuda_call, cuda_vec, cuda_mat, cuda_res) - assert np.allclose(llvm_vec_res, dot_res) - - -@pytest.mark.benchmark(group="Dot") -def test_dot_transposed(benchmark, func_mode): + return custom_name + +@pytest.mark.benchmark +@pytest.mark.parametrize("op, x, y, builtin, result", [ + (np.add, u, v, "__pnl_builtin_mat_add", mat_add_res), + (np.subtract, u, v, "__pnl_builtin_mat_sub", mat_sub_res), + (np.multiply, u, v, "__pnl_builtin_mat_hadamard", mat_mul_res), + (np.add, u, scalar, "__pnl_builtin_mat_scalar_add", mat_sadd_res), + (np.multiply, u, scalar, "__pnl_builtin_mat_scalar_mult", mat_smul_res), + (np.dot, vector, u, "__pnl_builtin_vxm", dot_res), + (np.dot, trans_vector, trans_u, "__pnl_builtin_vxm_transposed", trans_dot_res), + ], ids=["ADD", "SUB", "MUL", "ADDS", "MULS", "DOT", "TRANS DOT"]) +@pytest.mark.parametrize("dims", [(DIM_X, DIM_Y), (0, 0)], ids=["VAR-DIM", "CONST-DIM"]) +def test_matrix_op(benchmark, op, x, y, builtin, result, func_mode, dims): if func_mode == 'Python': def ex(): - return np.dot(trans_vector, trans_u) - elif func_mode == 'LLVM': - bin_f = pnlvm.LLVMBinaryFunction.get("__pnl_builtin_vxm_transposed") - def ex(): - bin_f(ct_tvec, ct_u, DIM_X, DIM_Y, ct_tvec_res) - return llvm_tvec_res - elif func_mode == 'PTX': - bin_f = pnlvm.LLVMBinaryFunction.get("__pnl_builtin_vxm_transposed") - cuda_vec = pnlvm.jit_engine.pycuda.driver.In(trans_vector) - cuda_mat = pnlvm.jit_engine.pycuda.driver.In(u) - cuda_res = pnlvm.jit_engine.pycuda.driver.Out(llvm_tvec_res) - def ex(): - bin_f.cuda_call(cuda_vec, cuda_mat, np.int32(DIM_X), np.int32(DIM_Y), cuda_res) - return llvm_tvec_res + return op(x, y) - res = benchmark(ex) - assert np.allclose(res, trans_dot_res) + elif 
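
[Note: _get_const_dim_func above wraps a builtin so its dimension arguments become compile-time constants. A self-contained llvmlite sketch of the same trick; "op" and the sizes are stand-ins, not PNL builtins.]

    from llvmlite import ir

    mod = ir.Module(name="const_dim_sketch")
    dbl_p = ir.DoubleType().as_pointer()
    i32 = ir.IntType(32)

    # stand-in builtin with run-time dims: op(double* in, i32 x, i32 y, double* out)
    op = ir.Function(mod, ir.FunctionType(ir.VoidType(), (dbl_p, i32, i32, dbl_p)), name="op")

    # the wrapper keeps only the pointer args; dims are baked in as literals,
    # which lets the optimizer specialize for the fixed sizes
    wrap = ir.Function(mod, ir.FunctionType(ir.VoidType(), (dbl_p, dbl_p)), name="op_1000x2000")
    builder = ir.IRBuilder(wrap.append_basic_block("entry"))
    builder.call(op, [wrap.args[0], i32(1000), i32(2000), wrap.args[1]])
    builder.ret_void()

    print(mod)   # textual LLVM IR with constant dimensions
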
func_mode == 'LLVM': + if dims == (0, 0): + func_name = _get_const_dim_func(builtin, DIM_X, DIM_Y) + else: + func_name = builtin + bin_f = pnlvm.LLVMBinaryFunction.get(func_name) + dty = np.dtype(bin_f.byref_arg_types[0]) + assert dty == np.dtype(bin_f.byref_arg_types[1]) + assert dty == np.dtype(bin_f.byref_arg_types[4]) -@pytest.mark.llvm -@pytest.mark.benchmark(group="Dot") -@pytest.mark.parametrize('mode', ['CPU', - pytest.param('PTX', marks=pytest.mark.cuda)]) -def test_dot_transposed_llvm_constant_dim(benchmark, mode): - custom_name = None + lx = x.astype(dty) + ly = dty.type(y) if np.isscalar(y) else y.astype(dty) + lres = np.empty_like(result, dtype=dty) - with pnlvm.LLVMBuilderContext.get_current() as ctx: - custom_name = ctx.get_unique_name("vxsqm") - double_ptr_ty = ctx.float_ty.as_pointer() - func_ty = ir.FunctionType(ir.VoidType(), (double_ptr_ty, double_ptr_ty, double_ptr_ty)) + ct_x = lx.ctypes.data_as(bin_f.c_func.argtypes[0]) + ct_y = ly if np.isscalar(ly) else ly.ctypes.data_as(bin_f.c_func.argtypes[1]) + ct_res = lres.ctypes.data_as(bin_f.c_func.argtypes[4]) - # get builtin IR - builtin = ctx.import_llvm_function("__pnl_builtin_vxm_transposed") + def ex(): + bin_f(ct_x, ct_y, *dims, ct_res) + return lres - # Create square vector matrix multiply - function = ir.Function(ctx.module, func_ty, name=custom_name) - _x = ctx.int32_ty(DIM_X) - _y = ctx.int32_ty(DIM_Y) - _v, _m, _o = function.args - block = function.append_basic_block(name="entry") - builder = ir.IRBuilder(block) - builder.call(builtin, [_v, _m, _x, _y, _o]) - builder.ret_void() + elif func_mode == 'PTX': + if dims == (0, 0): + func_name = _get_const_dim_func(builtin, DIM_X, DIM_Y) + else: + func_name = builtin + + bin_f = pnlvm.LLVMBinaryFunction.get(func_name) + dty = np.dtype(bin_f.byref_arg_types[0]) + assert dty == np.dtype(bin_f.byref_arg_types[1]) + assert dty == np.dtype(bin_f.byref_arg_types[4]) + + lx = x.astype(dty) + ly = dty.type(y) if np.isscalar(y) else y.astype(dty) + lres = np.empty_like(result, dtype=dty) + + cuda_x = pnlvm.jit_engine.pycuda.driver.In(lx) + cuda_y = ly if np.isscalar(ly) else pnlvm.jit_engine.pycuda.driver.In(ly) + cuda_res = pnlvm.jit_engine.pycuda.driver.Out(lres) + def ex(): + bin_f.cuda_call(cuda_x, cuda_y, np.int32(dims[0]), np.int32(dims[1]), cuda_res) + return lres - binf2 = pnlvm.LLVMBinaryFunction.get(custom_name) - if mode == 'CPU': - benchmark(binf2, ct_tvec, ct_u, ct_tvec_res) - else: - cuda_vec = pnlvm.jit_engine.pycuda.driver.In(trans_vector) - cuda_mat = pnlvm.jit_engine.pycuda.driver.In(u) - cuda_res = pnlvm.jit_engine.pycuda.driver.Out(llvm_tvec_res) - benchmark(binf2.cuda_call, cuda_vec, cuda_mat, cuda_res) - assert np.allclose(llvm_tvec_res, trans_dot_res) + res = benchmark(ex) + assert np.allclose(res, result) diff --git a/tests/llvm/test_builtins_mt_random.py b/tests/llvm/test_builtins_mt_random.py index 86a02ff8627..19dbeb7b818 100644 --- a/tests/llvm/test_builtins_mt_random.py +++ b/tests/llvm/test_builtins_mt_random.py @@ -10,7 +10,7 @@ @pytest.mark.benchmark(group="Mersenne Twister integer PRNG") @pytest.mark.parametrize('mode', ['Python', 'numpy', pytest.param('LLVM', marks=pytest.mark.llvm), - pytest.param('PTX', marks=pytest.mark.cuda)]) + pytest.helpers.cuda_param('PTX')]) def test_random_int(benchmark, mode): res = [] if mode == 'Python': @@ -53,7 +53,7 @@ def f(): @pytest.mark.benchmark(group="Mersenne Twister floating point PRNG") @pytest.mark.parametrize('mode', ['Python', 'numpy', pytest.param('LLVM', marks=pytest.mark.llvm), - 
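
[Note: the LLVM/PTX branches above derive every buffer dtype and pointer type from the compiled function instead of hard-coding c_double, so the tests follow the build's precision. The underlying zero-copy handoff looks like this; in the tests the pointer type comes from bin_f.c_func.argtypes.]

    import ctypes
    import numpy as np

    a = np.arange(6, dtype=np.float64)
    ptr = a.ctypes.data_as(ctypes.POINTER(ctypes.c_double))  # a view, no copy
    print(ptr[0], ptr[5])   # 0.0 5.0
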
pytest.param('PTX', marks=pytest.mark.cuda)]) + pytest.helpers.cuda_param('PTX')]) def test_random_float(benchmark, mode): res = [] if mode == 'Python': @@ -72,7 +72,7 @@ def f(): init_fun(state, SEED) gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_double') - out = ctypes.c_double() + out = gen_fun.byref_arg_types[1]() def f(): gen_fun(state, out) return out.value @@ -83,7 +83,7 @@ def f(): init_fun.cuda_call(gpu_state, np.int32(SEED)) gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_double') - out = np.asfarray([0.0], dtype=np.float64) + out = np.asfarray([0.0], dtype=np.dtype(gen_fun.byref_arg_types[1])) gpu_out = pnlvm.jit_engine.pycuda.driver.Out(out) def f(): gen_fun.cuda_call(gpu_state, gpu_out) @@ -97,7 +97,7 @@ def f(): @pytest.mark.benchmark(group="Marsenne Twister Normal distribution") @pytest.mark.parametrize('mode', ['numpy', pytest.param('LLVM', marks=pytest.mark.llvm), - pytest.param('PTX', marks=pytest.mark.cuda)]) + pytest.helpers.cuda_param('PTX')]) # Python uses different algorithm so skip it in this test def test_random_normal(benchmark, mode): if mode == 'numpy': @@ -111,7 +111,7 @@ def f(): init_fun(state, SEED) gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_normal') - out = ctypes.c_double() + out = gen_fun.byref_arg_types[1]() def f(): gen_fun(state, out) return out.value @@ -122,7 +122,7 @@ def f(): init_fun.cuda_call(gpu_state, np.int32(SEED)) gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_normal') - out = np.asfarray([0.0], dtype=np.float64) + out = np.asfarray([0.0], dtype=np.dtype(gen_fun.byref_arg_types[1])) gpu_out = pnlvm.jit_engine.pycuda.driver.Out(out) def f(): gen_fun.cuda_call(gpu_state, gpu_out) diff --git a/tests/llvm/test_builtins_philox_random.py b/tests/llvm/test_builtins_philox_random.py index 1117fcc3605..479e91379e7 100644 --- a/tests/llvm/test_builtins_philox_random.py +++ b/tests/llvm/test_builtins_philox_random.py @@ -9,7 +9,7 @@ @pytest.mark.benchmark(group="Philox integer PRNG") @pytest.mark.parametrize('mode', ['numpy', pytest.param('LLVM', marks=pytest.mark.llvm), - pytest.param('PTX', marks=pytest.mark.cuda)]) + pytest.helpers.cuda_param('PTX')]) @pytest.mark.parametrize('seed, expected', [ (0, [259491006799949737, 4754966410622352325, 8698845897610382596, 1686395276220330909, 18061843536446043542, 4723914225006068263]), (-5, [4936860362606747269, 11611290354192475889, 2015254117581537576, 4620074701282684350, 9574602527017877750, 2811009141214824706]), @@ -57,7 +57,7 @@ def f(): @pytest.mark.benchmark(group="Philox integer PRNG") @pytest.mark.parametrize('mode', ['numpy', pytest.param('LLVM', marks=pytest.mark.llvm), - pytest.param('PTX', marks=pytest.mark.cuda)]) + pytest.helpers.cuda_param('PTX')]) def test_random_int32(benchmark, mode): res = [] if mode == 'numpy': @@ -99,7 +99,7 @@ def f(): @pytest.mark.benchmark(group="Philox floating point PRNG") @pytest.mark.parametrize('mode', ['numpy', pytest.param('LLVM', marks=pytest.mark.llvm), - pytest.param('PTX', marks=pytest.mark.cuda)]) + pytest.helpers.cuda_param('PTX')]) def test_random_double(benchmark, mode): res = [] if mode == 'numpy': @@ -138,7 +138,7 @@ def f(): @pytest.mark.benchmark(group="Philox floating point PRNG") @pytest.mark.parametrize('mode', ['numpy', pytest.param('LLVM', marks=pytest.mark.llvm), - pytest.param('PTX', marks=pytest.mark.cuda)]) + pytest.helpers.cuda_param('PTX')]) def test_random_float(benchmark, mode): res = [] if mode == 'numpy': @@ -177,7 +177,7 @@ def f(): @pytest.mark.benchmark(group="Philox 
Normal distribution") @pytest.mark.parametrize('mode', ['numpy', pytest.param('LLVM', marks=pytest.mark.llvm), - pytest.param('PTX', marks=pytest.mark.cuda)]) + pytest.helpers.cuda_param('PTX')]) @pytest.mark.parametrize('fp_type', [pnlvm.ir.DoubleType(), pnlvm.ir.FloatType()], ids=lambda x: str(x)) def test_random_normal(benchmark, mode, fp_type): diff --git a/tests/llvm/test_builtins_vector.py b/tests/llvm/test_builtins_vector.py index cf101848eca..7bb1f472cae 100644 --- a/tests/llvm/test_builtins_vector.py +++ b/tests/llvm/test_builtins_vector.py @@ -5,50 +5,66 @@ from psyneulink.core import llvm as pnlvm -DIM_X=1000 - - -u = np.random.rand(DIM_X) -v = np.random.rand(DIM_X) +DIM_X=1500 +# These are just basic tests to check that vector indexing and operations +# work correctly when compiled. The values don't matter much. +# Might as well make them representable in fp32 for single precision testing. +u = np.random.rand(DIM_X).astype(np.float32).astype(np.float64) +v = np.random.rand(DIM_X).astype(np.float32).astype(np.float64) scalar = np.random.rand() -llvm_res = np.random.rand(DIM_X) add_res = np.add(u, v) sub_res = np.subtract(u, v) mul_res = np.multiply(u, v) smul_res = np.multiply(u, scalar) -ct_u = u.ctypes.data_as(ctypes.POINTER(ctypes.c_double)) -ct_v = v.ctypes.data_as(ctypes.POINTER(ctypes.c_double)) -ct_res = llvm_res.ctypes.data_as(ctypes.POINTER(ctypes.c_double)) - - @pytest.mark.benchmark(group="Hadamard") -@pytest.mark.parametrize("op, y, llvm_y, builtin, result", [ - (np.add, v, ct_v, "__pnl_builtin_vec_add", add_res), - (np.subtract, v, ct_v, "__pnl_builtin_vec_sub", sub_res), - (np.multiply, v, ct_v, "__pnl_builtin_vec_hadamard", mul_res), - (np.multiply, scalar, scalar, "__pnl_builtin_vec_scalar_mult", smul_res), +@pytest.mark.parametrize("op, v, builtin, result", [ + (np.add, v, "__pnl_builtin_vec_add", add_res), + (np.subtract, v, "__pnl_builtin_vec_sub", sub_res), + (np.multiply, v, "__pnl_builtin_vec_hadamard", mul_res), + (np.multiply, scalar, "__pnl_builtin_vec_scalar_mult", smul_res), ], ids=["ADD", "SUB", "MUL", "SMUL"]) -def test_vector_op(benchmark, op, y, llvm_y, builtin, result, func_mode): +def test_vector_op(benchmark, op, v, builtin, result, func_mode): if func_mode == 'Python': def ex(): - return op(u, y) + return op(u, v) elif func_mode == 'LLVM': bin_f = pnlvm.LLVMBinaryFunction.get(builtin) + dty = np.dtype(bin_f.byref_arg_types[0]) + assert dty == np.dtype(bin_f.byref_arg_types[1]) + assert dty == np.dtype(bin_f.byref_arg_types[3]) + + lu = u.astype(dty) + lv = dty.type(v) if np.isscalar(v) else v.astype(dty) + lres = np.empty_like(lu) + + ct_u = lu.ctypes.data_as(bin_f.c_func.argtypes[0]) + ct_v = lv if np.isscalar(lv) else lv.ctypes.data_as(bin_f.c_func.argtypes[1]) + ct_res = lres.ctypes.data_as(bin_f.c_func.argtypes[3]) + def ex(): - bin_f(ct_u, llvm_y, DIM_X, ct_res) - return llvm_res + bin_f(ct_u, ct_v, DIM_X, ct_res) + return lres + elif func_mode == 'PTX': bin_f = pnlvm.LLVMBinaryFunction.get(builtin) - cuda_u = pnlvm.jit_engine.pycuda.driver.In(u) - cuda_y = np.float64(y) if np.isscalar(y) else pnlvm.jit_engine.pycuda.driver.In(y) - cuda_res = pnlvm.jit_engine.pycuda.driver.Out(llvm_res) + dty = np.dtype(bin_f.byref_arg_types[0]) + assert dty == np.dtype(bin_f.byref_arg_types[1]) + assert dty == np.dtype(bin_f.byref_arg_types[3]) + + lu = u.astype(dty) + lv = dty.type(v) if np.isscalar(v) else v.astype(dty) + lres = np.empty_like(lu) + + cuda_u = pnlvm.jit_engine.pycuda.driver.In(lu) + cuda_v = lv if np.isscalar(lv) else 
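
[Note: the Philox tests can pin expected outputs per seed because counter-based generators are pure functions of (seed, counter). NumPy sketch, illustrative only.]

    import numpy as np

    a = np.random.Generator(np.random.Philox(42)).random(3)
    b = np.random.Generator(np.random.Philox(42)).random(3)
    assert np.array_equal(a, b)   # identical streams for identical seeds
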
pnlvm.jit_engine.pycuda.driver.In(lv) + cuda_res = pnlvm.jit_engine.pycuda.driver.Out(lres) def ex(): - bin_f.cuda_call(cuda_u, cuda_y, np.int32(DIM_X), cuda_res) - return llvm_res + bin_f.cuda_call(cuda_u, cuda_v, np.int32(DIM_X), cuda_res) + return lres res = benchmark(ex) assert np.allclose(res, result) @@ -61,16 +77,25 @@ def ex(): return np.sum(u) elif func_mode == 'LLVM': bin_f = pnlvm.LLVMBinaryFunction.get("__pnl_builtin_vec_sum") + + lu = u.astype(np.dtype(bin_f.byref_arg_types[0])) + llvm_res = np.empty(1, dtype=lu.dtype) + + ct_u = lu.ctypes.data_as(bin_f.c_func.argtypes[0]) + ct_res = llvm_res.ctypes.data_as(bin_f.c_func.argtypes[2]) + def ex(): bin_f(ct_u, DIM_X, ct_res) return llvm_res[0] elif func_mode == 'PTX': bin_f = pnlvm.LLVMBinaryFunction.get("__pnl_builtin_vec_sum") - cuda_u = pnlvm.jit_engine.pycuda.driver.In(u) - cuda_res = pnlvm.jit_engine.pycuda.driver.Out(llvm_res) + lu = u.astype(np.dtype(bin_f.byref_arg_types[0])) + cuda_u = pnlvm.jit_engine.pycuda.driver.In(lu) + res = np.empty(1, dtype=lu.dtype) + cuda_res = pnlvm.jit_engine.pycuda.driver.Out(res) def ex(): bin_f.cuda_call(cuda_u, np.int32(DIM_X), cuda_res) - return llvm_res[0] + return res[0] res = benchmark(ex) assert np.allclose(res, sum(u)) diff --git a/tests/llvm/test_compile.py b/tests/llvm/test_compile.py index ed012f037b5..406fc1e2430 100644 --- a/tests/llvm/test_compile.py +++ b/tests/llvm/test_compile.py @@ -8,20 +8,25 @@ DIM_X=1000 DIM_Y=2000 -matrix = np.random.rand(DIM_X, DIM_Y) -vector = np.random.rand(DIM_X) -llvm_res = np.random.rand(DIM_Y) - -ct_vec = vector.ctypes.data_as(ctypes.POINTER(ctypes.c_double)) -ct_mat = matrix.ctypes.data_as(ctypes.POINTER(ctypes.c_double)) -x, y = matrix.shape - @pytest.mark.llvm def test_recompile(): # The original builtin mxv function binf = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_vxm') + dty = np.dtype(binf.byref_arg_types[0]) + assert dty == np.dtype(binf.byref_arg_types[1]) + assert dty == np.dtype(binf.byref_arg_types[4]) + + matrix = np.random.rand(DIM_X, DIM_Y).astype(dty) + vector = np.random.rand(DIM_X).astype(dty) + llvm_res = np.empty(DIM_Y, dtype=dty) + + x, y = matrix.shape + + ct_vec = vector.ctypes.data_as(binf.c_func.argtypes[0]) + ct_mat = matrix.ctypes.data_as(binf.c_func.argtypes[1]) + orig_res = np.empty_like(llvm_res) - ct_res = orig_res.ctypes.data_as(ctypes.POINTER(ctypes.c_double)) + ct_res = orig_res.ctypes.data_as(binf.c_func.argtypes[4]) binf.c_func(ct_vec, ct_mat, x, y, ct_res) @@ -30,7 +35,7 @@ def test_recompile(): pnlvm._llvm_build() rebuild_res = np.empty_like(llvm_res) - ct_res = rebuild_res.ctypes.data_as(ctypes.POINTER(ctypes.c_double)) + ct_res = rebuild_res.ctypes.data_as(binf.c_func.argtypes[4]) binf.c_func(ct_vec, ct_mat, x, y, ct_res) assert np.array_equal(orig_res, rebuild_res) @@ -38,13 +43,13 @@ def test_recompile(): # Get a new pointer binf2 = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_vxm') new_res = np.empty_like(llvm_res) - ct_res = new_res.ctypes.data_as(ctypes.POINTER(ctypes.c_double)) + ct_res = new_res.ctypes.data_as(binf2.c_func.argtypes[4]) - binf.c_func(ct_vec, ct_mat, x, y, ct_res) + binf2.c_func(ct_vec, ct_mat, x, y, ct_res) assert np.array_equal(rebuild_res, new_res) callable_res = np.empty_like(llvm_res) - ct_res = callable_res.ctypes.data_as(ctypes.POINTER(ctypes.c_double)) + ct_res = callable_res.ctypes.data_as(binf.c_func.argtypes[4]) - binf(ct_vec, ct_mat, x, y, ct_res) + binf2(ct_vec, ct_mat, x, y, ct_res) assert np.array_equal(new_res, callable_res) diff --git a/tests/llvm/test_custom_func.py 
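
[Note: test_vector_op now accepts either a scalar or an array as the second operand. The coercion idiom it uses, isolated below with a hypothetical helper name.]

    import numpy as np

    def coerce_operand(y, dty):
        # scalars become one value of the target dtype; arrays are cast wholesale
        return dty.type(y) if np.isscalar(y) else y.astype(dty)

    dty = np.dtype(np.float32)
    print(coerce_operand(0.25, dty), coerce_operand(np.ones(3), dty))
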
b/tests/llvm/test_custom_func.py index 5435b9f3013..406beb937c3 100644 --- a/tests/llvm/test_custom_func.py +++ b/tests/llvm/test_custom_func.py @@ -4,67 +4,6 @@ from psyneulink.core import llvm as pnlvm -from llvmlite import ir - - -ITERATIONS=100 -DIM_X=1000 - -matrix = np.random.rand(DIM_X, DIM_X) -vector = np.random.rand(DIM_X) -llvm_res = np.random.rand(DIM_X) - -x, y = matrix.shape - -@pytest.mark.llvm -@pytest.mark.parametrize('mode', ['CPU', - pytest.param('PTX', marks=pytest.mark.cuda)]) -def test_fixed_dimensions__pnl_builtin_vxm(mode): - # The original builtin mxv function - binf = pnlvm.LLVMBinaryFunction.get("__pnl_builtin_vxm") - orig_res = np.empty_like(llvm_res) - if mode == 'CPU': - ct_in_ty, ct_mat_ty, _, _, ct_res_ty = binf.byref_arg_types - - ct_vec = vector.ctypes.data_as(ctypes.POINTER(ct_in_ty)) - ct_mat = matrix.ctypes.data_as(ctypes.POINTER(ct_mat_ty)) - ct_res = orig_res.ctypes.data_as(ctypes.POINTER(ct_res_ty)) - - binf.c_func(ct_vec, ct_mat, x, y, ct_res) - else: - binf.cuda_wrap_call(vector, matrix, np.int32(x), np.int32(y), orig_res) - - custom_name = None - - with pnlvm.LLVMBuilderContext.get_current() as ctx: - custom_name = ctx.get_unique_name("vxsqm") - double_ptr_ty = ctx.convert_python_struct_to_llvm_ir(1.0).as_pointer() - func_ty = ir.FunctionType(ir.VoidType(), (double_ptr_ty, double_ptr_ty, double_ptr_ty)) - - # get builtin IR - builtin = ctx.import_llvm_function("__pnl_builtin_vxm") - - # Create square vector matrix multiply - function = ir.Function(ctx.module, func_ty, name=custom_name) - _x = ctx.int32_ty(x) - _v, _m, _o = function.args - block = function.append_basic_block(name="entry") - builder = ir.IRBuilder(block) - builder.call(builtin, [_v, _m, _x, _x, _o]) - builder.ret_void() - - binf2 = pnlvm.LLVMBinaryFunction.get(custom_name) - new_res = np.empty_like(llvm_res) - - if mode == 'CPU': - ct_res = new_res.ctypes.data_as(ctypes.POINTER(ct_res_ty)) - - binf2(ct_vec, ct_mat, ct_res) - else: - binf2.cuda_wrap_call(vector, matrix, new_res) - - assert np.array_equal(orig_res, new_res) - @pytest.mark.llvm @pytest.mark.parametrize('mode', ['CPU', @@ -79,14 +18,15 @@ def test_integer_broadcast(mode, val): with pnlvm.LLVMBuilderContext.get_current() as ctx: custom_name = ctx.get_unique_name("broadcast") int_ty = ctx.convert_python_struct_to_llvm_ir(val) - int_array_ty = ir.ArrayType(int_ty, 8) - func_ty = ir.FunctionType(ir.VoidType(), (int_ty.as_pointer(), - int_array_ty.as_pointer())) - function = ir.Function(ctx.module, func_ty, name=custom_name) + int_array_ty = pnlvm.ir.ArrayType(int_ty, 8) + func_ty = pnlvm.ir.FunctionType(pnlvm.ir.VoidType(), + (int_ty.as_pointer(), + int_array_ty.as_pointer())) + function = pnlvm.ir.Function(ctx.module, func_ty, name=custom_name) i, o = function.args block = function.append_basic_block(name="entry") - builder = ir.IRBuilder(block) + builder = pnlvm.ir.IRBuilder(block) ival = builder.load(i) ival = builder.add(ival, ival.type(1)) with pnlvm.helpers.array_ptr_loop(builder, o, "broadcast") as (b, i): diff --git a/tests/llvm/test_debug_composition.py b/tests/llvm/test_debug_composition.py index 5555e601791..e84ba68c1c8 100644 --- a/tests/llvm/test_debug_composition.py +++ b/tests/llvm/test_debug_composition.py @@ -10,7 +10,7 @@ from psyneulink.core.compositions.composition import Composition debug_options=["const_input=[[[7]]]", "const_input", "const_data", "const_params", "const_data", "const_state", "stat", "time_stat", "unaligned_copy"] -options_combinations = (";".join(("debug_info", *c)) for i in 
range(len(debug_options) + 1) for c in combinations(debug_options, i)) +options_combinations = (";".join(("", *c)) for i in range(len(debug_options) + 1) for c in combinations(debug_options, i)) @pytest.mark.composition @pytest.mark.parametrize("mode", [pytest.param(pnlvm.ExecutionMode.LLVMRun, marks=pytest.mark.llvm), diff --git a/tests/llvm/test_helpers.py b/tests/llvm/test_helpers.py index 6862c784d00..56dff0df215 100644 --- a/tests/llvm/test_helpers.py +++ b/tests/llvm/test_helpers.py @@ -108,7 +108,11 @@ def test_helper_fclamp_const(mode): [[1, 1], [1, 100], [1,2], [-4,5], [0, -100], [-1,-2], [[1,1,1,-4,0,-1], [1,100,2,5,-100,-2]] ]) -def test_helper_is_close(mode, var1, var2, rtol, atol): +@pytest.mark.parametrize('fp_type', [ir.DoubleType, ir.FloatType]) +def test_helper_is_close(mode, var1, var2, rtol, atol, fp_type): + + # Instantiate LLVMBuilderContext using the preferred fp type + pnlvm.builder_context.LLVMBuilderContext(fp_type()) tolerance = {} if rtol is not None: @@ -116,11 +120,10 @@ def test_helper_is_close(mode, var1, var2, rtol, atol): if atol is not None: tolerance['atol'] = atol - with pnlvm.LLVMBuilderContext.get_current() as ctx: - double_ptr_ty = ir.DoubleType().as_pointer() - func_ty = ir.FunctionType(ir.VoidType(), [double_ptr_ty, double_ptr_ty, - double_ptr_ty, ctx.int32_ty]) + float_ptr_ty = ctx.float_ty.as_pointer() + func_ty = ir.FunctionType(ir.VoidType(), [float_ptr_ty, float_ptr_ty, + float_ptr_ty, ctx.int32_ty]) custom_name = ctx.get_unique_name("is_close") function = ir.Function(ctx.module, func_ty, name=custom_name) @@ -143,13 +146,15 @@ def test_helper_is_close(mode, var1, var2, rtol, atol): builder.ret_void() - vec1 = np.atleast_1d(np.asfarray(var1)) - vec2 = np.atleast_1d(np.asfarray(var2)) + bin_f = pnlvm.LLVMBinaryFunction.get(custom_name) + + dty = np.dtype(bin_f.byref_arg_types[0]) + vec1 = np.atleast_1d(np.asfarray(var1, dtype=dty)) + vec2 = np.atleast_1d(np.asfarray(var2, dtype=dty)) assert len(vec1) == len(vec2) res = np.empty_like(vec2) ref = np.isclose(vec1, vec2, **tolerance) - bin_f = pnlvm.LLVMBinaryFunction.get(custom_name) if mode == 'CPU': ct_ty = ctypes.POINTER(bin_f.byref_arg_types[0]) ct_vec1 = vec1.ctypes.data_as(ct_ty) @@ -459,8 +464,8 @@ def test_helper_numerical(mode, op, var, expected, fp_type): @pytest.mark.parametrize('mode', ['CPU', pytest.param('PTX', marks=pytest.mark.cuda)]) @pytest.mark.parametrize('var,expected', [ - (np.array([1,2,3], dtype=np.float64), np.array([2,3,4], dtype=np.float64)), - (np.array([[1,2],[3,4]], dtype=np.float64), np.array([[2,3],[4,5]], dtype=np.float64)), + (np.asfarray([1,2,3]), np.asfarray([2,3,4])), + (np.asfarray([[1,2],[3,4]]), np.asfarray([[2,3],[4,5]])), ], ids=["vector", "matrix"]) def test_helper_elementwise_op(mode, var, expected): with pnlvm.LLVMBuilderContext.get_current() as ctx: @@ -479,12 +484,18 @@ def test_helper_elementwise_op(mode, var, expected): builder.ret_void() bin_f = pnlvm.LLVMBinaryFunction.get(custom_name) + + # convert input to the right type + dt = np.dtype(bin_f.byref_arg_types[0]) + dt = np.empty(1, dtype=dt).flatten().dtype + var = var.astype(dt) + if mode == 'CPU': ct_vec = np.ctypeslib.as_ctypes(var) res = bin_f.byref_arg_types[1]() bin_f(ct_vec, ctypes.byref(res)) else: - res = copy.deepcopy(var) + res = np.empty_like(var) bin_f.cuda_wrap_call(var, res) assert np.array_equal(res, expected) @@ -524,13 +535,64 @@ def test_helper_recursive_iterate_arrays(mode, var1, var2, expected): builder.ret_void() bin_f = pnlvm.LLVMBinaryFunction.get(custom_name) + + # convert 
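
[Note: options_combinations above enumerates every subset of the debug options, now with an empty lead element instead of a forced "debug_info". A stand-alone version of the generator, with a shortened option list.]

    from itertools import combinations

    debug_options = ["const_input", "const_params"]
    opts = [";".join(("", *c))
            for i in range(len(debug_options) + 1)
            for c in combinations(debug_options, i)]
    print(opts)   # ['', ';const_input', ';const_params', ';const_input;const_params']
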
input to the right type + dt = np.dtype(bin_f.byref_arg_types[0]) + dt = np.empty(1, dtype=dt).flatten().dtype + var1 = var1.astype(dt) + var2 = var2.astype(dt) + if mode == 'CPU': ct_vec = np.ctypeslib.as_ctypes(var1) ct_vec_2 = np.ctypeslib.as_ctypes(var2) res = bin_f.byref_arg_types[2]() bin_f(ct_vec, ct_vec_2, ctypes.byref(res)) else: - res = copy.deepcopy(var1) + res = np.empty_like(var1) bin_f.cuda_wrap_call(var1, var2, res) assert np.array_equal(res, expected) + + +_fp_types = [ir.DoubleType, ir.FloatType, ir.HalfType] + + +@pytest.mark.llvm +@pytest.mark.parametrize('mode', ['CPU', + pytest.param('PTX', marks=pytest.mark.cuda)]) +@pytest.mark.parametrize('t1', _fp_types) +@pytest.mark.parametrize('t2', _fp_types) +@pytest.mark.parametrize('val', [1.0, '-Inf', 'Inf', 'NaN', 16777216, 16777217, -1.0]) +def test_helper_convert_fp_type(t1, t2, mode, val): + with pnlvm.LLVMBuilderContext.get_current() as ctx: + func_ty = ir.FunctionType(ir.VoidType(), [t1().as_pointer(), t2().as_pointer()]) + custom_name = ctx.get_unique_name("fp_convert") + function = ir.Function(ctx.module, func_ty, name=custom_name) + x, y = function.args + block = function.append_basic_block(name="entry") + builder = ir.IRBuilder(block) + + x_val = builder.load(x) + conv_x = pnlvm.helpers.convert_type(builder, x_val, y.type.pointee) + builder.store(conv_x, y) + builder.ret_void() + + bin_f = pnlvm.LLVMBinaryFunction.get(custom_name) + + # Convert type to numpy dtype + npt1, npt2 = (np.dtype(bin_f.byref_arg_types[x]) for x in (0, 1)) + npt1, npt2 = (np.float16().dtype if x == np.uint16 else x for x in (npt1, npt2)) + + # instantiate value, result and reference + x = np.asfarray(val, dtype=npt1) + y = np.asfarray(np.random.rand(), dtype=npt2) + ref = x.astype(npt2) + + if mode == 'CPU': + ct_x = x.ctypes.data_as(bin_f.c_func.argtypes[0]) + ct_y = y.ctypes.data_as(bin_f.c_func.argtypes[1]) + bin_f(ct_x, ct_y) + else: + bin_f.cuda_wrap_call(x, y) + + assert np.allclose(y, ref, equal_nan=True) diff --git a/tests/json/model_backprop.py b/tests/mdf/model_backprop.py similarity index 100% rename from tests/json/model_backprop.py rename to tests/mdf/model_backprop.py diff --git a/tests/json/model_basic.py b/tests/mdf/model_basic.py similarity index 100% rename from tests/json/model_basic.py rename to tests/mdf/model_basic.py diff --git a/tests/json/model_basic_non_identity.py b/tests/mdf/model_basic_non_identity.py similarity index 100% rename from tests/json/model_basic_non_identity.py rename to tests/mdf/model_basic_non_identity.py diff --git a/tests/json/model_integrators.py b/tests/mdf/model_integrators.py similarity index 100% rename from tests/json/model_integrators.py rename to tests/mdf/model_integrators.py diff --git a/tests/json/model_nested_comp_with_scheduler.py b/tests/mdf/model_nested_comp_with_scheduler.py similarity index 100% rename from tests/json/model_nested_comp_with_scheduler.py rename to tests/mdf/model_nested_comp_with_scheduler.py diff --git a/tests/json/model_udfs.py b/tests/mdf/model_udfs.py similarity index 100% rename from tests/json/model_udfs.py rename to tests/mdf/model_udfs.py diff --git a/tests/json/model_varied_matrix_sizes.py b/tests/mdf/model_varied_matrix_sizes.py similarity index 100% rename from tests/json/model_varied_matrix_sizes.py rename to tests/mdf/model_varied_matrix_sizes.py diff --git a/tests/json/model_with_control.py b/tests/mdf/model_with_control.py similarity index 100% rename from tests/json/model_with_control.py rename to tests/mdf/model_with_control.py diff --git 
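
[Note: 16777216 and 16777217 in test_helper_convert_fp_type are well chosen: 2**24 + 1 is the first integer fp32 cannot represent, so it exposes rounding at the fp64-to-fp32 edge.]

    import numpy as np

    for v in (16777216, 16777217):
        print(v, np.float32(v) == np.float64(v))
    # 16777216 True
    # 16777217 False (rounds to 16777216.0 in fp32)
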
a/tests/json/model_with_two_conjoint_comps.py b/tests/mdf/model_with_two_conjoint_comps.py similarity index 100% rename from tests/json/model_with_two_conjoint_comps.py rename to tests/mdf/model_with_two_conjoint_comps.py diff --git a/tests/json/model_with_two_disjoint_comps.py b/tests/mdf/model_with_two_disjoint_comps.py similarity index 100% rename from tests/json/model_with_two_disjoint_comps.py rename to tests/mdf/model_with_two_disjoint_comps.py diff --git a/tests/json/stroop_conflict_monitoring.py b/tests/mdf/stroop_conflict_monitoring.py similarity index 100% rename from tests/json/stroop_conflict_monitoring.py rename to tests/mdf/stroop_conflict_monitoring.py diff --git a/tests/json/test_json.py b/tests/mdf/test_mdf.py similarity index 76% rename from tests/json/test_json.py rename to tests/mdf/test_mdf.py index 091e55def88..150f7c1964a 100644 --- a/tests/json/test_json.py +++ b/tests/mdf/test_mdf.py @@ -2,13 +2,14 @@ import os import psyneulink as pnl import pytest -import sys pytest.importorskip( 'modeci_mdf', - reason='JSON methods require modeci_mdf package' + reason='MDF methods require modeci_mdf package' ) +from modeci_mdf.execution_engine import evaluate_onnx_expr # noqa: E402 + # stroop stimuli red = [1, 0] @@ -76,7 +77,7 @@ def test_json_results_equivalence( simple_edge_format, ): # Get python script from file and execute - filename = f'{os.path.dirname(__file__)}/{filename}' + filename = os.path.join(os.path.dirname(__file__), filename) with open(filename, 'r') as orig_file: exec(orig_file.read()) exec(f'{composition_name}.run(inputs={input_dict_str})') @@ -103,7 +104,7 @@ def test_write_json_file( simple_edge_format, ): # Get python script from file and execute - filename = f'{os.path.dirname(__file__)}/{filename}' + filename = os.path.join(os.path.dirname(__file__), filename) with open(filename, 'r') as orig_file: exec(orig_file.read()) exec(f'{composition_name}.run(inputs={input_dict_str})') @@ -140,7 +141,7 @@ def test_write_json_file_multiple_comps( orig_results = {} # Get python script from file and execute - filename = f'{os.path.dirname(__file__)}/{filename}' + filename = os.path.join(os.path.dirname(__file__), filename) with open(filename, 'r') as orig_file: exec(orig_file.read()) @@ -168,37 +169,36 @@ def test_write_json_file_multiple_comps( # Values are generated from running onnx function RandomUniform and # RandomNormal with parameters used in model_integrators.py (seed 0). 
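
[Note: the f-string paths were replaced with os.path.join so the separator matches the host OS. Minimal form of the pattern; assumes a script context where __file__ is defined.]

    import os

    filename = os.path.join(os.path.dirname(__file__), 'model_basic.py')
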
# RandomNormal values are different on mac versus linux and windows
-if sys.platform == 'linux':
-    onnx_integrators_fixed_seeded_noise = {
-        'A': [[-0.9999843239784241]],
-        'B': [[-1.1295466423034668]],
-        'C': [[-0.0647732987999916]],
-        'D': [[-0.499992161989212]],
-        'E': [[-0.2499941289424896]],
-    }
-elif sys.platform == 'win32':
-    onnx_integrators_fixed_seeded_noise = {
-        'A': [[0.0976270437240601]],
-        'B': [[-0.4184607267379761]],
-        'C': [[0.290769636631012]],
-        'D': [[0.04881352186203]],
-        'E': [[0.1616101264953613]],
-    }
-else:
-    assert sys.platform == 'darwin'
-    onnx_integrators_fixed_seeded_noise = {
-        'A': [[-0.9999550580978394]],
-        'B': [[-0.8846577405929565]],
-        'C': [[0.0576711297035217]],
-        'D': [[-0.4999775290489197]],
-        'E': [[-0.2499831467866898]],
+onnx_noise_data = {
+    'onnx_ops.randomuniform': {
+        'A': {'low': -1.0, 'high': 1.0, 'seed': 0, 'shape': (1, 1)},
+        'D': {'low': -0.5, 'high': 0.5, 'seed': 0, 'shape': (1, 1)},
+        'E': {'low': -0.25, 'high': 0.5, 'seed': 0, 'shape': (1, 1)}
+    },
+    'onnx_ops.randomnormal': {
+        'B': {'mean': -1.0, 'scale': 0.5, 'seed': 0, 'shape': (1, 1)},
+        'C': {'mean': 0.0, 'scale': 0.25, 'seed': 0, 'shape': (1, 1)},
     }
+}
+onnx_integrators_fixed_seeded_noise = {}
+integrators_runtime_params = None

-integrators_runtime_params = (
-    'runtime_params={'
-    + ','.join([f'{k}: {{ "noise": {v} }}' for k, v in onnx_integrators_fixed_seeded_noise.items()])
-    + '}'
-)
+for func_type in onnx_noise_data:
+    for node, args in onnx_noise_data[func_type].items():
+        # generates output from onnx noise functions with seed 0 to be
+        # passed in via runtime_params during psyneulink execution
+        onnx_integrators_fixed_seeded_noise[node] = evaluate_onnx_expr(
+            func_type, base_parameters=args, evaluated_parameters=args
+        )
+
+# high-precision printing is needed because the script will be executed from a string;
+# 16 digits are insufficient on windows
+with np.printoptions(precision=32):
+    integrators_runtime_params = (
+        'runtime_params={'
+        + ','.join([f'{k}: {{ "noise": {v} }}' for k, v in onnx_integrators_fixed_seeded_noise.items()])
+        + '}'
+    )


 @pytest.mark.parametrize(
@@ -219,7 +219,7 @@ def test_mdf_equivalence(filename, composition_name, input_dict, simple_edge_for
     import modeci_mdf.execution_engine as ee

     # Get python script from file and execute
-    filename = f'{os.path.dirname(__file__)}/{filename}'
+    filename = os.path.join(os.path.dirname(__file__), filename)
     with open(filename, 'r') as orig_file:
         exec(orig_file.read())
     inputs_str = str(input_dict).replace("'", '')
@@ -240,3 +240,20 @@ def test_mdf_equivalence(filename, composition_name, input_dict, simple_edge_for
     ]

     assert pnl.safe_equals(orig_results, mdf_results)
+
+
+@pytest.mark.parametrize('filename', ['model_basic.py'])
+@pytest.mark.parametrize('fmt', ['json', 'yml'])
+def test_generate_script_from_mdf(filename, fmt):
+    filename = os.path.join(os.path.dirname(__file__), filename)
+    outfi = filename.replace('.py', f'.{fmt}')
+
+    with open(filename, 'r') as orig_file:
+        exec(orig_file.read())
+    serialized = eval(f'pnl.get_mdf_serialized(comp, fmt="{fmt}")')
+
+    with open(outfi, 'w') as f:
+        f.write(serialized)
+
+    with open(outfi, 'r') as f:
+        assert pnl.generate_script_from_mdf(f.read()) == pnl.generate_script_from_mdf(outfi)
diff --git a/tests/mechanisms/test_control_mechanism.py b/tests/mechanisms/test_control_mechanism.py index f43fdaf04b4..d5fdfd66204 100644 --- a/tests/mechanisms/test_control_mechanism.py +++ b/tests/mechanisms/test_control_mechanism.py @@ -109,6 +109,8 @@ def
test_lc_control_modulated_mechanisms_all(self): assert T_1.parameter_ports[pnl.SLOPE].mod_afferents[0] in LC.control_signals[0].efferents assert T_2.parameter_ports[pnl.SLOPE].mod_afferents[0] in LC.control_signals[0].efferents + +class TestControlMechanism: def test_control_modulation(self): Tx = pnl.TransferMechanism(name='Tx') Ty = pnl.TransferMechanism(name='Ty') @@ -124,9 +126,11 @@ def test_control_modulation(self): # comp.show_graph() assert Tz.parameter_ports[pnl.SLOPE].mod_afferents[0].sender.owner == C + assert C.parameters.control_allocation.get() == [1] result = comp.run(inputs={Tx:[1,1], Ty:[4,4]}) assert comp.results == [[[4.], [4.]], [[4.], [4.]]] + def test_identicalness_of_control_and_gating(self): """Tests same configuration as gating in tests/mechansims/test_gating_mechanism""" Input_Layer = pnl.TransferMechanism(name='Input Layer', function=pnl.Logistic, size=2) @@ -168,6 +172,8 @@ def test_identicalness_of_control_and_gating(self): # c.add_linear_processing_pathway(pathway=z) comp.add_node(Control_Mechanism) + assert np.allclose(Control_Mechanism.parameters.control_allocation.get(), [0, 0, 0]) + stim_list = { Input_Layer: [[-1, 30]], Control_Mechanism: [1.0], @@ -190,14 +196,18 @@ def test_identicalness_of_control_and_gating(self): expected_results = [[0.96941429, 0.9837254 , 0.99217549]] assert np.allclose(results, expected_results) + def test_control_of_all_input_ports(self, comp_mode): mech = pnl.ProcessingMechanism(input_ports=['A','B','C']) control_mech = pnl.ControlMechanism(control=mech.input_ports) comp = pnl.Composition() comp.add_nodes([(mech, pnl.NodeRole.INPUT), (control_mech, pnl.NodeRole.INPUT)]) results = comp.run(inputs={mech:[[2],[2],[2]], control_mech:[2]}, num_trials=2, execution_mode=comp_mode) + + assert np.allclose(control_mech.parameters.control_allocation.get(), [1, 1, 1]) np.allclose(results, [[4],[4],[4]]) + def test_control_of_all_output_ports(self, comp_mode): mech = pnl.ProcessingMechanism(output_ports=[{pnl.VARIABLE: (pnl.OWNER_VALUE, 0)}, {pnl.VARIABLE: (pnl.OWNER_VALUE, 0)}, @@ -206,6 +216,8 @@ def test_control_of_all_output_ports(self, comp_mode): comp = pnl.Composition() comp.add_nodes([(mech, pnl.NodeRole.INPUT), (control_mech, pnl.NodeRole.INPUT)]) results = comp.run(inputs={mech:[[2]], control_mech:[3]}, num_trials=2, execution_mode=comp_mode) + + assert np.allclose(control_mech.parameters.control_allocation.get(), [1, 1, 1]) np.allclose(results, [[6],[6],[6]]) def test_control_signal_default_allocation_specification(self): @@ -227,6 +239,7 @@ def test_control_signal_default_allocation_specification(self): comp = pnl.Composition() comp.add_nodes([m1,m2,m3]) comp.add_controller(c1) + assert np.allclose(c1.parameters.control_allocation.get(), [10, 10, 10]) assert c1.control_signals[0].value == [10] # defaultControlAllocation should be assigned # (as no default_allocation from pnl.ControlMechanism) assert m1.parameter_ports[pnl.SLOPE].value == [1] @@ -266,6 +279,7 @@ def test_control_signal_default_allocation_specification(self): comp = pnl.Composition() comp.add_nodes([m1,m2,m3]) comp.add_controller(c2) + assert np.allclose(c2.parameters.control_allocation.get(), [10, 10, 10]) assert c2.control_signals[0].value == [4] # default_allocation from pnl.ControlMechanism assigned assert m1.parameter_ports[pnl.SLOPE].value == [10] # has not yet received pnl.ControlSignal value assert c2.control_signals[1].value == [5] # default_allocation from pnl.ControlSignal assigned (converted scalar) diff --git 
a/tests/mechanisms/test_processing_mechanism.py b/tests/mechanisms/test_processing_mechanism.py index ced6f68ae8a..4780b575692 100644 --- a/tests/mechanisms/test_processing_mechanism.py +++ b/tests/mechanisms/test_processing_mechanism.py @@ -248,6 +248,7 @@ class TestProcessingMechanismStandardOutputPorts: (MAX_ABS_INDICATOR, [0, 0, 1]), (MAX_ABS_ONE_HOT, [0, 0, 4]), (MAX_VAL, [2]), + (PROB, [0, 2, 0]), ], ids=lambda x: x if isinstance(x, str) else "") def test_output_ports(self, mech_mode, op, expected, benchmark): @@ -265,7 +266,6 @@ def test_output_ports(self, mech_mode, op, expected, benchmark): (STANDARD_DEVIATION, [1.24721913]), (VARIANCE, [1.55555556]), (MAX_ABS_VAL, [4]), - (PROB, [0, 2, 0]), ], ids=lambda x: x if isinstance(x, str) else "") def test_output_ports2(self, op, expected): diff --git a/tests/misc/test_parameters.py b/tests/misc/test_parameters.py index 7f9cf8828c8..98af182a686 100644 --- a/tests/misc/test_parameters.py +++ b/tests/misc/test_parameters.py @@ -6,6 +6,11 @@ import warnings +NO_PARAMETERS = "NO_PARAMETERS" +NO_INIT = "NO_INIT" +NO_VALUE = "NO_VALUE" + + def shared_parameter_warning_regex(param_name, shared_name=None): if shared_name is None: shared_name = param_name @@ -83,6 +88,19 @@ def test_parameter_values_overriding(ancestor, child, should_override, reset_var assert child.parameters.variable.default_value == original_child_variable +def test_unspecified_inheritance(): + class NewTM(pnl.TransferMechanism): + class Parameters(pnl.TransferMechanism.Parameters): + pass + + assert NewTM.parameters.variable._inherited + NewTM.parameters.variable.default_value = -1 + assert not NewTM.parameters.variable._inherited + + NewTM.parameters.variable.reset() + assert NewTM.parameters.variable._inherited + + @pytest.mark.parametrize('obj, param_name, alias_name', param_alias_data) def test_aliases(obj, param_name, alias_name): obj = obj() @@ -245,11 +263,15 @@ def test_copy(): [ (pnl.AdaptiveIntegrator, {'rate': None}, 'rate', False), (pnl.AdaptiveIntegrator, {'rate': None}, 'multiplicative_param', False), + (pnl.AdaptiveIntegrator, {'rate': 0.5}, 'additive_param', False), (pnl.AdaptiveIntegrator, {'rate': 0.5}, 'rate', True), (pnl.AdaptiveIntegrator, {'rate': 0.5}, 'multiplicative_param', True), (pnl.TransferMechanism, {'integration_rate': None}, 'integration_rate', False), (pnl.TransferMechanism, {'integration_rate': 0.5}, 'integration_rate', True), - ] + (pnl.TransferMechanism, {'initial_value': 0}, 'initial_value', True), + (pnl.TransferMechanism, {'initial_value': None}, 'initial_value', False), + (pnl.TransferMechanism, {}, 'initial_value', False), + ], ) def test_user_specified(cls_, kwargs, parameter, is_user_specified): c = cls_(**kwargs) @@ -269,6 +291,17 @@ def test_function_user_specified(kwargs, parameter, is_user_specified): assert getattr(t.function.parameters, parameter)._user_specified == is_user_specified +# sort param names or pytest-xdist may cause failure +# see https://github.com/pytest-dev/pytest/issues/4101 +@pytest.mark.parametrize('attr', sorted(pnl.Parameter._additional_param_attr_properties)) +def test_additional_param_attrs(attr): + assert hasattr(pnl.Parameter, f'_set_{attr}'), ( + f'To include {attr} in Parameter._additional_param_attr_properties, you' + f' must add a _set_{attr} method on Parameter. If this is unneeded,' + ' remove it from Parameter._additional_param_attr_properties.' 
+ ) + + class TestSharedParameters: recurrent_mech = pnl.RecurrentTransferMechanism(default_variable=[0, 0], enable_learning=True) @@ -418,3 +451,185 @@ def test_conflict_no_warning_parser(self): raise delattr(pnl.AdaptiveIntegrator.Parameters, '_parse_noise') + + +class TestSpecificationType: + @staticmethod + def _create_params_class_variant(cls_param, init_param, parent_class=pnl.Component): + # init_param as Parameter doesn't make sense, only check cls_param + if cls_param is pnl.Parameter: + cls_param = pnl.Parameter() + + if cls_param is NO_PARAMETERS: + if init_param is NO_INIT: + + class TestComponent(parent_class): + pass + + else: + + class TestComponent(parent_class): + @pnl.core.globals.parameters.check_user_specified + def __init__(self, p=init_param): + super().__init__(p=p) + + elif cls_param is NO_VALUE: + if init_param is NO_INIT: + + class TestComponent(parent_class): + class Parameters(parent_class.Parameters): + pass + + else: + + class TestComponent(parent_class): + class Parameters(parent_class.Parameters): + pass + + @pnl.core.globals.parameters.check_user_specified + def __init__(self, p=init_param): + super().__init__(p=p) + + else: + if init_param is NO_INIT: + + class TestComponent(parent_class): + class Parameters(parent_class.Parameters): + p = cls_param + + else: + + class TestComponent(parent_class): + class Parameters(parent_class.Parameters): + p = cls_param + + @pnl.core.globals.parameters.check_user_specified + def __init__(self, p=init_param): + super().__init__(p=p) + + return TestComponent + + @pytest.mark.parametrize( + "cls_param, init_param, param_default", + [ + (1, 1, 1), + (1, None, 1), + (None, 1, 1), + (1, NO_INIT, 1), + ("foo", "foo", "foo"), + (np.array(1), np.array(1), np.array(1)), + (np.array([1]), np.array([1]), np.array([1])), + ], + ) + def test_valid_assignment(self, cls_param, init_param, param_default): + TestComponent = TestSpecificationType._create_params_class_variant(cls_param, init_param) + assert TestComponent.defaults.p == param_default + assert TestComponent.parameters.p.default_value == param_default + + @pytest.mark.parametrize( + "cls_param, init_param", + [ + (1, 2), + (2, 1), + (1, 1.0), + (np.array(1), 1), + (np.array([1]), 1), + (np.array([1]), np.array(1)), + ("foo", "bar"), + ], + ) + def test_conflicting_assignments(self, cls_param, init_param): + with pytest.raises(AssertionError, match="Conflicting default parameter"): + TestSpecificationType._create_params_class_variant(cls_param, init_param) + + @pytest.mark.parametrize( + "child_cls_param, child_init_param, parent_value, child_value", + [ + (NO_PARAMETERS, NO_INIT, 1, 1), + (NO_VALUE, NO_INIT, 1, 1), + (2, NO_INIT, 1, 2), + (NO_PARAMETERS, 2, 1, 2), + (NO_VALUE, 2, 1, 2), + (2, 2, 1, 2), + ], + ) + @pytest.mark.parametrize( + "parent_cls_param, parent_init_param", + [(1, 1), (1, None), (None, 1), (pnl.Parameter, 1)], + ) + def test_inheritance( + self, + parent_cls_param, + parent_init_param, + child_cls_param, + child_init_param, + parent_value, + child_value, + ): + TestParent = TestSpecificationType._create_params_class_variant( + parent_cls_param, parent_init_param + ) + TestChild = TestSpecificationType._create_params_class_variant( + child_cls_param, child_init_param, parent_class=TestParent + ) + + assert TestParent.defaults.p == parent_value + assert TestParent.parameters.p.default_value == parent_value + + assert TestChild.defaults.p == child_value + assert TestChild.parameters.p.default_value == child_value + + 
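
[Note: the tests around here rely on detecting whether a caller passed an argument explicitly, even when it equals the default. A common sentinel-based sketch of that idea; this hypothetical function is not PNL's implementation, which uses the check_user_specified decorator seen above.]

    _UNSET = object()

    def make_component(rate=_UNSET):
        user_specified = rate is not _UNSET
        if not user_specified:
            rate = 0.5                     # class default
        return rate, user_specified

    print(make_component())      # (0.5, False)
    print(make_component(0.5))   # (0.5, True): same value, explicitly passed
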
@pytest.mark.parametrize("set_from_defaults", [True, False]) + @pytest.mark.parametrize( + "child_cls_param, child_init_param", + [(1, 1), (1, None), (None, 1), (NO_PARAMETERS, 1), (1, NO_INIT)], + ) + @pytest.mark.parametrize("parent_cls_param, parent_init_param", [(0, 0), (0, None)]) + def test_set_and_reset( + self, + parent_cls_param, + parent_init_param, + child_cls_param, + child_init_param, + set_from_defaults, + ): + def set_p_default(obj, val): + if set_from_defaults: + obj.defaults.p = val + else: + obj.parameters.p.default_value = val + + TestParent = TestSpecificationType._create_params_class_variant( + parent_cls_param, parent_init_param + ) + TestChild = TestSpecificationType._create_params_class_variant( + child_cls_param, child_init_param, parent_class=TestParent + ) + TestGrandchild = TestSpecificationType._create_params_class_variant( + NO_PARAMETERS, NO_INIT, parent_class=TestChild + ) + + set_p_default(TestChild, 10) + assert TestParent.defaults.p == 0 + assert TestChild.defaults.p == 10 + assert TestGrandchild.defaults.p == 10 + + set_p_default(TestGrandchild, 20) + assert TestParent.defaults.p == 0 + assert TestChild.defaults.p == 10 + assert TestGrandchild.defaults.p == 20 + + TestChild.parameters.p.reset() + assert TestParent.defaults.p == 0 + assert TestChild.defaults.p == 1 + assert TestGrandchild.defaults.p == 20 + + TestGrandchild.parameters.p.reset() + assert TestParent.defaults.p == 0 + assert TestChild.defaults.p == 1 + assert TestGrandchild.defaults.p == 1 + + set_p_default(TestGrandchild, 20) + assert TestParent.defaults.p == 0 + assert TestChild.defaults.p == 1 + assert TestGrandchild.defaults.p == 20 diff --git a/tests/models/test_bi_percepts.py b/tests/models/test_bi_percepts.py index 2b4dbc2df43..5d819d890bf 100644 --- a/tests/models/test_bi_percepts.py +++ b/tests/models/test_bi_percepts.py @@ -126,7 +126,10 @@ def get_node(percept, node_id): # run the model res = bp_comp.run(input_dict, num_trials=n_time_steps, execution_mode=comp_mode) - np.testing.assert_allclose(res, expected) + if pytest.helpers.llvm_current_fp_precision() == 'fp32': + assert np.allclose(res, expected) + else: + np.testing.assert_allclose(res, expected) # Test that order of CIM ports follows order of Nodes in self.nodes for i in range(n_nodes): diff --git a/tests/models/test_greedy_agent.py b/tests/models/test_greedy_agent.py index 676150e3e1d..1ee9c192628 100644 --- a/tests/models/test_greedy_agent.py +++ b/tests/models/test_greedy_agent.py @@ -119,7 +119,8 @@ def test_simplified_greedy_agent_random(benchmark, comp_mode): pytest.param([a / 10.0 for a in range(0, 101)], marks=pytest.mark.stress), ], ids=lambda x: len(x)) @pytest.mark.parametrize('prng', ['Default', 'Philox']) -def test_predator_prey(benchmark, mode, prng, samples): +@pytest.mark.parametrize('fp_type', [pnl.core.llvm.ir.DoubleType, pnl.core.llvm.ir.FloatType]) +def test_predator_prey(benchmark, mode, prng, samples, fp_type): if len(samples) > 10 and mode not in {pnl.ExecutionMode.LLVM, pnl.ExecutionMode.LLVMExec, pnl.ExecutionMode.LLVMRun, @@ -132,6 +133,9 @@ def test_predator_prey(benchmark, mode, prng, samples): # OCM default mode is Python ocm_mode = 'Python' + # Instantiate LLVMBuilderContext using the preferred fp type + pnl.core.llvm.builder_context.LLVMBuilderContext(fp_type()) + benchmark.group = "Predator-Prey " + str(len(samples)) obs_len = 3 obs_coords = 2 @@ -234,11 +238,16 @@ def action_fn(variable): if prng == 'Default': assert np.allclose(run_results[0], [[0.9705216285127504, -0.1343332460369043]]) 
    elif prng == 'Philox':
-        assert np.allclose(run_results[0], [[-0.16882940384606543, -0.07280074899749223]])
+        if mode == pnl.ExecutionMode.Python or pytest.helpers.llvm_current_fp_precision() == 'fp64':
+            assert np.allclose(run_results[0], [[-0.16882940384606543, -0.07280074899749223]])
+        elif pytest.helpers.llvm_current_fp_precision() == 'fp32':
+            assert np.allclose(run_results[0], [[-0.8639436960220337, 0.4983368515968323]])
+        else:
+            assert False, "Unknown FP type!"
     else:
         assert False, "Unknown PRNG!"

-    if mode is pnl.ExecutionMode.Python:
+    if mode == pnl.ExecutionMode.Python:
         # FIXME: The results are 'close' for both Philox and MT,
         #        because they're dominated by costs
         assert np.allclose(np.asfarray(ocm.function.saved_values).flatten(),
diff --git a/tests/projections/test_projection_specifications.py b/tests/projections/test_projection_specifications.py
index 52358f04f52..02edd207534 100644
--- a/tests/projections/test_projection_specifications.py
+++ b/tests/projections/test_projection_specifications.py
@@ -28,14 +28,15 @@ def test_projection_specification_formats(self):
         M3_M4_matrix_A = (np.arange(4 * 3).reshape((4, 3)) + 1) / (4 * 5)
         M3_M4_matrix_B = (np.arange(4 * 3).reshape((4, 3)) + 1) / (4 * 3)

-        M1_M2_proj = pnl.MappingProjection(matrix=M1_M2_matrix)
+        M1_M2_proj = pnl.MappingProjection(matrix=M1_M2_matrix, name='M1_M2_matrix')
         M2_M3_proj = pnl.MappingProjection(sender=M2, receiver=M3,
                                            matrix={pnl.VALUE: M2_M3_matrix,
                                                    pnl.FUNCTION: pnl.AccumulatorIntegrator,
                                                    pnl.FUNCTION_PARAMS: {pnl.DEFAULT_VARIABLE: M2_M3_matrix,
-                                                                         pnl.INITIALIZER: M2_M3_matrix}})
-        M3_M4_proj_A = pnl.MappingProjection(sender=M3, receiver=M4, matrix=M3_M4_matrix_A)
+                                                                         pnl.INITIALIZER: M2_M3_matrix}},
+                                           name='M2_M3_proj')
+        M3_M4_proj_A = pnl.MappingProjection(sender=M3, receiver=M4, matrix=M3_M4_matrix_A, name='M3_M4_proj_A')
         c = pnl.Composition()
         c.add_linear_processing_pathway(pathway=[M1,
                                                  M1_M2_proj,
diff --git a/tutorial_requirements.txt b/tutorial_requirements.txt
index 728aa0c0eab..6d141f739cd 100644
--- a/tutorial_requirements.txt
+++ b/tutorial_requirements.txt
@@ -1,3 +1,3 @@
-graphviz<0.20.0
+graphviz<0.21.0
 jupyter<=1.0.0
-matplotlib<3.5.2
+matplotlib<3.5.3
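
[Note: several tests above keep the loose np.allclose for fp32 runs while using the strict np.testing.assert_allclose for fp64. The gap matters once fp32 rounding accumulates; a sketch, where the strict check typically, though not always, fails.]

    import numpy as np

    x = np.random.default_rng(0).random(10_000)
    s32 = float(x.astype(np.float32).sum(dtype=np.float32))
    s64 = float(x.sum())
    print(np.allclose(s32, s64))              # True: default rtol=1e-5 absorbs fp32 error
    try:
        np.testing.assert_allclose(s32, s64)  # default rtol=1e-7 is usually exceeded
        print("strict check passed")
    except AssertionError:
        print("strict check fails on fp32 accumulation")
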